Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * 2002-10-15 Posix Clocks & timers
4 : * by George Anzinger george@mvista.com
5 : * Copyright (C) 2002 2003 by MontaVista Software.
6 : *
7 : * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
8 : * Copyright (C) 2004 Boris Hu
9 : *
10 : * These are all the functions necessary to implement POSIX clocks & timers
11 : */
12 : #include <linux/mm.h>
13 : #include <linux/interrupt.h>
14 : #include <linux/slab.h>
15 : #include <linux/time.h>
16 : #include <linux/mutex.h>
17 : #include <linux/sched/task.h>
18 :
19 : #include <linux/uaccess.h>
20 : #include <linux/list.h>
21 : #include <linux/init.h>
22 : #include <linux/compiler.h>
23 : #include <linux/hash.h>
24 : #include <linux/posix-clock.h>
25 : #include <linux/posix-timers.h>
26 : #include <linux/syscalls.h>
27 : #include <linux/wait.h>
28 : #include <linux/workqueue.h>
29 : #include <linux/export.h>
30 : #include <linux/hashtable.h>
31 : #include <linux/compat.h>
32 : #include <linux/nospec.h>
33 : #include <linux/time_namespace.h>
34 :
35 : #include "timekeeping.h"
36 : #include "posix-timers.h"
37 :
38 : /*
39 : * Management arrays for POSIX timers. Timers are now kept in a static hash
40 : * table with 512 entries.
41 : * Timer ids are allocated by a local routine, which selects the proper hash
42 : * head by a key constructed from the current->signal address and a per-signal
43 : * struct counter. This keeps timer ids unique per process, but they can now
44 : * overlap between processes.
45 : */
46 :
47 : /*
48 : * Let's keep our timers in a slab cache :-)
49 : */
50 : static struct kmem_cache *posix_timers_cache;
51 :
52 : static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
53 : static DEFINE_SPINLOCK(hash_lock);
54 :
55 : static const struct k_clock * const posix_clocks[];
56 : static const struct k_clock *clockid_to_kclock(const clockid_t id);
57 : static const struct k_clock clock_realtime, clock_monotonic;
58 :
59 : /*
60 : * We assume that the new SIGEV_THREAD_ID shares no bits with the other
61 : * SIGEV values. Here we emit an error if this assumption fails.
62 : */
63 : #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
64 : ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
65 : #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
66 : #endif
67 :
68 : /*
69 : * The timer ID is turned into a timer address by idr_find().
70 : * Verifying a valid ID consists of:
71 : *
72 : * a) checking that idr_find() returns other than -1.
73 : * b) checking that the timer id matches the one in the timer itself.
74 : * c) that the timer owner is in the callers thread group.
75 : */
76 :
77 : /*
78 : * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
79 : * to implement others. This structure defines the various
80 : * clocks.
81 : *
82 : * RESOLUTION: Clock resolution is used to round up timer and interval
83 : * times, NOT to report clock times, which are reported with as
84 : * much resolution as the system can muster. In some cases this
85 : * resolution may depend on the underlying clock hardware and
86 : * may not be quantifiable until run time, and only then can the
87 : * necessary code be written. The standard says we should say
88 : * something about this issue in the documentation...
89 : *
90 : * FUNCTIONS: The CLOCKs structure defines possible functions to
91 : * handle various clock functions.
92 : *
93 : * The standard POSIX timer management code assumes the
94 : * following: 1.) The k_itimer struct (sched.h) is used for
95 : * the timer. 2.) The list, it_lock, it_clock, it_id and
96 : * it_pid fields are not modified by timer code.
97 : *
98 : * Permissions: It is assumed that the clock_settime() function defined
99 : * for each clock will take care of permission checks. Some
100 : * clocks may be settable by any user (i.e. local process
101 : * clocks), others not. Currently the only settable clock we
102 : * have is CLOCK_REALTIME and its high resolution counterpart, both
103 : * of which we hand off to do_sys_settimeofday().
104 : */
105 : static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
106 :
107 : #define lock_timer(tid, flags) \
108 : ({ struct k_itimer *__timr; \
109 : __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
110 : __timr; \
111 : })
112 :
113 : static int hash(struct signal_struct *sig, unsigned int nr)
114 : {
115 0 : return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
116 : }
117 :
118 : static struct k_itimer *__posix_timers_find(struct hlist_head *head,
119 : struct signal_struct *sig,
120 : timer_t id)
121 : {
122 : struct k_itimer *timer;
123 :
124 0 : hlist_for_each_entry_rcu(timer, head, t_hash,
125 : lockdep_is_held(&hash_lock)) {
126 0 : if ((timer->it_signal == sig) && (timer->it_id == id))
127 : return timer;
128 : }
129 : return NULL;
130 : }
131 :
132 0 : static struct k_itimer *posix_timer_by_id(timer_t id)
133 : {
134 0 : struct signal_struct *sig = current->signal;
135 0 : struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];
136 :
137 0 : return __posix_timers_find(head, sig, id);
138 : }
139 :
140 0 : static int posix_timer_add(struct k_itimer *timer)
141 : {
142 0 : struct signal_struct *sig = current->signal;
143 0 : int first_free_id = sig->posix_timer_id;
144 : struct hlist_head *head;
145 0 : int ret = -ENOENT;
146 :
147 : do {
148 0 : spin_lock(&hash_lock);
149 0 : head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
150 0 : if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
151 0 : hlist_add_head_rcu(&timer->t_hash, head);
152 0 : ret = sig->posix_timer_id;
153 : }
154 0 : if (++sig->posix_timer_id < 0)
155 0 : sig->posix_timer_id = 0;
156 0 : if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
157 : /* Loop over all possible ids completed */
158 0 : ret = -EAGAIN;
159 0 : spin_unlock(&hash_lock);
160 0 : } while (ret == -ENOENT);
161 0 : return ret;
162 : }
163 :
164 : static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
165 : {
166 0 : spin_unlock_irqrestore(&timr->it_lock, flags);
167 : }
168 :
169 : /* Get clock_realtime */
170 0 : static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
171 : {
172 0 : ktime_get_real_ts64(tp);
173 0 : return 0;
174 : }
175 :
176 0 : static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
177 : {
178 0 : return ktime_get_real();
179 : }
180 :
181 : /* Set clock_realtime */
182 0 : static int posix_clock_realtime_set(const clockid_t which_clock,
183 : const struct timespec64 *tp)
184 : {
185 0 : return do_sys_settimeofday64(tp, NULL);
186 : }
187 :
188 0 : static int posix_clock_realtime_adj(const clockid_t which_clock,
189 : struct __kernel_timex *t)
190 : {
191 0 : return do_adjtimex(t);
192 : }
193 :
194 : /*
195 : * Get monotonic time for posix timers
196 : */
197 0 : static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
198 : {
199 0 : ktime_get_ts64(tp);
200 0 : timens_add_monotonic(tp);
201 0 : return 0;
202 : }
203 :
204 0 : static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
205 : {
206 0 : return ktime_get();
207 : }
208 :
209 : /*
210 : * Get monotonic-raw time for posix timers
211 : */
212 0 : static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
213 : {
214 0 : ktime_get_raw_ts64(tp);
215 0 : timens_add_monotonic(tp);
216 0 : return 0;
217 : }
218 :
219 :
220 0 : static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
221 : {
222 0 : ktime_get_coarse_real_ts64(tp);
223 0 : return 0;
224 : }
225 :
226 0 : static int posix_get_monotonic_coarse(clockid_t which_clock,
227 : struct timespec64 *tp)
228 : {
229 0 : ktime_get_coarse_ts64(tp);
230 0 : timens_add_monotonic(tp);
231 0 : return 0;
232 : }
233 :
234 0 : static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
235 : {
236 0 : *tp = ktime_to_timespec64(KTIME_LOW_RES);
237 0 : return 0;
238 : }
239 :
240 0 : static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
241 : {
242 0 : ktime_get_boottime_ts64(tp);
243 0 : timens_add_boottime(tp);
244 0 : return 0;
245 : }
246 :
247 0 : static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
248 : {
249 0 : return ktime_get_boottime();
250 : }
251 :
252 0 : static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
253 : {
254 0 : ktime_get_clocktai_ts64(tp);
255 0 : return 0;
256 : }
257 :
258 0 : static ktime_t posix_get_tai_ktime(clockid_t which_clock)
259 : {
260 0 : return ktime_get_clocktai();
261 : }
262 :
263 0 : static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
264 : {
265 0 : tp->tv_sec = 0;
266 0 : tp->tv_nsec = hrtimer_resolution;
267 0 : return 0;
268 : }
269 :
270 : /*
271 : * Initialize everything, well, just everything in Posix clocks/timers ;)
272 : */
273 1 : static __init int init_posix_timers(void)
274 : {
275 1 : posix_timers_cache = kmem_cache_create("posix_timers_cache",
276 : sizeof(struct k_itimer), 0,
277 : SLAB_PANIC | SLAB_ACCOUNT, NULL);
278 1 : return 0;
279 : }
280 : __initcall(init_posix_timers);
281 :
282 : /*
283 : * The siginfo si_overrun field and the return value of timer_getoverrun(2)
284 : * are of type int. Clamp the overrun value to INT_MAX
285 : */
286 : static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
287 : {
288 0 : s64 sum = timr->it_overrun_last + (s64)baseval;
289 :
290 0 : return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
291 : }
292 :
293 0 : static void common_hrtimer_rearm(struct k_itimer *timr)
294 : {
295 0 : struct hrtimer *timer = &timr->it.real.timer;
296 :
297 0 : timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
298 : timr->it_interval);
299 0 : hrtimer_restart(timer);
300 0 : }
301 :
302 : /*
303 : * This function is exported for use by the signal delivery code. It is
304 : * called just prior to the info block being released and passes that
305 : * block to us. Its function is to update the overrun entry AND to
306 : * restart the timer. It should only be called if the timer is to be
307 : * restarted (i.e. we have flagged this in the sys_private entry of the
308 : * info block).
309 : *
310 : * To protect against the timer going away while the interrupt is queued,
311 : * we require that the it_requeue_pending flag be set.
312 : */
313 0 : void posixtimer_rearm(struct kernel_siginfo *info)
314 : {
315 : struct k_itimer *timr;
316 : unsigned long flags;
317 :
318 0 : timr = lock_timer(info->si_tid, &flags);
319 0 : if (!timr)
320 0 : return;
321 :
322 0 : if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
323 0 : timr->kclock->timer_rearm(timr);
324 :
325 0 : timr->it_active = 1;
326 0 : timr->it_overrun_last = timr->it_overrun;
327 0 : timr->it_overrun = -1LL;
328 0 : ++timr->it_requeue_pending;
329 :
330 0 : info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
331 : }
332 :
333 0 : unlock_timer(timr, flags);
334 : }
335 :
336 0 : int posix_timer_event(struct k_itimer *timr, int si_private)
337 : {
338 : enum pid_type type;
339 : int ret;
340 : /*
341 : * FIXME: if ->sigq is queued we can race with
342 : * dequeue_signal()->posixtimer_rearm().
343 : *
344 : * If dequeue_signal() sees the "right" value of
345 : * si_sys_private it calls posixtimer_rearm().
346 : * We re-queue ->sigq and drop ->it_lock().
347 : * posixtimer_rearm() locks the timer
348 : * and re-schedules it while ->sigq is pending.
349 : * Not really bad, but not what we want.
350 : */
351 0 : timr->sigq->info.si_sys_private = si_private;
352 :
353 0 : type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID;
354 0 : ret = send_sigqueue(timr->sigq, timr->it_pid, type);
355 : /* If we failed to send the signal the timer stops. */
356 0 : return ret > 0;
357 : }
358 :
359 : /*
360 : * This function gets called when a POSIX.1b interval timer expires. It
361 : * is used as a callback from the kernel internal timer. The
362 : * run_timer_list code ALWAYS calls with interrupts on.
363 : *
364 : * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
365 : */
366 0 : static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
367 : {
368 : struct k_itimer *timr;
369 : unsigned long flags;
370 0 : int si_private = 0;
371 0 : enum hrtimer_restart ret = HRTIMER_NORESTART;
372 :
373 0 : timr = container_of(timer, struct k_itimer, it.real.timer);
374 0 : spin_lock_irqsave(&timr->it_lock, flags);
375 :
376 0 : timr->it_active = 0;
377 0 : if (timr->it_interval != 0)
378 0 : si_private = ++timr->it_requeue_pending;
379 :
380 0 : if (posix_timer_event(timr, si_private)) {
381 : /*
382 : * The signal was not sent because it is ignored (SIG_IGN),
383 : * so we will not get a callback to restart it AND
384 : * it should be restarted.
385 : */
386 0 : if (timr->it_interval != 0) {
387 0 : ktime_t now = hrtimer_cb_get_time(timer);
388 :
389 : /*
390 : * FIXME: What we really want, is to stop this
391 : * timer completely and restart it in case the
392 : * SIG_IGN is removed. This is a non trivial
393 : * change which involves sighand locking
394 : * (sigh !), which we don't want to do late in
395 : * the release cycle.
396 : *
397 : * For now we just let timers with an interval
398 : * less than a jiffie expire every jiffie to
399 : * avoid softirq starvation in case of SIG_IGN
400 : * and a very small interval, which would put
401 : * the timer right back on the softirq pending
402 : * list. By moving now ahead of time we trick
403 : * hrtimer_forward() to expire the timer
404 : * later, while we still maintain the overrun
405 : * accuracy, but have some inconsistency in
406 : * the timer_gettime() case. This is at least
407 : * better than a starved softirq. A more
408 : * complex fix which also solves another related
409 : * inconsistency is already in the pipeline.
410 : */
411 : #ifdef CONFIG_HIGH_RES_TIMERS
412 : {
413 : ktime_t kj = NSEC_PER_SEC / HZ;
414 :
415 : if (timr->it_interval < kj)
416 : now = ktime_add(now, kj);
417 : }
418 : #endif
419 0 : timr->it_overrun += hrtimer_forward(timer, now,
420 : timr->it_interval);
421 0 : ret = HRTIMER_RESTART;
422 0 : ++timr->it_requeue_pending;
423 0 : timr->it_active = 1;
424 : }
425 : }
426 :
427 0 : unlock_timer(timr, flags);
428 0 : return ret;
429 : }
430 :
431 0 : static struct pid *good_sigevent(sigevent_t * event)
432 : {
433 0 : struct pid *pid = task_tgid(current);
434 : struct task_struct *rtn;
435 :
436 0 : switch (event->sigev_notify) {
437 : case SIGEV_SIGNAL | SIGEV_THREAD_ID:
438 0 : pid = find_vpid(event->sigev_notify_thread_id);
439 0 : rtn = pid_task(pid, PIDTYPE_PID);
440 0 : if (!rtn || !same_thread_group(rtn, current))
441 : return NULL;
442 : fallthrough;
443 : case SIGEV_SIGNAL:
444 : case SIGEV_THREAD:
445 0 : if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
446 : return NULL;
447 : fallthrough;
448 : case SIGEV_NONE:
449 : return pid;
450 : default:
451 : return NULL;
452 : }
453 : }
454 :
455 0 : static struct k_itimer * alloc_posix_timer(void)
456 : {
457 : struct k_itimer *tmr;
458 0 : tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
459 0 : if (!tmr)
460 : return tmr;
461 0 : if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
462 0 : kmem_cache_free(posix_timers_cache, tmr);
463 0 : return NULL;
464 : }
465 0 : clear_siginfo(&tmr->sigq->info);
466 0 : return tmr;
467 : }
468 :
469 0 : static void k_itimer_rcu_free(struct rcu_head *head)
470 : {
471 0 : struct k_itimer *tmr = container_of(head, struct k_itimer, rcu);
472 :
473 0 : kmem_cache_free(posix_timers_cache, tmr);
474 0 : }
475 :
476 : #define IT_ID_SET 1
477 : #define IT_ID_NOT_SET 0
478 0 : static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
479 : {
480 0 : if (it_id_set) {
481 : unsigned long flags;
482 0 : spin_lock_irqsave(&hash_lock, flags);
483 0 : hlist_del_rcu(&tmr->t_hash);
484 : spin_unlock_irqrestore(&hash_lock, flags);
485 : }
486 0 : put_pid(tmr->it_pid);
487 0 : sigqueue_free(tmr->sigq);
488 0 : call_rcu(&tmr->rcu, k_itimer_rcu_free);
489 0 : }
490 :
491 0 : static int common_timer_create(struct k_itimer *new_timer)
492 : {
493 0 : hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
494 0 : return 0;
495 : }
496 :
497 : /* Create a POSIX.1b interval timer. */
498 0 : static int do_timer_create(clockid_t which_clock, struct sigevent *event,
499 : timer_t __user *created_timer_id)
500 : {
501 0 : const struct k_clock *kc = clockid_to_kclock(which_clock);
502 : struct k_itimer *new_timer;
503 : int error, new_timer_id;
504 0 : int it_id_set = IT_ID_NOT_SET;
505 :
506 0 : if (!kc)
507 : return -EINVAL;
508 0 : if (!kc->timer_create)
509 : return -EOPNOTSUPP;
510 :
511 0 : new_timer = alloc_posix_timer();
512 0 : if (unlikely(!new_timer))
513 : return -EAGAIN;
514 :
515 0 : spin_lock_init(&new_timer->it_lock);
516 0 : new_timer_id = posix_timer_add(new_timer);
517 0 : if (new_timer_id < 0) {
518 : error = new_timer_id;
519 : goto out;
520 : }
521 :
522 0 : it_id_set = IT_ID_SET;
523 0 : new_timer->it_id = (timer_t) new_timer_id;
524 0 : new_timer->it_clock = which_clock;
525 0 : new_timer->kclock = kc;
526 0 : new_timer->it_overrun = -1LL;
527 :
528 0 : if (event) {
529 : rcu_read_lock();
530 0 : new_timer->it_pid = get_pid(good_sigevent(event));
531 : rcu_read_unlock();
532 0 : if (!new_timer->it_pid) {
533 : error = -EINVAL;
534 : goto out;
535 : }
536 0 : new_timer->it_sigev_notify = event->sigev_notify;
537 0 : new_timer->sigq->info.si_signo = event->sigev_signo;
538 0 : new_timer->sigq->info.si_value = event->sigev_value;
539 : } else {
540 0 : new_timer->it_sigev_notify = SIGEV_SIGNAL;
541 0 : new_timer->sigq->info.si_signo = SIGALRM;
542 0 : memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
543 0 : new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
544 0 : new_timer->it_pid = get_pid(task_tgid(current));
545 : }
546 :
547 0 : new_timer->sigq->info.si_tid = new_timer->it_id;
548 0 : new_timer->sigq->info.si_code = SI_TIMER;
549 :
550 0 : if (copy_to_user(created_timer_id,
551 : &new_timer_id, sizeof (new_timer_id))) {
552 : error = -EFAULT;
553 : goto out;
554 : }
555 :
556 0 : error = kc->timer_create(new_timer);
557 0 : if (error)
558 : goto out;
559 :
560 0 : spin_lock_irq(&current->sighand->siglock);
561 0 : new_timer->it_signal = current->signal;
562 0 : list_add(&new_timer->list, &current->signal->posix_timers);
563 0 : spin_unlock_irq(&current->sighand->siglock);
564 :
565 0 : return 0;
566 : /*
567 : * In the case of the timer belonging to another task, after
568 : * the task is unlocked, the timer is owned by the other task
569 : * and may cease to exist at any time. Don't use or modify
570 : * new_timer after the unlock call.
571 : */
572 : out:
573 0 : release_posix_timer(new_timer, it_id_set);
574 0 : return error;
575 : }
576 :
577 0 : SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
578 : struct sigevent __user *, timer_event_spec,
579 : timer_t __user *, created_timer_id)
580 : {
581 0 : if (timer_event_spec) {
582 : sigevent_t event;
583 :
584 0 : if (copy_from_user(&event, timer_event_spec, sizeof (event)))
585 : return -EFAULT;
586 0 : return do_timer_create(which_clock, &event, created_timer_id);
587 : }
588 0 : return do_timer_create(which_clock, NULL, created_timer_id);
589 : }
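/*
 * Editor's illustration (not part of this file): a minimal userspace sketch
 * of the syscall above, mirroring the event == NULL default branch of
 * do_timer_create() (SIGEV_SIGNAL + SIGALRM). The helper name
 * make_default_timer() is hypothetical; link with -lrt on older glibc.
 */
#if 0 /* userspace example, kept out of the kernel build */
#include <signal.h>
#include <time.h>

static int make_default_timer(timer_t *id)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,	/* deliver a signal ...        */
		.sigev_signo  = SIGALRM,	/* ... SIGALRM, to the process */
	};

	/* On success the kernel copies the new timer id out to *id. */
	return timer_create(CLOCK_MONOTONIC, &sev, id);
}
#endif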
590 :
591 : #ifdef CONFIG_COMPAT
592 : COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
593 : struct compat_sigevent __user *, timer_event_spec,
594 : timer_t __user *, created_timer_id)
595 : {
596 : if (timer_event_spec) {
597 : sigevent_t event;
598 :
599 : if (get_compat_sigevent(&event, timer_event_spec))
600 : return -EFAULT;
601 : return do_timer_create(which_clock, &event, created_timer_id);
602 : }
603 : return do_timer_create(which_clock, NULL, created_timer_id);
604 : }
605 : #endif
606 :
607 : /*
608 : * Locking issues: We need to protect the result of the id lookup until
609 : * we get the timer locked down so it is not deleted under us. The
610 : * removal is done under the hash spinlock and RCU, so we take the RCU
611 : * read lock here to bridge the find to the timer lock. To avoid a
612 : * deadlock, the timer id MUST be released without holding the timer lock.
613 : */
614 0 : static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
615 : {
616 : struct k_itimer *timr;
617 :
618 : /*
619 : * timer_t could be any type >= int and we want to make sure any
620 : * @timer_id outside positive int range fails lookup.
621 : */
622 0 : if ((unsigned long long)timer_id > INT_MAX)
623 : return NULL;
624 :
625 : rcu_read_lock();
626 0 : timr = posix_timer_by_id(timer_id);
627 0 : if (timr) {
628 0 : spin_lock_irqsave(&timr->it_lock, *flags);
629 0 : if (timr->it_signal == current->signal) {
630 : rcu_read_unlock();
631 0 : return timr;
632 : }
633 0 : spin_unlock_irqrestore(&timr->it_lock, *flags);
634 : }
635 : rcu_read_unlock();
636 :
637 0 : return NULL;
638 : }
639 :
640 0 : static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
641 : {
642 0 : struct hrtimer *timer = &timr->it.real.timer;
643 :
644 0 : return __hrtimer_expires_remaining_adjusted(timer, now);
645 : }
646 :
647 0 : static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
648 : {
649 0 : struct hrtimer *timer = &timr->it.real.timer;
650 :
651 0 : return hrtimer_forward(timer, now, timr->it_interval);
652 : }
653 :
654 : /*
655 : * Get the time remaining on a POSIX.1b interval timer. This function
656 : * is ALWAYS called with spin_lock_irq on the timer, thus it must not
657 : * mess with irq.
658 : *
659 : * We have a couple of messes to clean up here. First there is the case
660 : * of a timer that has a requeue pending. These timers should appear to
661 : * be in the timer list with an expiry as if we were to requeue them
662 : * now.
663 : *
664 : * The second issue is the SIGEV_NONE timer which may be active but is
665 : * not really ever put in the timer list (to save system resources).
666 : * This timer may be expired, and if so, we will report it here. Otherwise
667 : * it is the same as a requeue pending timer with respect to what we should
668 : * report.
669 : */
670 0 : void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
671 : {
672 0 : const struct k_clock *kc = timr->kclock;
673 : ktime_t now, remaining, iv;
674 : bool sig_none;
675 :
676 0 : sig_none = timr->it_sigev_notify == SIGEV_NONE;
677 0 : iv = timr->it_interval;
678 :
679 : /* interval timer ? */
680 0 : if (iv) {
681 0 : cur_setting->it_interval = ktime_to_timespec64(iv);
682 0 : } else if (!timr->it_active) {
683 : /*
684 : * SIGEV_NONE oneshot timers are never queued. Check them
685 : * below.
686 : */
687 0 : if (!sig_none)
688 : return;
689 : }
690 :
691 0 : now = kc->clock_get_ktime(timr->it_clock);
692 :
693 : /*
694 : * When a requeue is pending or this is a SIGEV_NONE timer move the
695 : * expiry time forward by intervals, so expiry is > now.
696 : */
697 0 : if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
698 0 : timr->it_overrun += kc->timer_forward(timr, now);
699 :
700 0 : remaining = kc->timer_remaining(timr, now);
701 : /* Return 0 only when the timer is expired and not pending */
702 0 : if (remaining <= 0) {
703 : /*
704 : * A single shot SIGEV_NONE timer must return 0 when
705 : * it is expired!
706 : */
707 0 : if (!sig_none)
708 0 : cur_setting->it_value.tv_nsec = 1;
709 : } else {
710 0 : cur_setting->it_value = ktime_to_timespec64(remaining);
711 : }
712 : }
713 :
714 : /* Get the time remaining on a POSIX.1b interval timer. */
715 0 : static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
716 : {
717 : struct k_itimer *timr;
718 : const struct k_clock *kc;
719 : unsigned long flags;
720 0 : int ret = 0;
721 :
722 0 : timr = lock_timer(timer_id, &flags);
723 0 : if (!timr)
724 : return -EINVAL;
725 :
726 0 : memset(setting, 0, sizeof(*setting));
727 0 : kc = timr->kclock;
728 0 : if (WARN_ON_ONCE(!kc || !kc->timer_get))
729 : ret = -EINVAL;
730 : else
731 0 : kc->timer_get(timr, setting);
732 :
733 0 : unlock_timer(timr, flags);
734 0 : return ret;
735 : }
736 :
737 : /* Get the time remaining on a POSIX.1b interval timer. */
738 0 : SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
739 : struct __kernel_itimerspec __user *, setting)
740 : {
741 : struct itimerspec64 cur_setting;
742 :
743 0 : int ret = do_timer_gettime(timer_id, &cur_setting);
744 0 : if (!ret) {
745 0 : if (put_itimerspec64(&cur_setting, setting))
746 0 : ret = -EFAULT;
747 : }
748 0 : return ret;
749 : }
750 :
751 : #ifdef CONFIG_COMPAT_32BIT_TIME
752 :
753 : SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
754 : struct old_itimerspec32 __user *, setting)
755 : {
756 : struct itimerspec64 cur_setting;
757 :
758 : int ret = do_timer_gettime(timer_id, &cur_setting);
759 : if (!ret) {
760 : if (put_old_itimerspec32(&cur_setting, setting))
761 : ret = -EFAULT;
762 : }
763 : return ret;
764 : }
765 :
766 : #endif
767 :
768 : /*
769 : * Get the number of overruns of a POSIX.1b interval timer. This is
770 : * the overrun of the last delivered timer signal. At the same time we are
771 : * accumulating overruns on the next timer. The overrun is frozen when
772 : * the signal is delivered, either at the notify time (if the info block
773 : * is not queued) or at the actual delivery time (as we are informed by
774 : * the callback to posixtimer_rearm()). So all we need to do is
775 : * pick up the frozen overrun.
776 : */
777 0 : SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
778 : {
779 : struct k_itimer *timr;
780 : int overrun;
781 : unsigned long flags;
782 :
783 0 : timr = lock_timer(timer_id, &flags);
784 0 : if (!timr)
785 : return -EINVAL;
786 :
787 0 : overrun = timer_overrun_to_int(timr, 0);
788 0 : unlock_timer(timr, flags);
789 :
790 0 : return overrun;
791 : }
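/*
 * Editor's illustration (not part of this file): picking up the frozen
 * overrun count from userspace. Each delivered timer signal accounts for
 * one expiry plus timer_getoverrun() merged expiries, clamped to INT_MAX by
 * timer_overrun_to_int() above. wait_one_tick() is a hypothetical helper;
 * @set is assumed to contain the (blocked) timer signal.
 */
#if 0 /* userspace example, kept out of the kernel build */
#include <signal.h>
#include <time.h>

static long wait_one_tick(timer_t id, const sigset_t *set)
{
	siginfo_t info;

	if (sigwaitinfo(set, &info) < 0)
		return -1;
	/* One delivered signal plus whatever was merged into it. */
	return 1 + timer_getoverrun(id);
}
#endif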
792 :
793 0 : static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
794 : bool absolute, bool sigev_none)
795 : {
796 0 : struct hrtimer *timer = &timr->it.real.timer;
797 : enum hrtimer_mode mode;
798 :
799 0 : mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
800 : /*
801 : * Posix magic: Relative CLOCK_REALTIME timers are not affected by
802 : * clock modifications, so they become CLOCK_MONOTONIC based under the
803 : * hood. See hrtimer_init(). Update timr->kclock, so the generic
804 : * functions which use timr->kclock->clock_get_*() work.
805 : *
806 : * Note: it_clock stays unmodified, because the next timer_set() might
807 : * use ABSTIME, so it needs to switch back.
808 : */
809 0 : if (timr->it_clock == CLOCK_REALTIME)
810 0 : timr->kclock = absolute ? &clock_realtime : &clock_monotonic;
811 :
812 0 : hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
813 0 : timr->it.real.timer.function = posix_timer_fn;
814 :
815 0 : if (!absolute)
816 0 : expires = ktime_add_safe(expires, timer->base->get_time());
817 0 : hrtimer_set_expires(timer, expires);
818 :
819 0 : if (!sigev_none)
820 : hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
821 0 : }
822 :
823 0 : static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
824 : {
825 0 : return hrtimer_try_to_cancel(&timr->it.real.timer);
826 : }
827 :
828 0 : static void common_timer_wait_running(struct k_itimer *timer)
829 : {
830 0 : hrtimer_cancel_wait_running(&timer->it.real.timer);
831 0 : }
832 :
833 : /*
834 : * On PREEMPT_RT this prevents priority inversion against the softirq kthread
835 : * in case it gets preempted while executing a timer callback. See comments in
836 : * hrtimer_cancel_wait_running(). For PREEMPT_RT=n this just results in a
837 : * cpu_relax().
838 : */
839 0 : static struct k_itimer *timer_wait_running(struct k_itimer *timer,
840 : unsigned long *flags)
841 : {
842 0 : const struct k_clock *kc = READ_ONCE(timer->kclock);
843 0 : timer_t timer_id = READ_ONCE(timer->it_id);
844 :
845 : /* Prevent kfree(timer) after dropping the lock */
846 : rcu_read_lock();
847 0 : unlock_timer(timer, *flags);
848 :
849 : /*
850 : * kc->timer_wait_running() might drop RCU lock. So @timer
851 : * cannot be touched anymore after the function returns!
852 : */
853 0 : if (!WARN_ON_ONCE(!kc->timer_wait_running))
854 0 : kc->timer_wait_running(timer);
855 :
856 : rcu_read_unlock();
857 : /* Relock the timer. It might no longer be hashed. */
858 0 : return lock_timer(timer_id, flags);
859 : }
860 :
861 : /* Set a POSIX.1b interval timer. */
862 0 : int common_timer_set(struct k_itimer *timr, int flags,
863 : struct itimerspec64 *new_setting,
864 : struct itimerspec64 *old_setting)
865 : {
866 0 : const struct k_clock *kc = timr->kclock;
867 : bool sigev_none;
868 : ktime_t expires;
869 :
870 0 : if (old_setting)
871 0 : common_timer_get(timr, old_setting);
872 :
873 : /* Prevent rearming by clearing the interval */
874 0 : timr->it_interval = 0;
875 : /*
876 : * Careful here. On SMP systems the timer expiry function could be
877 : * active and spinning on timr->it_lock.
878 : */
879 0 : if (kc->timer_try_to_cancel(timr) < 0)
880 : return TIMER_RETRY;
881 :
882 0 : timr->it_active = 0;
883 0 : timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
884 : ~REQUEUE_PENDING;
885 0 : timr->it_overrun_last = 0;
886 :
887 : /* Switch off the timer when it_value is zero */
888 0 : if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
889 : return 0;
890 :
891 0 : timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
892 0 : expires = timespec64_to_ktime(new_setting->it_value);
893 0 : if (flags & TIMER_ABSTIME)
894 : expires = timens_ktime_to_host(timr->it_clock, expires);
895 0 : sigev_none = timr->it_sigev_notify == SIGEV_NONE;
896 :
897 0 : kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
898 0 : timr->it_active = !sigev_none;
899 0 : return 0;
900 : }
901 :
902 0 : static int do_timer_settime(timer_t timer_id, int tmr_flags,
903 : struct itimerspec64 *new_spec64,
904 : struct itimerspec64 *old_spec64)
905 : {
906 : const struct k_clock *kc;
907 : struct k_itimer *timr;
908 : unsigned long flags;
909 0 : int error = 0;
910 :
911 0 : if (!timespec64_valid(&new_spec64->it_interval) ||
912 0 : !timespec64_valid(&new_spec64->it_value))
913 : return -EINVAL;
914 :
915 0 : if (old_spec64)
916 0 : memset(old_spec64, 0, sizeof(*old_spec64));
917 :
918 0 : timr = lock_timer(timer_id, &flags);
919 : retry:
920 0 : if (!timr)
921 : return -EINVAL;
922 :
923 0 : kc = timr->kclock;
924 0 : if (WARN_ON_ONCE(!kc || !kc->timer_set))
925 : error = -EINVAL;
926 : else
927 0 : error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);
928 :
929 0 : if (error == TIMER_RETRY) {
930 : // We already got the old time...
931 0 : old_spec64 = NULL;
932 : /* Unlocks and relocks the timer if it still exists */
933 0 : timr = timer_wait_running(timr, &flags);
934 0 : goto retry;
935 : }
936 0 : unlock_timer(timr, flags);
937 :
938 0 : return error;
939 : }
940 :
941 : /* Set a POSIX.1b interval timer */
942 0 : SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
943 : const struct __kernel_itimerspec __user *, new_setting,
944 : struct __kernel_itimerspec __user *, old_setting)
945 : {
946 : struct itimerspec64 new_spec, old_spec;
947 0 : struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
948 0 : int error = 0;
949 :
950 0 : if (!new_setting)
951 : return -EINVAL;
952 :
953 0 : if (get_itimerspec64(&new_spec, new_setting))
954 : return -EFAULT;
955 :
956 0 : error = do_timer_settime(timer_id, flags, &new_spec, rtn);
957 0 : if (!error && old_setting) {
958 0 : if (put_itimerspec64(&old_spec, old_setting))
959 0 : error = -EFAULT;
960 : }
961 0 : return error;
962 : }
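/*
 * Editor's illustration (not part of this file): arming a periodic timer
 * through the syscall above. A zero it_value disarms the timer (the
 * "switch off" branch in common_timer_set()); a non-zero it_interval makes
 * the timer rearm itself on every expiry. start_periodic_100ms() is a
 * hypothetical helper name.
 */
#if 0 /* userspace example, kept out of the kernel build */
#include <time.h>

static int start_periodic_100ms(timer_t id)
{
	struct itimerspec spec = {
		.it_value    = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
		.it_interval = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
	};

	/* flags == 0: relative expiry; TIMER_ABSTIME would make it absolute. */
	return timer_settime(id, 0, &spec, NULL);
}
#endif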
963 :
964 : #ifdef CONFIG_COMPAT_32BIT_TIME
965 : SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
966 : struct old_itimerspec32 __user *, new,
967 : struct old_itimerspec32 __user *, old)
968 : {
969 : struct itimerspec64 new_spec, old_spec;
970 : struct itimerspec64 *rtn = old ? &old_spec : NULL;
971 : int error = 0;
972 :
973 : if (!new)
974 : return -EINVAL;
975 : if (get_old_itimerspec32(&new_spec, new))
976 : return -EFAULT;
977 :
978 : error = do_timer_settime(timer_id, flags, &new_spec, rtn);
979 : if (!error && old) {
980 : if (put_old_itimerspec32(&old_spec, old))
981 : error = -EFAULT;
982 : }
983 : return error;
984 : }
985 : #endif
986 :
987 0 : int common_timer_del(struct k_itimer *timer)
988 : {
989 0 : const struct k_clock *kc = timer->kclock;
990 :
991 0 : timer->it_interval = 0;
992 0 : if (kc->timer_try_to_cancel(timer) < 0)
993 : return TIMER_RETRY;
994 0 : timer->it_active = 0;
995 0 : return 0;
996 : }
997 :
998 0 : static inline int timer_delete_hook(struct k_itimer *timer)
999 : {
1000 0 : const struct k_clock *kc = timer->kclock;
1001 :
1002 0 : if (WARN_ON_ONCE(!kc || !kc->timer_del))
1003 : return -EINVAL;
1004 0 : return kc->timer_del(timer);
1005 : }
1006 :
1007 : /* Delete a POSIX.1b interval timer. */
1008 0 : SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
1009 : {
1010 : struct k_itimer *timer;
1011 : unsigned long flags;
1012 :
1013 0 : timer = lock_timer(timer_id, &flags);
1014 :
1015 : retry_delete:
1016 0 : if (!timer)
1017 : return -EINVAL;
1018 :
1019 0 : if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
1020 : /* Unlocks and relocks the timer if it still exists */
1021 0 : timer = timer_wait_running(timer, &flags);
1022 0 : goto retry_delete;
1023 : }
1024 :
1025 0 : spin_lock(&current->sighand->siglock);
1026 0 : list_del(&timer->list);
1027 0 : spin_unlock(&current->sighand->siglock);
1028 : /*
1029 : * This keeps any tasks waiting on the spin lock from thinking
1030 : * they got something (see the lock code above).
1031 : */
1032 0 : timer->it_signal = NULL;
1033 :
1034 0 : unlock_timer(timer, flags);
1035 0 : release_posix_timer(timer, IT_ID_SET);
1036 0 : return 0;
1037 : }
1038 :
1039 : /*
1040 : * Delete a timer owned by the exiting process, used by exit_itimers()
1041 : */
1042 0 : static void itimer_delete(struct k_itimer *timer)
1043 : {
1044 : retry_delete:
1045 0 : spin_lock_irq(&timer->it_lock);
1046 :
1047 0 : if (timer_delete_hook(timer) == TIMER_RETRY) {
1048 0 : spin_unlock_irq(&timer->it_lock);
1049 : goto retry_delete;
1050 : }
1051 0 : list_del(&timer->list);
1052 :
1053 0 : spin_unlock_irq(&timer->it_lock);
1054 0 : release_posix_timer(timer, IT_ID_SET);
1055 0 : }
1056 :
1057 : /*
1058 : * This is called by do_exit or de_thread, only when nobody else can
1059 : * modify the signal->posix_timers list. Yet we need sighand->siglock
1060 : * to prevent the race with /proc/pid/timers.
1061 : */
1062 367 : void exit_itimers(struct task_struct *tsk)
1063 : {
1064 : struct list_head timers;
1065 : struct k_itimer *tmr;
1066 :
1067 734 : if (list_empty(&tsk->signal->posix_timers))
1068 367 : return;
1069 :
1070 0 : spin_lock_irq(&tsk->sighand->siglock);
1071 0 : list_replace_init(&tsk->signal->posix_timers, &timers);
1072 0 : spin_unlock_irq(&tsk->sighand->siglock);
1073 :
1074 0 : while (!list_empty(&timers)) {
1075 0 : tmr = list_first_entry(&timers, struct k_itimer, list);
1076 0 : itimer_delete(tmr);
1077 : }
1078 : }
1079 :
1080 0 : SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
1081 : const struct __kernel_timespec __user *, tp)
1082 : {
1083 0 : const struct k_clock *kc = clockid_to_kclock(which_clock);
1084 : struct timespec64 new_tp;
1085 :
1086 0 : if (!kc || !kc->clock_set)
1087 : return -EINVAL;
1088 :
1089 0 : if (get_timespec64(&new_tp, tp))
1090 : return -EFAULT;
1091 :
1092 0 : return kc->clock_set(which_clock, &new_tp);
1093 : }
1094 :
1095 0 : SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
1096 : struct __kernel_timespec __user *, tp)
1097 : {
1098 0 : const struct k_clock *kc = clockid_to_kclock(which_clock);
1099 : struct timespec64 kernel_tp;
1100 : int error;
1101 :
1102 0 : if (!kc)
1103 : return -EINVAL;
1104 :
1105 0 : error = kc->clock_get_timespec(which_clock, &kernel_tp);
1106 :
1107 0 : if (!error && put_timespec64(&kernel_tp, tp))
1108 0 : error = -EFAULT;
1109 :
1110 0 : return error;
1111 : }
1112 :
1113 0 : int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx)
1114 : {
1115 0 : const struct k_clock *kc = clockid_to_kclock(which_clock);
1116 :
1117 0 : if (!kc)
1118 : return -EINVAL;
1119 0 : if (!kc->clock_adj)
1120 : return -EOPNOTSUPP;
1121 :
1122 0 : return kc->clock_adj(which_clock, ktx);
1123 : }
1124 :
1125 0 : SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
1126 : struct __kernel_timex __user *, utx)
1127 : {
1128 : struct __kernel_timex ktx;
1129 : int err;
1130 :
1131 0 : if (copy_from_user(&ktx, utx, sizeof(ktx)))
1132 : return -EFAULT;
1133 :
1134 0 : err = do_clock_adjtime(which_clock, &ktx);
1135 :
1136 0 : if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
1137 : return -EFAULT;
1138 :
1139 0 : return err;
1140 : }
1141 :
1142 0 : SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
1143 : struct __kernel_timespec __user *, tp)
1144 : {
1145 0 : const struct k_clock *kc = clockid_to_kclock(which_clock);
1146 : struct timespec64 rtn_tp;
1147 : int error;
1148 :
1149 0 : if (!kc)
1150 : return -EINVAL;
1151 :
1152 0 : error = kc->clock_getres(which_clock, &rtn_tp);
1153 :
1154 0 : if (!error && tp && put_timespec64(&rtn_tp, tp))
1155 0 : error = -EFAULT;
1156 :
1157 0 : return error;
1158 : }
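/*
 * Editor's illustration (not part of this file): reading a clock and its
 * resolution via the two syscalls above. As the header comment of this
 * file notes, the resolution only rounds timer/interval values;
 * clock_gettime() reports as much precision as the system can muster.
 */
#if 0 /* userspace example, kept out of the kernel build */
#include <stdio.h>
#include <time.h>

static void show_monotonic(void)
{
	struct timespec now, res;

	if (!clock_gettime(CLOCK_MONOTONIC, &now) &&
	    !clock_getres(CLOCK_MONOTONIC, &res))
		printf("now=%lld.%09ld res=%ld ns\n",
		       (long long)now.tv_sec, now.tv_nsec, res.tv_nsec);
}
#endif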
1159 :
1160 : #ifdef CONFIG_COMPAT_32BIT_TIME
1161 :
1162 : SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
1163 : struct old_timespec32 __user *, tp)
1164 : {
1165 : const struct k_clock *kc = clockid_to_kclock(which_clock);
1166 : struct timespec64 ts;
1167 :
1168 : if (!kc || !kc->clock_set)
1169 : return -EINVAL;
1170 :
1171 : if (get_old_timespec32(&ts, tp))
1172 : return -EFAULT;
1173 :
1174 : return kc->clock_set(which_clock, &ts);
1175 : }
1176 :
1177 : SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
1178 : struct old_timespec32 __user *, tp)
1179 : {
1180 : const struct k_clock *kc = clockid_to_kclock(which_clock);
1181 : struct timespec64 ts;
1182 : int err;
1183 :
1184 : if (!kc)
1185 : return -EINVAL;
1186 :
1187 : err = kc->clock_get_timespec(which_clock, &ts);
1188 :
1189 : if (!err && put_old_timespec32(&ts, tp))
1190 : err = -EFAULT;
1191 :
1192 : return err;
1193 : }
1194 :
1195 : SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
1196 : struct old_timex32 __user *, utp)
1197 : {
1198 : struct __kernel_timex ktx;
1199 : int err;
1200 :
1201 : err = get_old_timex32(&ktx, utp);
1202 : if (err)
1203 : return err;
1204 :
1205 : err = do_clock_adjtime(which_clock, &ktx);
1206 :
1207 : if (err >= 0 && put_old_timex32(utp, &ktx))
1208 : return -EFAULT;
1209 :
1210 : return err;
1211 : }
1212 :
1213 : SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
1214 : struct old_timespec32 __user *, tp)
1215 : {
1216 : const struct k_clock *kc = clockid_to_kclock(which_clock);
1217 : struct timespec64 ts;
1218 : int err;
1219 :
1220 : if (!kc)
1221 : return -EINVAL;
1222 :
1223 : err = kc->clock_getres(which_clock, &ts);
1224 : if (!err && tp && put_old_timespec32(&ts, tp))
1225 : return -EFAULT;
1226 :
1227 : return err;
1228 : }
1229 :
1230 : #endif
1231 :
1232 : /*
1233 : * nanosleep for monotonic and realtime clocks
1234 : */
1235 0 : static int common_nsleep(const clockid_t which_clock, int flags,
1236 : const struct timespec64 *rqtp)
1237 : {
1238 0 : ktime_t texp = timespec64_to_ktime(*rqtp);
1239 :
1240 0 : return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
1241 : HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
1242 : which_clock);
1243 : }
1244 :
1245 0 : static int common_nsleep_timens(const clockid_t which_clock, int flags,
1246 : const struct timespec64 *rqtp)
1247 : {
1248 0 : ktime_t texp = timespec64_to_ktime(*rqtp);
1249 :
1250 0 : if (flags & TIMER_ABSTIME)
1251 : texp = timens_ktime_to_host(which_clock, texp);
1252 :
1253 0 : return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
1254 : HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
1255 : which_clock);
1256 : }
1257 :
1258 0 : SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
1259 : const struct __kernel_timespec __user *, rqtp,
1260 : struct __kernel_timespec __user *, rmtp)
1261 : {
1262 0 : const struct k_clock *kc = clockid_to_kclock(which_clock);
1263 : struct timespec64 t;
1264 :
1265 0 : if (!kc)
1266 : return -EINVAL;
1267 0 : if (!kc->nsleep)
1268 : return -EOPNOTSUPP;
1269 :
1270 0 : if (get_timespec64(&t, rqtp))
1271 : return -EFAULT;
1272 :
1273 0 : if (!timespec64_valid(&t))
1274 : return -EINVAL;
1275 0 : if (flags & TIMER_ABSTIME)
1276 0 : rmtp = NULL;
1277 0 : current->restart_block.fn = do_no_restart_syscall;
1278 0 : current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
1279 0 : current->restart_block.nanosleep.rmtp = rmtp;
1280 :
1281 0 : return kc->nsleep(which_clock, flags, &t);
1282 : }
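/*
 * Editor's illustration (not part of this file): sleeping until an absolute
 * CLOCK_MONOTONIC deadline via the syscall above. With TIMER_ABSTIME the
 * remaining-time pointer is ignored (rmtp is NULLed in the kernel path
 * above) and clock_nanosleep() returns an error number directly instead of
 * setting errno, so an interrupted absolute sleep can simply be retried.
 */
#if 0 /* userspace example, kept out of the kernel build */
#include <errno.h>
#include <time.h>

static int sleep_until(const struct timespec *deadline)
{
	int err;

	do {
		err = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
				      deadline, NULL);
	} while (err == EINTR);

	return err;
}
#endif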
1283 :
1284 : #ifdef CONFIG_COMPAT_32BIT_TIME
1285 :
1286 : SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
1287 : struct old_timespec32 __user *, rqtp,
1288 : struct old_timespec32 __user *, rmtp)
1289 : {
1290 : const struct k_clock *kc = clockid_to_kclock(which_clock);
1291 : struct timespec64 t;
1292 :
1293 : if (!kc)
1294 : return -EINVAL;
1295 : if (!kc->nsleep)
1296 : return -EOPNOTSUPP;
1297 :
1298 : if (get_old_timespec32(&t, rqtp))
1299 : return -EFAULT;
1300 :
1301 : if (!timespec64_valid(&t))
1302 : return -EINVAL;
1303 : if (flags & TIMER_ABSTIME)
1304 : rmtp = NULL;
1305 : current->restart_block.fn = do_no_restart_syscall;
1306 : current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
1307 : current->restart_block.nanosleep.compat_rmtp = rmtp;
1308 :
1309 : return kc->nsleep(which_clock, flags, &t);
1310 : }
1311 :
1312 : #endif
1313 :
1314 : static const struct k_clock clock_realtime = {
1315 : .clock_getres = posix_get_hrtimer_res,
1316 : .clock_get_timespec = posix_get_realtime_timespec,
1317 : .clock_get_ktime = posix_get_realtime_ktime,
1318 : .clock_set = posix_clock_realtime_set,
1319 : .clock_adj = posix_clock_realtime_adj,
1320 : .nsleep = common_nsleep,
1321 : .timer_create = common_timer_create,
1322 : .timer_set = common_timer_set,
1323 : .timer_get = common_timer_get,
1324 : .timer_del = common_timer_del,
1325 : .timer_rearm = common_hrtimer_rearm,
1326 : .timer_forward = common_hrtimer_forward,
1327 : .timer_remaining = common_hrtimer_remaining,
1328 : .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1329 : .timer_wait_running = common_timer_wait_running,
1330 : .timer_arm = common_hrtimer_arm,
1331 : };
1332 :
1333 : static const struct k_clock clock_monotonic = {
1334 : .clock_getres = posix_get_hrtimer_res,
1335 : .clock_get_timespec = posix_get_monotonic_timespec,
1336 : .clock_get_ktime = posix_get_monotonic_ktime,
1337 : .nsleep = common_nsleep_timens,
1338 : .timer_create = common_timer_create,
1339 : .timer_set = common_timer_set,
1340 : .timer_get = common_timer_get,
1341 : .timer_del = common_timer_del,
1342 : .timer_rearm = common_hrtimer_rearm,
1343 : .timer_forward = common_hrtimer_forward,
1344 : .timer_remaining = common_hrtimer_remaining,
1345 : .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1346 : .timer_wait_running = common_timer_wait_running,
1347 : .timer_arm = common_hrtimer_arm,
1348 : };
1349 :
1350 : static const struct k_clock clock_monotonic_raw = {
1351 : .clock_getres = posix_get_hrtimer_res,
1352 : .clock_get_timespec = posix_get_monotonic_raw,
1353 : };
1354 :
1355 : static const struct k_clock clock_realtime_coarse = {
1356 : .clock_getres = posix_get_coarse_res,
1357 : .clock_get_timespec = posix_get_realtime_coarse,
1358 : };
1359 :
1360 : static const struct k_clock clock_monotonic_coarse = {
1361 : .clock_getres = posix_get_coarse_res,
1362 : .clock_get_timespec = posix_get_monotonic_coarse,
1363 : };
1364 :
1365 : static const struct k_clock clock_tai = {
1366 : .clock_getres = posix_get_hrtimer_res,
1367 : .clock_get_ktime = posix_get_tai_ktime,
1368 : .clock_get_timespec = posix_get_tai_timespec,
1369 : .nsleep = common_nsleep,
1370 : .timer_create = common_timer_create,
1371 : .timer_set = common_timer_set,
1372 : .timer_get = common_timer_get,
1373 : .timer_del = common_timer_del,
1374 : .timer_rearm = common_hrtimer_rearm,
1375 : .timer_forward = common_hrtimer_forward,
1376 : .timer_remaining = common_hrtimer_remaining,
1377 : .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1378 : .timer_wait_running = common_timer_wait_running,
1379 : .timer_arm = common_hrtimer_arm,
1380 : };
1381 :
1382 : static const struct k_clock clock_boottime = {
1383 : .clock_getres = posix_get_hrtimer_res,
1384 : .clock_get_ktime = posix_get_boottime_ktime,
1385 : .clock_get_timespec = posix_get_boottime_timespec,
1386 : .nsleep = common_nsleep_timens,
1387 : .timer_create = common_timer_create,
1388 : .timer_set = common_timer_set,
1389 : .timer_get = common_timer_get,
1390 : .timer_del = common_timer_del,
1391 : .timer_rearm = common_hrtimer_rearm,
1392 : .timer_forward = common_hrtimer_forward,
1393 : .timer_remaining = common_hrtimer_remaining,
1394 : .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1395 : .timer_wait_running = common_timer_wait_running,
1396 : .timer_arm = common_hrtimer_arm,
1397 : };
1398 :
1399 : static const struct k_clock * const posix_clocks[] = {
1400 : [CLOCK_REALTIME] = &clock_realtime,
1401 : [CLOCK_MONOTONIC] = &clock_monotonic,
1402 : [CLOCK_PROCESS_CPUTIME_ID] = &clock_process,
1403 : [CLOCK_THREAD_CPUTIME_ID] = &clock_thread,
1404 : [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw,
1405 : [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse,
1406 : [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse,
1407 : [CLOCK_BOOTTIME] = &clock_boottime,
1408 : [CLOCK_REALTIME_ALARM] = &alarm_clock,
1409 : [CLOCK_BOOTTIME_ALARM] = &alarm_clock,
1410 : [CLOCK_TAI] = &clock_tai,
1411 : };
1412 :
1413 : static const struct k_clock *clockid_to_kclock(const clockid_t id)
1414 : {
1415 0 : clockid_t idx = id;
1416 :
1417 0 : if (id < 0) {
1418 0 : return (id & CLOCKFD_MASK) == CLOCKFD ?
1419 0 : &clock_posix_dynamic : &clock_posix_cpu;
1420 : }
1421 :
1422 0 : if (id >= ARRAY_SIZE(posix_clocks))
1423 : return NULL;
1424 :
1425 0 : return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
1426 : }
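/*
 * Editor's illustration (not part of this file): negative clockids select
 * either posix CPU clocks or dynamic clocks backed by a character device
 * such as /dev/ptp0, as dispatched by clockid_to_kclock() above. Userspace
 * commonly builds such an id from an open file descriptor with a macro like
 * the one below (as used e.g. in the kernel's PTP selftests); CLOCKFD is 3,
 * matching the CLOCKFD/CLOCKFD_MASK test above.
 */
#if 0 /* userspace example, kept out of the kernel build */
#include <fcntl.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t) (fd) << 3) | CLOCKFD)

static int read_dynamic_clock(const char *dev, struct timespec *ts)
{
	int ret, fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	/* Routed to clock_posix_dynamic by clockid_to_kclock(). */
	ret = clock_gettime(FD_TO_CLOCKID(fd), ts);
	close(fd);
	return ret;
}
#endif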
|