Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * linux/kernel/printk.c
4 : *
5 : * Copyright (C) 1991, 1992 Linus Torvalds
6 : *
7 : * Modified to make sys_syslog() more flexible: added commands to
8 : * return the last 4k of kernel messages, regardless of whether
9 : * they've been read or not. Added option to suppress kernel printk's
10 : * to the console. Added hook for sending the console messages
11 : * elsewhere, in preparation for a serial line console (someday).
12 : * Ted Ts'o, 2/11/93.
13 : * Modified for sysctl support, 1/8/97, Chris Horn.
14 : * Fixed SMP synchronization, 08/08/99, Manfred Spraul
15 : * manfred@colorfullife.com
16 : * Rewrote bits to get rid of console_lock
17 : * 01Mar01 Andrew Morton
18 : */
19 :
20 : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 :
22 : #include <linux/kernel.h>
23 : #include <linux/mm.h>
24 : #include <linux/tty.h>
25 : #include <linux/tty_driver.h>
26 : #include <linux/console.h>
27 : #include <linux/init.h>
28 : #include <linux/jiffies.h>
29 : #include <linux/nmi.h>
30 : #include <linux/module.h>
31 : #include <linux/moduleparam.h>
32 : #include <linux/delay.h>
33 : #include <linux/smp.h>
34 : #include <linux/security.h>
35 : #include <linux/memblock.h>
36 : #include <linux/syscalls.h>
37 : #include <linux/crash_core.h>
38 : #include <linux/ratelimit.h>
39 : #include <linux/kmsg_dump.h>
40 : #include <linux/syslog.h>
41 : #include <linux/cpu.h>
42 : #include <linux/rculist.h>
43 : #include <linux/poll.h>
44 : #include <linux/irq_work.h>
45 : #include <linux/ctype.h>
46 : #include <linux/uio.h>
47 : #include <linux/sched/clock.h>
48 : #include <linux/sched/debug.h>
49 : #include <linux/sched/task_stack.h>
50 :
51 : #include <linux/uaccess.h>
52 : #include <asm/sections.h>
53 :
54 : #include <trace/events/initcall.h>
55 : #define CREATE_TRACE_POINTS
56 : #include <trace/events/printk.h>
57 :
58 : #include "printk_ringbuffer.h"
59 : #include "console_cmdline.h"
60 : #include "braille.h"
61 : #include "internal.h"
62 :
63 : int console_printk[4] = {
64 : CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
65 : MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */
66 : CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */
67 : CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
68 : };
69 : EXPORT_SYMBOL_GPL(console_printk);
70 :
71 : atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
72 : EXPORT_SYMBOL(ignore_console_lock_warning);
73 :
74 : EXPORT_TRACEPOINT_SYMBOL_GPL(console);
75 :
76 : /*
77 : * Low level drivers may need this to know whether they can schedule in
78 : * their unblank() callback or not. So let's export it.
79 : */
80 : int oops_in_progress;
81 : EXPORT_SYMBOL(oops_in_progress);
82 :
83 : /*
84 : * console_mutex protects console_list updates and console->flags updates.
85 : * The flags are synchronized only for consoles that are registered, i.e.
86 : * accessible via the console list.
87 : */
88 : static DEFINE_MUTEX(console_mutex);
89 :
90 : /*
91 : * console_sem protects updates to console->seq and console_suspended,
92 : * and also provides serialization for console printing.
93 : */
94 : static DEFINE_SEMAPHORE(console_sem, 1);
95 : HLIST_HEAD(console_list);
96 : EXPORT_SYMBOL_GPL(console_list);
97 : DEFINE_STATIC_SRCU(console_srcu);
98 :
99 : /*
100 : * The system may need to suppress printk messages under certain
101 : * circumstances, e.g. after a kernel panic happens.
102 : */
103 : int __read_mostly suppress_printk;
104 :
105 : /*
106 : * During panic, heavy printk by other CPUs can delay the
107 : * panic and risk deadlock on console resources.
108 : */
109 : static int __read_mostly suppress_panic_printk;
110 :
111 : #ifdef CONFIG_LOCKDEP
112 : static struct lockdep_map console_lock_dep_map = {
113 : .name = "console_lock"
114 : };
115 :
116 : void lockdep_assert_console_list_lock_held(void)
117 : {
118 : lockdep_assert_held(&console_mutex);
119 : }
120 : EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
121 : #endif
122 :
123 : #ifdef CONFIG_DEBUG_LOCK_ALLOC
124 : bool console_srcu_read_lock_is_held(void)
125 : {
126 : return srcu_read_lock_held(&console_srcu);
127 : }
128 : EXPORT_SYMBOL(console_srcu_read_lock_is_held);
129 : #endif
130 :
131 : enum devkmsg_log_bits {
132 : __DEVKMSG_LOG_BIT_ON = 0,
133 : __DEVKMSG_LOG_BIT_OFF,
134 : __DEVKMSG_LOG_BIT_LOCK,
135 : };
136 :
137 : enum devkmsg_log_masks {
138 : DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON),
139 : DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF),
140 : DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
141 : };
142 :
143 : /* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
144 : #define DEVKMSG_LOG_MASK_DEFAULT 0
145 :
146 : static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
147 :
148 0 : static int __control_devkmsg(char *str)
149 : {
150 : size_t len;
151 :
152 0 : if (!str)
153 : return -EINVAL;
154 :
155 0 : len = str_has_prefix(str, "on");
156 0 : if (len) {
157 0 : devkmsg_log = DEVKMSG_LOG_MASK_ON;
158 0 : return len;
159 : }
160 :
161 0 : len = str_has_prefix(str, "off");
162 0 : if (len) {
163 0 : devkmsg_log = DEVKMSG_LOG_MASK_OFF;
164 0 : return len;
165 : }
166 :
167 0 : len = str_has_prefix(str, "ratelimit");
168 0 : if (len) {
169 0 : devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
170 0 : return len;
171 : }
172 :
173 : return -EINVAL;
174 : }
175 :
176 0 : static int __init control_devkmsg(char *str)
177 : {
178 0 : if (__control_devkmsg(str) < 0) {
179 0 : pr_warn("printk.devkmsg: bad option string '%s'\n", str);
180 0 : return 1;
181 : }
182 :
183 : /*
184 : * Set sysctl string accordingly:
185 : */
186 0 : if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
187 0 : strcpy(devkmsg_log_str, "on");
188 0 : else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
189 0 : strcpy(devkmsg_log_str, "off");
190 : /* else "ratelimit" which is set by default. */
191 :
192 : /*
193 : * Sysctl cannot change it anymore. The kernel command line setting of
194 : * this parameter is to force the setting to be permanent throughout the
195 : * runtime of the system. This is a precautionary measure against userspace
196 : * trying to be a smarta** and attempting to change it up on us.
197 : */
198 0 : devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
199 :
200 0 : return 1;
201 : }
202 : __setup("printk.devkmsg=", control_devkmsg);
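/*
 * For illustration, the option strings accepted by __control_devkmsg()
 * above correspond to kernel command line usage such as:
 *
 *	printk.devkmsg=on		do not ratelimit writes to /dev/kmsg
 *	printk.devkmsg=off		ignore userspace writes to /dev/kmsg
 *	printk.devkmsg=ratelimit	ratelimit writes (the default)
 *
 * Using any of these on the command line also sets DEVKMSG_LOG_MASK_LOCK,
 * so the sysctl below can no longer change the value at runtime.
 */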
203 :
204 : char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
205 : #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
206 0 : int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
207 : void *buffer, size_t *lenp, loff_t *ppos)
208 : {
209 : char old_str[DEVKMSG_STR_MAX_SIZE];
210 : unsigned int old;
211 : int err;
212 :
213 0 : if (write) {
214 0 : if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
215 : return -EINVAL;
216 :
217 0 : old = devkmsg_log;
218 0 : strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE);
219 : }
220 :
221 0 : err = proc_dostring(table, write, buffer, lenp, ppos);
222 0 : if (err)
223 : return err;
224 :
225 0 : if (write) {
226 0 : err = __control_devkmsg(devkmsg_log_str);
227 :
228 : /*
229 : * Do not accept an unknown string OR a known string with
230 : * trailing crap...
231 : */
232 0 : if (err < 0 || (err + 1 != *lenp)) {
233 :
234 : /* ... and restore old setting. */
235 0 : devkmsg_log = old;
236 0 : strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE);
237 :
238 0 : return -EINVAL;
239 : }
240 : }
241 :
242 : return 0;
243 : }
244 : #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
245 :
246 : /**
247 : * console_list_lock - Lock the console list
248 : *
249 : * For console list or console->flags updates
250 : */
251 0 : void console_list_lock(void)
252 : {
253 : /*
254 : * In unregister_console() and console_force_preferred_locked(),
255 : * synchronize_srcu() is called with the console_list_lock held.
256 : * Therefore it is not allowed that the console_list_lock is taken
257 : * with the srcu_lock held.
258 : *
259 : * Detecting if this context is really in the read-side critical
260 : * section is only possible if the appropriate debug options are
261 : * enabled.
262 : */
263 4 : WARN_ON_ONCE(debug_lockdep_rcu_enabled() &&
264 : srcu_read_lock_held(&console_srcu));
265 :
266 4 : mutex_lock(&console_mutex);
267 0 : }
268 : EXPORT_SYMBOL(console_list_lock);
269 :
270 : /**
271 : * console_list_unlock - Unlock the console list
272 : *
273 : * Counterpart to console_list_lock()
274 : */
275 0 : void console_list_unlock(void)
276 : {
277 4 : mutex_unlock(&console_mutex);
278 0 : }
279 : EXPORT_SYMBOL(console_list_unlock);
280 :
281 : /**
282 : * console_srcu_read_lock - Register a new reader for the
283 : * SRCU-protected console list
284 : *
285 : * Use for_each_console_srcu() to iterate the console list
286 : *
287 : * Context: Any context.
288 : * Return: A cookie to pass to console_srcu_read_unlock().
289 : */
290 0 : int console_srcu_read_lock(void)
291 : {
292 1865 : return srcu_read_lock_nmisafe(&console_srcu);
293 : }
294 : EXPORT_SYMBOL(console_srcu_read_lock);
295 :
296 : /**
297 : * console_srcu_read_unlock - Unregister an old reader from
298 : * the SRCU-protected console list
299 : * @cookie: cookie returned from console_srcu_read_lock()
300 : *
301 : * Counterpart to console_srcu_read_lock()
302 : */
303 0 : void console_srcu_read_unlock(int cookie)
304 : {
305 1865 : srcu_read_unlock_nmisafe(&console_srcu, cookie);
306 0 : }
307 : EXPORT_SYMBOL(console_srcu_read_unlock);
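/*
 * A sketch of the typical read-side pattern built on the two helpers
 * above, using the for_each_console_srcu() iterator from <linux/console.h>:
 *
 *	struct console *con;
 *	int cookie;
 *
 *	cookie = console_srcu_read_lock();
 *	for_each_console_srcu(con) {
 *		... read-only access to the registered console 'con' ...
 *	}
 *	console_srcu_read_unlock(cookie);
 */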
308 :
309 : /*
310 : * Helper macros to handle lockdep when locking/unlocking console_sem. We use
311 : * macros instead of functions so that _RET_IP_ contains useful information.
312 : */
313 : #define down_console_sem() do { \
314 : down(&console_sem);\
315 : mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
316 : } while (0)
317 :
318 964 : static int __down_trylock_console_sem(unsigned long ip)
319 : {
320 : int lock_failed;
321 : unsigned long flags;
322 :
323 : /*
324 : * Here and in __up_console_sem() we need to be in safe mode,
325 : * because spindump/WARN/etc from under console ->lock will
326 : * deadlock in printk()->down_trylock_console_sem() otherwise.
327 : */
328 964 : printk_safe_enter_irqsave(flags);
329 964 : lock_failed = down_trylock(&console_sem);
330 1928 : printk_safe_exit_irqrestore(flags);
331 :
332 964 : if (lock_failed)
333 : return 1;
334 : mutex_acquire(&console_lock_dep_map, 0, 1, ip);
335 : return 0;
336 : }
337 : #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
338 :
339 964 : static void __up_console_sem(unsigned long ip)
340 : {
341 : unsigned long flags;
342 :
343 : mutex_release(&console_lock_dep_map, ip);
344 :
345 964 : printk_safe_enter_irqsave(flags);
346 964 : up(&console_sem);
347 1928 : printk_safe_exit_irqrestore(flags);
348 964 : }
349 : #define up_console_sem() __up_console_sem(_RET_IP_)
350 :
351 : static bool panic_in_progress(void)
352 : {
353 1693 : return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
354 : }
355 :
356 : /*
357 : * This is used for debugging the mess that is the VT code by
358 : * keeping track of whether we have the console semaphore held. It's
359 : * definitely not the perfect debug tool (we don't know if _WE_
360 : * hold it and are racing, but it helps track those weird code
361 : * paths in the console code where we end up in places I want
362 : * locked without the console semaphore held).
363 : */
364 : static int console_locked, console_suspended;
365 :
366 : /*
367 : * Array of consoles built from command line options (console=)
368 : */
369 :
370 : #define MAX_CMDLINECONSOLES 8
371 :
372 : static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
373 :
374 : static int preferred_console = -1;
375 : int console_set_on_cmdline;
376 : EXPORT_SYMBOL(console_set_on_cmdline);
377 :
378 : /* Flag: console code may call schedule() */
379 : static int console_may_schedule;
380 :
381 : enum con_msg_format_flags {
382 : MSG_FORMAT_DEFAULT = 0,
383 : MSG_FORMAT_SYSLOG = (1 << 0),
384 : };
385 :
386 : static int console_msg_format = MSG_FORMAT_DEFAULT;
387 :
388 : /*
389 : * The printk log buffer consists of a sequenced collection of records, each
390 : * containing variable length message text. Every record also contains its
391 : * own meta-data (@info).
392 : *
393 : * Every record meta-data carries the timestamp in microseconds, as well as
394 : * the standard userspace syslog level and syslog facility. The usual kernel
395 : * messages use LOG_KERN; userspace-injected messages always carry a matching
396 : * syslog facility, by default LOG_USER. The origin of every message can be
397 : * reliably determined that way.
398 : *
399 : * The human readable log message of a record is available in @text, the
400 : * length of the message text in @text_len. The stored message is not
401 : * terminated.
402 : *
403 : * Optionally, a record can carry a dictionary of properties (key/value
404 : * pairs), to provide userspace with a machine-readable message context.
405 : *
406 : * Examples for well-defined, commonly used property names are:
407 : * DEVICE=b12:8 device identifier
408 : * b12:8 block dev_t
409 : * c127:3 char dev_t
410 : * n8 netdev ifindex
411 : * +sound:card0 subsystem:devname
412 : * SUBSYSTEM=pci driver-core subsystem name
413 : *
414 : * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
415 : * and values are terminated by a '\0' character.
416 : *
417 : * Example of record values:
418 : * record.text_buf = "it's a line" (unterminated)
419 : * record.info.seq = 56
420 : * record.info.ts_nsec = 36863
421 : * record.info.text_len = 11
422 : * record.info.facility = 0 (LOG_KERN)
423 : * record.info.flags = 0
424 : * record.info.level = 3 (LOG_ERR)
425 : * record.info.caller_id = 299 (task 299)
426 : * record.info.dev_info.subsystem = "pci" (terminated)
427 : * record.info.dev_info.device = "+pci:0000:00:01.0" (terminated)
428 : *
429 : * The 'struct printk_info' buffer must never be directly exported to
430 : * userspace, it is a kernel-private implementation detail that might
431 : * need to be changed in the future, when the requirements change.
432 : *
433 : * /dev/kmsg exports the structured data in the following line format:
434 : * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
435 : *
436 : * Users of the export format should ignore possible additional values
437 : * separated by ',', and find the message after the ';' character.
438 : *
439 : * The optional key/value pairs are attached as continuation lines starting
440 : * with a space character and terminated by a newline. All possible
441 : * non-printable characters are escaped in the "\xff" notation.
442 : */
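/*
 * As an example of consuming the format above, a userspace reader might
 * split one record line roughly as follows (a minimal sketch: 'line' is
 * a hypothetical buffer holding one complete record line, dictionary
 * continuation lines and "\x.." unescaping are ignored):
 *
 *	unsigned int prefix;
 *	unsigned long long seq, ts_usec;
 *	char cont;
 *	const char *semi = strchr(line, ';');
 *
 *	if (semi && sscanf(line, "%u,%llu,%llu,%c",
 *			   &prefix, &seq, &ts_usec, &cont) == 4) {
 *		unsigned int level    = prefix & 7;
 *		unsigned int facility = prefix >> 3;
 *		const char *text      = semi + 1;
 *
 *		... handle one record ...
 *	}
 */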
443 :
444 : /* syslog_lock protects syslog_* variables and write access to clear_seq. */
445 : static DEFINE_MUTEX(syslog_lock);
446 :
447 : #ifdef CONFIG_PRINTK
448 : DECLARE_WAIT_QUEUE_HEAD(log_wait);
449 : /* All 3 protected by @syslog_lock. */
450 : /* the next printk record to read by syslog(READ) or /proc/kmsg */
451 : static u64 syslog_seq;
452 : static size_t syslog_partial;
453 : static bool syslog_time;
454 :
455 : struct latched_seq {
456 : seqcount_latch_t latch;
457 : u64 val[2];
458 : };
459 :
460 : /*
461 : * The next printk record to read after the last 'clear' command. There are
462 : * two copies (updated with seqcount_latch) so that reads can locklessly
463 : * access a valid value. Writers are synchronized by @syslog_lock.
464 : */
465 : static struct latched_seq clear_seq = {
466 : .latch = SEQCNT_LATCH_ZERO(clear_seq.latch),
467 : .val[0] = 0,
468 : .val[1] = 0,
469 : };
470 :
471 : #define LOG_LEVEL(v) ((v) & 0x07)
472 : #define LOG_FACILITY(v) ((v) >> 3 & 0xff)
473 :
474 : /* record buffer */
475 : #define LOG_ALIGN __alignof__(unsigned long)
476 : #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
477 : #define LOG_BUF_LEN_MAX (u32)(1 << 31)
478 : static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
479 : static char *log_buf = __log_buf;
480 : static u32 log_buf_len = __LOG_BUF_LEN;
481 :
482 : /*
483 : * Define the average message size. This only affects the number of
484 : * descriptors that will be available. Underestimating is better than
485 : * overestimating (too many available descriptors is better than not enough).
486 : */
487 : #define PRB_AVGBITS 5 /* 32 character average length */
488 :
489 : #if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
490 : #error CONFIG_LOG_BUF_SHIFT value too small.
491 : #endif
492 : _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
493 : PRB_AVGBITS, &__log_buf[0]);
494 :
495 : static struct printk_ringbuffer printk_rb_dynamic;
496 :
497 : static struct printk_ringbuffer *prb = &printk_rb_static;
498 :
499 : /*
500 : * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
501 : * per_cpu_areas are initialised. This variable is set to true when
502 : * it's safe to access per-CPU data.
503 : */
504 : static bool __printk_percpu_data_ready __ro_after_init;
505 :
506 0 : bool printk_percpu_data_ready(void)
507 : {
508 1928 : return __printk_percpu_data_ready;
509 : }
510 :
511 : /* Must be called under syslog_lock. */
512 : static void latched_seq_write(struct latched_seq *ls, u64 val)
513 : {
514 0 : raw_write_seqcount_latch(&ls->latch);
515 0 : ls->val[0] = val;
516 0 : raw_write_seqcount_latch(&ls->latch);
517 0 : ls->val[1] = val;
518 : }
519 :
520 : /* Can be called from any context. */
521 : static u64 latched_seq_read_nolock(struct latched_seq *ls)
522 : {
523 : unsigned int seq;
524 : unsigned int idx;
525 : u64 val;
526 :
527 : do {
528 0 : seq = raw_read_seqcount_latch(&ls->latch);
529 0 : idx = seq & 0x1;
530 0 : val = ls->val[idx];
531 0 : } while (read_seqcount_latch_retry(&ls->latch, seq));
532 :
533 : return val;
534 : }
535 :
536 : /* Return log buffer address */
537 0 : char *log_buf_addr_get(void)
538 : {
539 0 : return log_buf;
540 : }
541 :
542 : /* Return log buffer size */
543 0 : u32 log_buf_len_get(void)
544 : {
545 0 : return log_buf_len;
546 : }
547 :
548 : /*
549 : * Define how much of the log buffer we could take at maximum. The value
550 : * must be greater than two. Note that only half of the buffer is available
551 : * when the index points to the middle.
552 : */
553 : #define MAX_LOG_TAKE_PART 4
554 : static const char trunc_msg[] = "<truncated>";
555 :
556 0 : static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
557 : {
558 : /*
559 : * The message should not take the whole buffer. Otherwise, it might
560 : * get removed too soon.
561 : */
562 0 : u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
563 :
564 0 : if (*text_len > max_text_len)
565 0 : *text_len = max_text_len;
566 :
567 : /* enable the warning message (if there is room) */
568 0 : *trunc_msg_len = strlen(trunc_msg);
569 0 : if (*text_len >= *trunc_msg_len)
570 0 : *text_len -= *trunc_msg_len;
571 : else
572 0 : *trunc_msg_len = 0;
573 0 : }
574 :
575 : int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
576 :
577 : static int syslog_action_restricted(int type)
578 : {
579 0 : if (dmesg_restrict)
580 : return 1;
581 : /*
582 : * Unless restricted, we allow "read all" and "get buffer size"
583 : * for everybody.
584 : */
585 0 : return type != SYSLOG_ACTION_READ_ALL &&
586 0 : type != SYSLOG_ACTION_SIZE_BUFFER;
587 : }
588 :
589 0 : static int check_syslog_permissions(int type, int source)
590 : {
591 : /*
592 : * If this is from /proc/kmsg and we've already opened it, then we've
593 : * already done the capabilities checks at open time.
594 : */
595 0 : if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
596 : goto ok;
597 :
598 0 : if (syslog_action_restricted(type)) {
599 0 : if (capable(CAP_SYSLOG))
600 : goto ok;
601 : /*
602 : * For historical reasons, accept CAP_SYS_ADMIN too, with
603 : * a warning.
604 : */
605 0 : if (capable(CAP_SYS_ADMIN)) {
606 0 : pr_warn_once("%s (%d): Attempt to access syslog with "
607 : "CAP_SYS_ADMIN but no CAP_SYSLOG "
608 : "(deprecated).\n",
609 : current->comm, task_pid_nr(current));
610 : goto ok;
611 : }
612 : return -EPERM;
613 : }
614 : ok:
615 : return security_syslog(type);
616 : }
617 :
618 : static void append_char(char **pp, char *e, char c)
619 : {
620 0 : if (*pp < e)
621 0 : *(*pp)++ = c;
622 : }
623 :
624 0 : static ssize_t info_print_ext_header(char *buf, size_t size,
625 : struct printk_info *info)
626 : {
627 0 : u64 ts_usec = info->ts_nsec;
628 : char caller[20];
629 : #ifdef CONFIG_PRINTK_CALLER
630 : u32 id = info->caller_id;
631 :
632 : snprintf(caller, sizeof(caller), ",caller=%c%u",
633 : id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
634 : #else
635 0 : caller[0] = '\0';
636 : #endif
637 :
638 0 : do_div(ts_usec, 1000);
639 :
640 0 : return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
641 0 : (info->facility << 3) | info->level, info->seq,
642 0 : ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
643 : }
644 :
645 0 : static ssize_t msg_add_ext_text(char *buf, size_t size,
646 : const char *text, size_t text_len,
647 : unsigned char endc)
648 : {
649 0 : char *p = buf, *e = buf + size;
650 : size_t i;
651 :
652 : /* escape non-printable characters */
653 0 : for (i = 0; i < text_len; i++) {
654 0 : unsigned char c = text[i];
655 :
656 0 : if (c < ' ' || c >= 127 || c == '\\')
657 0 : p += scnprintf(p, e - p, "\\x%02x", c);
658 : else
659 0 : append_char(&p, e, c);
660 : }
661 0 : append_char(&p, e, endc);
662 :
663 0 : return p - buf;
664 : }
665 :
666 0 : static ssize_t msg_add_dict_text(char *buf, size_t size,
667 : const char *key, const char *val)
668 : {
669 0 : size_t val_len = strlen(val);
670 : ssize_t len;
671 :
672 0 : if (!val_len)
673 : return 0;
674 :
675 0 : len = msg_add_ext_text(buf, size, "", 0, ' '); /* dict prefix */
676 0 : len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
677 0 : len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');
678 :
679 0 : return len;
680 : }
681 :
682 0 : static ssize_t msg_print_ext_body(char *buf, size_t size,
683 : char *text, size_t text_len,
684 : struct dev_printk_info *dev_info)
685 : {
686 : ssize_t len;
687 :
688 0 : len = msg_add_ext_text(buf, size, text, text_len, '\n');
689 :
690 0 : if (!dev_info)
691 : goto out;
692 :
693 0 : len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
694 0 : dev_info->subsystem);
695 0 : len += msg_add_dict_text(buf + len, size - len, "DEVICE",
696 0 : dev_info->device);
697 : out:
698 0 : return len;
699 : }
700 :
701 : static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
702 : bool is_extended, bool may_supress);
703 :
704 : /* /dev/kmsg - userspace message inject/listen interface */
705 : struct devkmsg_user {
706 : atomic64_t seq;
707 : struct ratelimit_state rs;
708 : struct mutex lock;
709 : struct printk_buffers pbufs;
710 : };
711 :
712 : static __printf(3, 4) __cold
713 0 : int devkmsg_emit(int facility, int level, const char *fmt, ...)
714 : {
715 : va_list args;
716 : int r;
717 :
718 0 : va_start(args, fmt);
719 0 : r = vprintk_emit(facility, level, NULL, fmt, args);
720 0 : va_end(args);
721 :
722 0 : return r;
723 : }
724 :
725 0 : static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
726 : {
727 : char *buf, *line;
728 0 : int level = default_message_loglevel;
729 0 : int facility = 1; /* LOG_USER */
730 0 : struct file *file = iocb->ki_filp;
731 0 : struct devkmsg_user *user = file->private_data;
732 0 : size_t len = iov_iter_count(from);
733 0 : ssize_t ret = len;
734 :
735 0 : if (len > PRINTKRB_RECORD_MAX)
736 : return -EINVAL;
737 :
738 : /* Ignore when user logging is disabled. */
739 0 : if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
740 : return len;
741 :
742 : /* Ratelimit when not explicitly enabled. */
743 0 : if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
744 0 : if (!___ratelimit(&user->rs, current->comm))
745 : return ret;
746 : }
747 :
748 0 : buf = kmalloc(len+1, GFP_KERNEL);
749 0 : if (buf == NULL)
750 : return -ENOMEM;
751 :
752 0 : buf[len] = '\0';
753 0 : if (!copy_from_iter_full(buf, len, from)) {
754 0 : kfree(buf);
755 0 : return -EFAULT;
756 : }
757 :
758 : /*
759 : * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
760 : * the decimal value represents 32bit, the lower 3 bit are the log
761 : * level, the rest are the log facility.
762 : *
763 : * If no prefix or no userspace facility is specified, we
764 : * enforce LOG_USER, to be able to reliably distinguish
765 : * kernel-generated messages from userspace-injected ones.
766 : */
767 0 : line = buf;
768 0 : if (line[0] == '<') {
769 0 : char *endp = NULL;
770 : unsigned int u;
771 :
772 0 : u = simple_strtoul(line + 1, &endp, 10);
773 0 : if (endp && endp[0] == '>') {
774 0 : level = LOG_LEVEL(u);
775 0 : if (LOG_FACILITY(u) != 0)
776 0 : facility = LOG_FACILITY(u);
777 0 : endp++;
778 0 : line = endp;
779 : }
780 : }
781 :
782 0 : devkmsg_emit(facility, level, "%s", line);
783 0 : kfree(buf);
784 0 : return ret;
785 : }
786 :
787 0 : static ssize_t devkmsg_read(struct file *file, char __user *buf,
788 : size_t count, loff_t *ppos)
789 : {
790 0 : struct devkmsg_user *user = file->private_data;
791 0 : char *outbuf = &user->pbufs.outbuf[0];
792 0 : struct printk_message pmsg = {
793 0 : .pbufs = &user->pbufs,
794 : };
795 : ssize_t ret;
796 :
797 0 : ret = mutex_lock_interruptible(&user->lock);
798 0 : if (ret)
799 : return ret;
800 :
801 0 : if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
802 0 : if (file->f_flags & O_NONBLOCK) {
803 : ret = -EAGAIN;
804 : goto out;
805 : }
806 :
807 : /*
808 : * Guarantee this task is visible on the waitqueue before
809 : * checking the wake condition.
810 : *
811 : * The full memory barrier within set_current_state() of
812 : * prepare_to_wait_event() pairs with the full memory barrier
813 : * within wq_has_sleeper().
814 : *
815 : * This pairs with __wake_up_klogd:A.
816 : */
817 0 : ret = wait_event_interruptible(log_wait,
818 : printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
819 : false)); /* LMM(devkmsg_read:A) */
820 0 : if (ret)
821 : goto out;
822 : }
823 :
824 0 : if (pmsg.dropped) {
825 : /* our last seen message is gone, return error and reset */
826 0 : atomic64_set(&user->seq, pmsg.seq);
827 0 : ret = -EPIPE;
828 0 : goto out;
829 : }
830 :
831 0 : atomic64_set(&user->seq, pmsg.seq + 1);
832 :
833 0 : if (pmsg.outbuf_len > count) {
834 : ret = -EINVAL;
835 : goto out;
836 : }
837 :
838 0 : if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
839 : ret = -EFAULT;
840 : goto out;
841 : }
842 0 : ret = pmsg.outbuf_len;
843 : out:
844 0 : mutex_unlock(&user->lock);
845 0 : return ret;
846 : }
847 :
848 : /*
849 : * Be careful when modifying this function!!!
850 : *
851 : * Only a few operations are supported because the device works only with
852 : * entire variable-length messages (records). Non-standard values are
853 : * returned in the other cases and it has been this way for quite some time.
854 : * User space applications might depend on this behavior.
855 : */
856 0 : static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
857 : {
858 0 : struct devkmsg_user *user = file->private_data;
859 0 : loff_t ret = 0;
860 :
861 0 : if (offset)
862 : return -ESPIPE;
863 :
864 0 : switch (whence) {
865 : case SEEK_SET:
866 : /* the first record */
867 0 : atomic64_set(&user->seq, prb_first_valid_seq(prb));
868 : break;
869 : case SEEK_DATA:
870 : /*
871 : * The first record after the last SYSLOG_ACTION_CLEAR,
872 : * as issued by 'dmesg -c'. Reading /dev/kmsg itself
873 : * changes no global state, and does not clear anything.
874 : */
875 0 : atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
876 : break;
877 : case SEEK_END:
878 : /* after the last record */
879 0 : atomic64_set(&user->seq, prb_next_seq(prb));
880 : break;
881 : default:
882 : ret = -EINVAL;
883 : }
884 : return ret;
885 : }
886 :
887 0 : static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
888 : {
889 0 : struct devkmsg_user *user = file->private_data;
890 : struct printk_info info;
891 0 : __poll_t ret = 0;
892 :
893 0 : poll_wait(file, &log_wait, wait);
894 :
895 0 : if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
896 : /* return error when data has vanished underneath us */
897 0 : if (info.seq != atomic64_read(&user->seq))
898 : ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
899 : else
900 0 : ret = EPOLLIN|EPOLLRDNORM;
901 : }
902 :
903 0 : return ret;
904 : }
905 :
906 0 : static int devkmsg_open(struct inode *inode, struct file *file)
907 : {
908 : struct devkmsg_user *user;
909 : int err;
910 :
911 0 : if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
912 : return -EPERM;
913 :
914 : /* write-only does not need any file context */
915 0 : if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
916 0 : err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
917 : SYSLOG_FROM_READER);
918 0 : if (err)
919 : return err;
920 : }
921 :
922 0 : user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
923 0 : if (!user)
924 : return -ENOMEM;
925 :
926 0 : ratelimit_default_init(&user->rs);
927 0 : ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
928 :
929 0 : mutex_init(&user->lock);
930 :
931 0 : atomic64_set(&user->seq, prb_first_valid_seq(prb));
932 :
933 0 : file->private_data = user;
934 0 : return 0;
935 : }
936 :
937 0 : static int devkmsg_release(struct inode *inode, struct file *file)
938 : {
939 0 : struct devkmsg_user *user = file->private_data;
940 :
941 0 : ratelimit_state_exit(&user->rs);
942 :
943 0 : mutex_destroy(&user->lock);
944 0 : kvfree(user);
945 0 : return 0;
946 : }
947 :
948 : const struct file_operations kmsg_fops = {
949 : .open = devkmsg_open,
950 : .read = devkmsg_read,
951 : .write_iter = devkmsg_write,
952 : .llseek = devkmsg_llseek,
953 : .poll = devkmsg_poll,
954 : .release = devkmsg_release,
955 : };
956 :
957 : #ifdef CONFIG_CRASH_CORE
958 : /*
959 : * This appends the listed symbols to /proc/vmcore
960 : *
961 : * /proc/vmcore is used by various utilities, like crash and makedumpfile to
962 : * obtain access to symbols that are otherwise very difficult to locate. These
963 : * symbols are specifically used so that utilities can access and extract the
964 : * dmesg log from a vmcore file after a crash.
965 : */
966 : void log_buf_vmcoreinfo_setup(void)
967 : {
968 : struct dev_printk_info *dev_info = NULL;
969 :
970 : VMCOREINFO_SYMBOL(prb);
971 : VMCOREINFO_SYMBOL(printk_rb_static);
972 : VMCOREINFO_SYMBOL(clear_seq);
973 :
974 : /*
975 : * Export struct size and field offsets. User space tools can
976 : * parse it and detect any changes to structure down the line.
977 : */
978 :
979 : VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
980 : VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
981 : VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
982 : VMCOREINFO_OFFSET(printk_ringbuffer, fail);
983 :
984 : VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
985 : VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
986 : VMCOREINFO_OFFSET(prb_desc_ring, descs);
987 : VMCOREINFO_OFFSET(prb_desc_ring, infos);
988 : VMCOREINFO_OFFSET(prb_desc_ring, head_id);
989 : VMCOREINFO_OFFSET(prb_desc_ring, tail_id);
990 :
991 : VMCOREINFO_STRUCT_SIZE(prb_desc);
992 : VMCOREINFO_OFFSET(prb_desc, state_var);
993 : VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
994 :
995 : VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
996 : VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
997 : VMCOREINFO_OFFSET(prb_data_blk_lpos, next);
998 :
999 : VMCOREINFO_STRUCT_SIZE(printk_info);
1000 : VMCOREINFO_OFFSET(printk_info, seq);
1001 : VMCOREINFO_OFFSET(printk_info, ts_nsec);
1002 : VMCOREINFO_OFFSET(printk_info, text_len);
1003 : VMCOREINFO_OFFSET(printk_info, caller_id);
1004 : VMCOREINFO_OFFSET(printk_info, dev_info);
1005 :
1006 : VMCOREINFO_STRUCT_SIZE(dev_printk_info);
1007 : VMCOREINFO_OFFSET(dev_printk_info, subsystem);
1008 : VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
1009 : VMCOREINFO_OFFSET(dev_printk_info, device);
1010 : VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));
1011 :
1012 : VMCOREINFO_STRUCT_SIZE(prb_data_ring);
1013 : VMCOREINFO_OFFSET(prb_data_ring, size_bits);
1014 : VMCOREINFO_OFFSET(prb_data_ring, data);
1015 : VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
1016 : VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);
1017 :
1018 : VMCOREINFO_SIZE(atomic_long_t);
1019 : VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
1020 :
1021 : VMCOREINFO_STRUCT_SIZE(latched_seq);
1022 : VMCOREINFO_OFFSET(latched_seq, val);
1023 : }
1024 : #endif
1025 :
1026 : /* requested log_buf_len from kernel cmdline */
1027 : static unsigned long __initdata new_log_buf_len;
1028 :
1029 : /* we practice scaling the ring buffer by powers of 2 */
1030 0 : static void __init log_buf_len_update(u64 size)
1031 : {
1032 0 : if (size > (u64)LOG_BUF_LEN_MAX) {
1033 0 : size = (u64)LOG_BUF_LEN_MAX;
1034 0 : pr_err("log_buf over 2G is not supported.\n");
1035 : }
1036 :
1037 0 : if (size)
1038 0 : size = roundup_pow_of_two(size);
1039 0 : if (size > log_buf_len)
1040 0 : new_log_buf_len = (unsigned long)size;
1041 0 : }
1042 :
1043 : /* save requested log_buf_len since it's too early to process it */
1044 0 : static int __init log_buf_len_setup(char *str)
1045 : {
1046 : u64 size;
1047 :
1048 0 : if (!str)
1049 : return -EINVAL;
1050 :
1051 0 : size = memparse(str, &str);
1052 :
1053 0 : log_buf_len_update(size);
1054 :
1055 0 : return 0;
1056 : }
1057 : early_param("log_buf_len", log_buf_len_setup);
1058 :
1059 : #ifdef CONFIG_SMP
1060 : #define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
1061 :
1062 : static void __init log_buf_add_cpu(void)
1063 : {
1064 : unsigned int cpu_extra;
1065 :
1066 : /*
1067 : * archs should set up cpu_possible_bits properly with
1068 : * set_cpu_possible() after setup_arch() but just in
1069 : * case let's ensure this is valid.
1070 : */
1071 : if (num_possible_cpus() == 1)
1072 : return;
1073 :
1074 : cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
1075 :
1076 : /* by default this will only continue through for large > 64 CPUs */
1077 : if (cpu_extra <= __LOG_BUF_LEN / 2)
1078 : return;
1079 :
1080 : pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
1081 : __LOG_CPU_MAX_BUF_LEN);
1082 : pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
1083 : cpu_extra);
1084 : pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);
1085 :
1086 : log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
1087 : }
1088 : #else /* !CONFIG_SMP */
1089 : static inline void log_buf_add_cpu(void) {}
1090 : #endif /* CONFIG_SMP */
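/*
 * A worked example of the sizing above, assuming the default values of
 * CONFIG_LOG_CPU_MAX_BUF_SHIFT = 12 (4 KiB per CPU) and
 * CONFIG_LOG_BUF_SHIFT = 17 (128 KiB) on a machine with 128 possible CPUs:
 *
 *	cpu_extra = (128 - 1) * 4096 = 520192 bytes (~508 KiB)
 *
 * This exceeds __LOG_BUF_LEN / 2 (64 KiB), so log_buf_len_update() is
 * called with 520192 + 131072 = 651264 bytes, which is rounded up to a
 * 1 MiB ring buffer.
 */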
1091 :
1092 : static void __init set_percpu_data_ready(void)
1093 : {
1094 1 : __printk_percpu_data_ready = true;
1095 : }
1096 :
1097 0 : static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
1098 : struct printk_record *r)
1099 : {
1100 : struct prb_reserved_entry e;
1101 : struct printk_record dest_r;
1102 :
1103 0 : prb_rec_init_wr(&dest_r, r->info->text_len);
1104 :
1105 0 : if (!prb_reserve(&e, rb, &dest_r))
1106 : return 0;
1107 :
1108 0 : memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
1109 0 : dest_r.info->text_len = r->info->text_len;
1110 0 : dest_r.info->facility = r->info->facility;
1111 0 : dest_r.info->level = r->info->level;
1112 0 : dest_r.info->flags = r->info->flags;
1113 0 : dest_r.info->ts_nsec = r->info->ts_nsec;
1114 0 : dest_r.info->caller_id = r->info->caller_id;
1115 0 : memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));
1116 :
1117 0 : prb_final_commit(&e);
1118 :
1119 0 : return prb_record_text_space(&e);
1120 : }
1121 :
1122 : static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;
1123 :
1124 1 : void __init setup_log_buf(int early)
1125 : {
1126 : struct printk_info *new_infos;
1127 : unsigned int new_descs_count;
1128 : struct prb_desc *new_descs;
1129 : struct printk_info info;
1130 : struct printk_record r;
1131 : unsigned int text_size;
1132 : size_t new_descs_size;
1133 : size_t new_infos_size;
1134 : unsigned long flags;
1135 : char *new_log_buf;
1136 : unsigned int free;
1137 : u64 seq;
1138 :
1139 : /*
1140 : * Some archs call setup_log_buf() multiple times - the first very
1141 : * early, e.g. from setup_arch(), and the second when percpu_areas
1142 : * are initialised.
1143 : */
1144 1 : if (!early)
1145 : set_percpu_data_ready();
1146 :
1147 1 : if (log_buf != __log_buf)
1148 1 : return;
1149 :
1150 : if (!early && !new_log_buf_len)
1151 : log_buf_add_cpu();
1152 :
1153 1 : if (!new_log_buf_len)
1154 : return;
1155 :
1156 0 : new_descs_count = new_log_buf_len >> PRB_AVGBITS;
1157 0 : if (new_descs_count == 0) {
1158 0 : pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
1159 0 : return;
1160 : }
1161 :
1162 0 : new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
1163 0 : if (unlikely(!new_log_buf)) {
1164 0 : pr_err("log_buf_len: %lu text bytes not available\n",
1165 : new_log_buf_len);
1166 0 : return;
1167 : }
1168 :
1169 0 : new_descs_size = new_descs_count * sizeof(struct prb_desc);
1170 0 : new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
1171 0 : if (unlikely(!new_descs)) {
1172 0 : pr_err("log_buf_len: %zu desc bytes not available\n",
1173 : new_descs_size);
1174 0 : goto err_free_log_buf;
1175 : }
1176 :
1177 0 : new_infos_size = new_descs_count * sizeof(struct printk_info);
1178 0 : new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
1179 0 : if (unlikely(!new_infos)) {
1180 0 : pr_err("log_buf_len: %zu info bytes not available\n",
1181 : new_infos_size);
1182 : goto err_free_descs;
1183 : }
1184 :
1185 0 : prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));
1186 :
1187 0 : prb_init(&printk_rb_dynamic,
1188 0 : new_log_buf, ilog2(new_log_buf_len),
1189 0 : new_descs, ilog2(new_descs_count),
1190 : new_infos);
1191 :
1192 0 : local_irq_save(flags);
1193 :
1194 0 : log_buf_len = new_log_buf_len;
1195 0 : log_buf = new_log_buf;
1196 0 : new_log_buf_len = 0;
1197 :
1198 0 : free = __LOG_BUF_LEN;
1199 0 : prb_for_each_record(0, &printk_rb_static, seq, &r) {
1200 0 : text_size = add_to_rb(&printk_rb_dynamic, &r);
1201 0 : if (text_size > free)
1202 : free = 0;
1203 : else
1204 0 : free -= text_size;
1205 : }
1206 :
1207 0 : prb = &printk_rb_dynamic;
1208 :
1209 0 : local_irq_restore(flags);
1210 :
1211 : /*
1212 : * Copy any remaining messages that might have appeared from
1213 : * NMI context after copying but before switching to the
1214 : * dynamic buffer.
1215 : */
1216 0 : prb_for_each_record(seq, &printk_rb_static, seq, &r) {
1217 0 : text_size = add_to_rb(&printk_rb_dynamic, &r);
1218 0 : if (text_size > free)
1219 : free = 0;
1220 : else
1221 0 : free -= text_size;
1222 : }
1223 :
1224 0 : if (seq != prb_next_seq(&printk_rb_static)) {
1225 0 : pr_err("dropped %llu messages\n",
1226 : prb_next_seq(&printk_rb_static) - seq);
1227 : }
1228 :
1229 0 : pr_info("log_buf_len: %u bytes\n", log_buf_len);
1230 0 : pr_info("early log buf free: %u(%u%%)\n",
1231 : free, (free * 100) / __LOG_BUF_LEN);
1232 0 : return;
1233 :
1234 : err_free_descs:
1235 0 : memblock_free(new_descs, new_descs_size);
1236 : err_free_log_buf:
1237 0 : memblock_free(new_log_buf, new_log_buf_len);
1238 : }
1239 :
1240 : static bool __read_mostly ignore_loglevel;
1241 :
1242 0 : static int __init ignore_loglevel_setup(char *str)
1243 : {
1244 0 : ignore_loglevel = true;
1245 0 : pr_info("debug: ignoring loglevel setting.\n");
1246 :
1247 0 : return 0;
1248 : }
1249 :
1250 : early_param("ignore_loglevel", ignore_loglevel_setup);
1251 : module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
1252 : MODULE_PARM_DESC(ignore_loglevel,
1253 : "ignore loglevel setting (prints all kernel messages to the console)");
1254 :
1255 : static bool suppress_message_printing(int level)
1256 : {
1257 1693 : return (level >= console_loglevel && !ignore_loglevel);
1258 : }
1259 :
1260 : #ifdef CONFIG_BOOT_PRINTK_DELAY
1261 :
1262 : static int boot_delay; /* msecs delay after each printk during bootup */
1263 : static unsigned long long loops_per_msec; /* based on boot_delay */
1264 :
1265 : static int __init boot_delay_setup(char *str)
1266 : {
1267 : unsigned long lpj;
1268 :
1269 : lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
1270 : loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
1271 :
1272 : get_option(&str, &boot_delay);
1273 : if (boot_delay > 10 * 1000)
1274 : boot_delay = 0;
1275 :
1276 : pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
1277 : "HZ: %d, loops_per_msec: %llu\n",
1278 : boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
1279 : return 0;
1280 : }
1281 : early_param("boot_delay", boot_delay_setup);
1282 :
1283 : static void boot_delay_msec(int level)
1284 : {
1285 : unsigned long long k;
1286 : unsigned long timeout;
1287 :
1288 : if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
1289 : || suppress_message_printing(level)) {
1290 : return;
1291 : }
1292 :
1293 : k = (unsigned long long)loops_per_msec * boot_delay;
1294 :
1295 : timeout = jiffies + msecs_to_jiffies(boot_delay);
1296 : while (k) {
1297 : k--;
1298 : cpu_relax();
1299 : /*
1300 : * use (volatile) jiffies to prevent
1301 : * compiler reduction; loop termination via jiffies
1302 : * is secondary and may or may not happen.
1303 : */
1304 : if (time_after(jiffies, timeout))
1305 : break;
1306 : touch_nmi_watchdog();
1307 : }
1308 : }
1309 : #else
1310 : static inline void boot_delay_msec(int level)
1311 : {
1312 : }
1313 : #endif
1314 :
1315 : static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
1316 : module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
1317 :
1318 : static size_t print_syslog(unsigned int level, char *buf)
1319 : {
1320 0 : return sprintf(buf, "<%u>", level);
1321 : }
1322 :
1323 : static size_t print_time(u64 ts, char *buf)
1324 : {
1325 0 : unsigned long rem_nsec = do_div(ts, 1000000000);
1326 :
1327 0 : return sprintf(buf, "[%5lu.%06lu]",
1328 : (unsigned long)ts, rem_nsec / 1000);
1329 : }
1330 :
1331 : #ifdef CONFIG_PRINTK_CALLER
1332 : static size_t print_caller(u32 id, char *buf)
1333 : {
1334 : char caller[12];
1335 :
1336 : snprintf(caller, sizeof(caller), "%c%u",
1337 : id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
1338 : return sprintf(buf, "[%6s]", caller);
1339 : }
1340 : #else
1341 : #define print_caller(id, buf) 0
1342 : #endif
1343 :
1344 1691 : static size_t info_print_prefix(const struct printk_info *info, bool syslog,
1345 : bool time, char *buf)
1346 : {
1347 1691 : size_t len = 0;
1348 :
1349 1691 : if (syslog)
1350 0 : len = print_syslog((info->facility << 3) | info->level, buf);
1351 :
1352 1691 : if (time)
1353 0 : len += print_time(info->ts_nsec, buf + len);
1354 :
1355 1691 : len += print_caller(info->caller_id, buf + len);
1356 :
1357 1691 : if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
1358 0 : buf[len++] = ' ';
1359 0 : buf[len] = '\0';
1360 : }
1361 :
1362 1691 : return len;
1363 : }
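/*
 * For example, with @syslog and @time both true (and CONFIG_PRINTK_CALLER
 * not set), a KERN_INFO record (facility 0, level 6) with
 * info->ts_nsec = 12345678901 produces the prefix "<6>[   12.345678] ":
 * the seconds field is padded to a width of five and a trailing space is
 * appended as above.
 */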
1364 :
1365 : /*
1366 : * Prepare the record for printing. The text is shifted within the given
1367 : * buffer to avoid a need for another one. The following operations are
1368 : * done:
1369 : *
1370 : * - Add prefix for each line.
1371 : * - Drop truncated lines that no longer fit into the buffer.
1372 : * - Add the trailing newline that has been removed in vprintk_store().
1373 : * - Add a string terminator.
1374 : *
1375 : * Since the produced string is always terminated, the maximum possible
1376 : * return value is @r->text_buf_size - 1;
1377 : *
1378 : * Return: The length of the updated/prepared text, including the added
1379 : * prefixes and the newline. The terminator is not counted. The dropped
1380 : * line(s) are not counted.
1381 : */
1382 1691 : static size_t record_print_text(struct printk_record *r, bool syslog,
1383 : bool time)
1384 : {
1385 1691 : size_t text_len = r->info->text_len;
1386 1691 : size_t buf_size = r->text_buf_size;
1387 1691 : char *text = r->text_buf;
1388 : char prefix[PRINTK_PREFIX_MAX];
1389 1691 : bool truncated = false;
1390 : size_t prefix_len;
1391 : size_t line_len;
1392 1691 : size_t len = 0;
1393 : char *next;
1394 :
1395 : /*
1396 : * If the message was truncated because the buffer was not large
1397 : * enough, treat the available text as if it were the full text.
1398 : */
1399 1691 : if (text_len > buf_size)
1400 0 : text_len = buf_size;
1401 :
1402 1691 : prefix_len = info_print_prefix(r->info, syslog, time, prefix);
1403 :
1404 : /*
1405 : * @text_len: bytes of unprocessed text
1406 : * @line_len: bytes of current line _without_ newline
1407 : * @text: pointer to beginning of current line
1408 : * @len: number of bytes prepared in r->text_buf
1409 : */
1410 : for (;;) {
1411 1691 : next = memchr(text, '\n', text_len);
1412 1691 : if (next) {
1413 0 : line_len = next - text;
1414 : } else {
1415 : /* Drop truncated line(s). */
1416 1691 : if (truncated)
1417 : break;
1418 : line_len = text_len;
1419 : }
1420 :
1421 : /*
1422 : * Truncate the text if there is not enough space to add the
1423 : * prefix and a trailing newline and a terminator.
1424 : */
1425 1691 : if (len + prefix_len + text_len + 1 + 1 > buf_size) {
1426 : /* Drop even the current line if no space. */
1427 0 : if (len + prefix_len + line_len + 1 + 1 > buf_size)
1428 : break;
1429 :
1430 0 : text_len = buf_size - len - prefix_len - 1 - 1;
1431 0 : truncated = true;
1432 : }
1433 :
1434 1691 : memmove(text + prefix_len, text, text_len);
1435 1691 : memcpy(text, prefix, prefix_len);
1436 :
1437 : /*
1438 : * Increment the prepared length to include the text and
1439 : * prefix that were just moved+copied. Also increment for the
1440 : * newline at the end of this line. If this is the last line,
1441 : * there is no newline, but it will be added immediately below.
1442 : */
1443 1691 : len += prefix_len + line_len + 1;
1444 1691 : if (text_len == line_len) {
1445 : /*
1446 : * This is the last line. Add the trailing newline
1447 : * removed in vprintk_store().
1448 : */
1449 1691 : text[prefix_len + line_len] = '\n';
1450 1691 : break;
1451 : }
1452 :
1453 : /*
1454 : * Advance beyond the added prefix and the related line with
1455 : * its newline.
1456 : */
1457 0 : text += prefix_len + line_len + 1;
1458 :
1459 : /*
1460 : * The remaining text has only decreased by the line with its
1461 : * newline.
1462 : *
1463 : * Note that @text_len can become zero. It happens when @text
1464 : * ended with a newline (either due to truncation or the
1465 : * original string ending with "\n\n"). The loop is correctly
1466 : * repeated and (if not truncated) an empty line with a prefix
1467 : * will be prepared.
1468 : */
1469 0 : text_len -= line_len + 1;
1470 : }
1471 :
1472 : /*
1473 : * If a buffer was provided, it will be terminated. Space for the
1474 : * string terminator is guaranteed to be available. The terminator is
1475 : * not counted in the return value.
1476 : */
1477 1691 : if (buf_size > 0)
1478 1691 : r->text_buf[len] = 0;
1479 :
1480 1691 : return len;
1481 : }
1482 :
1483 : static size_t get_record_print_text_size(struct printk_info *info,
1484 : unsigned int line_count,
1485 : bool syslog, bool time)
1486 : {
1487 : char prefix[PRINTK_PREFIX_MAX];
1488 : size_t prefix_len;
1489 :
1490 0 : prefix_len = info_print_prefix(info, syslog, time, prefix);
1491 :
1492 : /*
1493 : * Each line will be preceded with a prefix. The intermediate
1494 : * newlines are already within the text, but a final trailing
1495 : * newline will be added.
1496 : */
1497 0 : return ((prefix_len * line_count) + info->text_len + 1);
1498 : }
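/*
 * For example, a record whose text is "foo\nbar" (info->text_len = 7,
 * line_count = 2) with a hypothetical 18-byte prefix per line is
 * reported as 18 * 2 + 7 + 1 = 44 bytes: two prefixes, the text with
 * its embedded newline, and the trailing newline that
 * record_print_text() will append.
 */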
1499 :
1500 : /*
1501 : * Beginning with @start_seq, find the first record where it and all following
1502 : * records up to (but not including) @max_seq fit into @size.
1503 : *
1504 : * @max_seq is simply an upper bound and does not need to exist. If the caller
1505 : * does not require an upper bound, -1 can be used for @max_seq.
1506 : */
1507 0 : static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
1508 : bool syslog, bool time)
1509 : {
1510 : struct printk_info info;
1511 : unsigned int line_count;
1512 0 : size_t len = 0;
1513 : u64 seq;
1514 :
1515 : /* Determine the size of the records up to @max_seq. */
1516 0 : prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1517 0 : if (info.seq >= max_seq)
1518 : break;
1519 0 : len += get_record_print_text_size(&info, line_count, syslog, time);
1520 : }
1521 :
1522 : /*
1523 : * Adjust the upper bound for the next loop to avoid subtracting
1524 : * lengths that were never added.
1525 : */
1526 0 : if (seq < max_seq)
1527 0 : max_seq = seq;
1528 :
1529 : /*
1530 : * Move first record forward until length fits into the buffer. Ignore
1531 : * newest messages that were not counted in the above cycle. Messages
1532 : * might appear and get lost in the meantime. This is a best effort
1533 : * that prevents an infinite loop that could occur with a retry.
1534 : */
1535 0 : prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1536 0 : if (len <= size || info.seq >= max_seq)
1537 : break;
1538 0 : len -= get_record_print_text_size(&info, line_count, syslog, time);
1539 : }
1540 :
1541 0 : return seq;
1542 : }
1543 :
1544 : /* The caller is responsible for making sure @size is greater than 0. */
1545 0 : static int syslog_print(char __user *buf, int size)
1546 : {
1547 : struct printk_info info;
1548 : struct printk_record r;
1549 : char *text;
1550 0 : int len = 0;
1551 : u64 seq;
1552 :
1553 0 : text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1554 0 : if (!text)
1555 : return -ENOMEM;
1556 :
1557 0 : prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1558 :
1559 0 : mutex_lock(&syslog_lock);
1560 :
1561 : /*
1562 : * Wait for the @syslog_seq record to be available. @syslog_seq may
1563 : * change while waiting.
1564 : */
1565 : do {
1566 0 : seq = syslog_seq;
1567 :
1568 0 : mutex_unlock(&syslog_lock);
1569 : /*
1570 : * Guarantee this task is visible on the waitqueue before
1571 : * checking the wake condition.
1572 : *
1573 : * The full memory barrier within set_current_state() of
1574 : * prepare_to_wait_event() pairs with the full memory barrier
1575 : * within wq_has_sleeper().
1576 : *
1577 : * This pairs with __wake_up_klogd:A.
1578 : */
1579 0 : len = wait_event_interruptible(log_wait,
1580 : prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
1581 0 : mutex_lock(&syslog_lock);
1582 :
1583 0 : if (len)
1584 : goto out;
1585 0 : } while (syslog_seq != seq);
1586 :
1587 : /*
1588 : * Copy records that fit into the buffer. The above cycle makes sure
1589 : * that the first record is always available.
1590 : */
1591 : do {
1592 : size_t n;
1593 : size_t skip;
1594 : int err;
1595 :
1596 0 : if (!prb_read_valid(prb, syslog_seq, &r))
1597 : break;
1598 :
1599 0 : if (r.info->seq != syslog_seq) {
1600 : /* message is gone, move to next valid one */
1601 0 : syslog_seq = r.info->seq;
1602 0 : syslog_partial = 0;
1603 : }
1604 :
1605 : /*
1606 : * To keep reading/counting partial line consistent,
1607 : * use printk_time value as of the beginning of a line.
1608 : */
1609 0 : if (!syslog_partial)
1610 0 : syslog_time = printk_time;
1611 :
1612 0 : skip = syslog_partial;
1613 0 : n = record_print_text(&r, true, syslog_time);
1614 0 : if (n - syslog_partial <= size) {
1615 : /* message fits into buffer, move forward */
1616 0 : syslog_seq = r.info->seq + 1;
1617 0 : n -= syslog_partial;
1618 0 : syslog_partial = 0;
1619 0 : } else if (!len){
1620 : /* partial read(), remember position */
1621 0 : n = size;
1622 0 : syslog_partial += n;
1623 : } else
1624 : n = 0;
1625 :
1626 0 : if (!n)
1627 : break;
1628 :
1629 0 : mutex_unlock(&syslog_lock);
1630 0 : err = copy_to_user(buf, text + skip, n);
1631 0 : mutex_lock(&syslog_lock);
1632 :
1633 0 : if (err) {
1634 0 : if (!len)
1635 0 : len = -EFAULT;
1636 : break;
1637 : }
1638 :
1639 0 : len += n;
1640 0 : size -= n;
1641 0 : buf += n;
1642 0 : } while (size);
1643 : out:
1644 0 : mutex_unlock(&syslog_lock);
1645 0 : kfree(text);
1646 0 : return len;
1647 : }
1648 :
1649 0 : static int syslog_print_all(char __user *buf, int size, bool clear)
1650 : {
1651 : struct printk_info info;
1652 : struct printk_record r;
1653 : char *text;
1654 0 : int len = 0;
1655 : u64 seq;
1656 : bool time;
1657 :
1658 0 : text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1659 0 : if (!text)
1660 : return -ENOMEM;
1661 :
1662 0 : time = printk_time;
1663 : /*
1664 : * Find first record that fits, including all following records,
1665 : * into the user-provided buffer for this dump.
1666 : */
1667 0 : seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
1668 : size, true, time);
1669 :
1670 0 : prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1671 :
1672 0 : len = 0;
1673 0 : prb_for_each_record(seq, prb, seq, &r) {
1674 : int textlen;
1675 :
1676 0 : textlen = record_print_text(&r, true, time);
1677 :
1678 0 : if (len + textlen > size) {
1679 0 : seq--;
1680 0 : break;
1681 : }
1682 :
1683 0 : if (copy_to_user(buf + len, text, textlen))
1684 : len = -EFAULT;
1685 : else
1686 0 : len += textlen;
1687 :
1688 0 : if (len < 0)
1689 : break;
1690 : }
1691 :
1692 0 : if (clear) {
1693 0 : mutex_lock(&syslog_lock);
1694 0 : latched_seq_write(&clear_seq, seq);
1695 0 : mutex_unlock(&syslog_lock);
1696 : }
1697 :
1698 0 : kfree(text);
1699 0 : return len;
1700 : }
1701 :
1702 0 : static void syslog_clear(void)
1703 : {
1704 0 : mutex_lock(&syslog_lock);
1705 0 : latched_seq_write(&clear_seq, prb_next_seq(prb));
1706 0 : mutex_unlock(&syslog_lock);
1707 0 : }
1708 :
1709 0 : int do_syslog(int type, char __user *buf, int len, int source)
1710 : {
1711 : struct printk_info info;
1712 0 : bool clear = false;
1713 : static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1714 : int error;
1715 :
1716 0 : error = check_syslog_permissions(type, source);
1717 0 : if (error)
1718 : return error;
1719 :
1720 0 : switch (type) {
1721 : case SYSLOG_ACTION_CLOSE: /* Close log */
1722 : break;
1723 : case SYSLOG_ACTION_OPEN: /* Open log */
1724 : break;
1725 : case SYSLOG_ACTION_READ: /* Read from log */
1726 0 : if (!buf || len < 0)
1727 : return -EINVAL;
1728 0 : if (!len)
1729 : return 0;
1730 0 : if (!access_ok(buf, len))
1731 : return -EFAULT;
1732 0 : error = syslog_print(buf, len);
1733 0 : break;
1734 : /* Read/clear last kernel messages */
1735 : case SYSLOG_ACTION_READ_CLEAR:
1736 0 : clear = true;
1737 : fallthrough;
1738 : /* Read last kernel messages */
1739 : case SYSLOG_ACTION_READ_ALL:
1740 0 : if (!buf || len < 0)
1741 : return -EINVAL;
1742 0 : if (!len)
1743 : return 0;
1744 0 : if (!access_ok(buf, len))
1745 : return -EFAULT;
1746 0 : error = syslog_print_all(buf, len, clear);
1747 0 : break;
1748 : /* Clear ring buffer */
1749 : case SYSLOG_ACTION_CLEAR:
1750 0 : syslog_clear();
1751 0 : break;
1752 : /* Disable logging to console */
1753 : case SYSLOG_ACTION_CONSOLE_OFF:
1754 0 : if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1755 0 : saved_console_loglevel = console_loglevel;
1756 0 : console_loglevel = minimum_console_loglevel;
1757 0 : break;
1758 : /* Enable logging to console */
1759 : case SYSLOG_ACTION_CONSOLE_ON:
1760 0 : if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1761 0 : console_loglevel = saved_console_loglevel;
1762 0 : saved_console_loglevel = LOGLEVEL_DEFAULT;
1763 : }
1764 : break;
1765 : /* Set level of messages printed to console */
1766 : case SYSLOG_ACTION_CONSOLE_LEVEL:
1767 0 : if (len < 1 || len > 8)
1768 : return -EINVAL;
1769 0 : if (len < minimum_console_loglevel)
1770 0 : len = minimum_console_loglevel;
1771 0 : console_loglevel = len;
1772 : /* Implicitly re-enable logging to console */
1773 0 : saved_console_loglevel = LOGLEVEL_DEFAULT;
1774 0 : break;
1775 : /* Number of chars in the log buffer */
1776 : case SYSLOG_ACTION_SIZE_UNREAD:
1777 0 : mutex_lock(&syslog_lock);
1778 0 : if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
1779 : /* No unread messages. */
1780 0 : mutex_unlock(&syslog_lock);
1781 0 : return 0;
1782 : }
1783 0 : if (info.seq != syslog_seq) {
1784 : /* messages are gone, move to first one */
1785 0 : syslog_seq = info.seq;
1786 0 : syslog_partial = 0;
1787 : }
1788 0 : if (source == SYSLOG_FROM_PROC) {
1789 : /*
1790 : * Short-cut for poll() on /proc/kmsg which simply checks
1791 : * for pending data, not the size; return the count of
1792 : * records, not the length.
1793 : */
1794 0 : error = prb_next_seq(prb) - syslog_seq;
1795 : } else {
1796 0 : bool time = syslog_partial ? syslog_time : printk_time;
1797 : unsigned int line_count;
1798 : u64 seq;
1799 :
1800 0 : prb_for_each_info(syslog_seq, prb, seq, &info,
1801 : &line_count) {
1802 0 : error += get_record_print_text_size(&info, line_count,
1803 : true, time);
1804 0 : time = printk_time;
1805 : }
1806 0 : error -= syslog_partial;
1807 : }
1808 0 : mutex_unlock(&syslog_lock);
1809 0 : break;
1810 : /* Size of the log buffer */
1811 : case SYSLOG_ACTION_SIZE_BUFFER:
1812 0 : error = log_buf_len;
1813 0 : break;
1814 : default:
1815 0 : error = -EINVAL;
1816 0 : break;
1817 : }
1818 :
1819 : return error;
1820 : }
1821 :
1822 0 : SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1823 : {
1824 0 : return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1825 : }
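/*
 * For illustration only (user-space, not part of the kernel): a minimal
 * log reader built on glibc's klogctl(3), which wraps this syscall. The
 * numeric commands match the SYSLOG_ACTION_* values handled by
 * do_syslog() above: 10 is SIZE_BUFFER, 3 is READ_ALL.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/klog.h>

int main(void)
{
	int size = klogctl(10, NULL, 0);	/* SYSLOG_ACTION_SIZE_BUFFER */
	char *buf;
	int len;

	if (size <= 0 || !(buf = malloc(size)))
		return 1;

	len = klogctl(3, buf, size);		/* SYSLOG_ACTION_READ_ALL */
	if (len > 0)
		fwrite(buf, 1, len, stdout);	/* dump the whole ring buffer */
	free(buf);
	return len < 0;
}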
1826 :
1827 : /*
1828 : * Special console_lock variants that help to reduce the risk of soft-lockups.
1829 : * They allow passing console_lock to another printk() call using a busy wait.
1830 : */
1831 :
1832 : #ifdef CONFIG_LOCKDEP
1833 : static struct lockdep_map console_owner_dep_map = {
1834 : .name = "console_owner"
1835 : };
1836 : #endif
1837 :
1838 : static DEFINE_RAW_SPINLOCK(console_owner_lock);
1839 : static struct task_struct *console_owner;
1840 : static bool console_waiter;
1841 :
1842 : /**
1843 : * console_lock_spinning_enable - mark beginning of code where another
1844 : * thread might safely busy wait
1845 : *
1846 : * This basically converts console_lock into a spinlock. This marks
1847 : * the section where the console_lock owner can not sleep, because
1848 : * there may be a waiter spinning (like a spinlock). Also it must be
1849 : * ready to hand over the lock at the end of the section.
1850 : */
1851 : static void console_lock_spinning_enable(void)
1852 : {
1853 1691 : raw_spin_lock(&console_owner_lock);
1854 1691 : console_owner = current;
1855 1691 : raw_spin_unlock(&console_owner_lock);
1856 :
1857 : /* The waiter may spin on us after setting console_owner */
1858 : spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1859 : }
1860 :
1861 : /**
1862 : * console_lock_spinning_disable_and_check - mark end of code where another
1863 : * thread was able to busy wait and check if there is a waiter
1864 : * @cookie: cookie returned from console_srcu_read_lock()
1865 : *
1866 : * This is called at the end of the section where spinning is allowed.
1867 : * It has two functions. First, it is a signal that it is no longer
1868 : * safe to start busy waiting for the lock. Second, it checks if
1869 : * there is a busy waiter and passes the lock rights to it.
1870 : *
1871 : * Important: Callers lose both the console_lock and the SRCU read lock if
1872 : * there was a busy waiter. They must not touch items synchronized by
1873 : * console_lock or SRCU read lock in this case.
1874 : *
1875 : * Return: 1 if the lock rights were passed, 0 otherwise.
1876 : */
1877 : static int console_lock_spinning_disable_and_check(int cookie)
1878 : {
1879 : int waiter;
1880 :
1881 1691 : raw_spin_lock(&console_owner_lock);
1882 1691 : waiter = READ_ONCE(console_waiter);
1883 1691 : console_owner = NULL;
1884 1691 : raw_spin_unlock(&console_owner_lock);
1885 :
1886 1691 : if (!waiter) {
1887 : spin_release(&console_owner_dep_map, _THIS_IP_);
1888 : return 0;
1889 : }
1890 :
1891 : /* The waiter is now free to continue */
1892 0 : WRITE_ONCE(console_waiter, false);
1893 :
1894 : spin_release(&console_owner_dep_map, _THIS_IP_);
1895 :
1896 : /*
1897 : * Preserve lockdep lock ordering. Release the SRCU read lock before
1898 : * releasing the console_lock.
1899 : */
1900 0 : console_srcu_read_unlock(cookie);
1901 :
1902 : /*
1903 : * Hand off console_lock to waiter. The waiter will perform
1904 : * the up(). After this, the waiter is the console_lock owner.
1905 : */
1906 : mutex_release(&console_lock_dep_map, _THIS_IP_);
1907 : return 1;
1908 : }
1909 :
1910 : /**
1911 : * console_trylock_spinning - try to get console_lock by busy waiting
1912 : *
1913 : * This allows busy waiting for the console_lock when the current
1914 : * owner is running in specially marked sections. It means that
1915 : * the current owner is running and cannot reschedule until it
1916 : * is ready to lose the lock.
1917 : *
1918 : * Return: 1 if we got the lock, 0 otherwise
1919 : */
1920 964 : static int console_trylock_spinning(void)
1921 : {
1922 964 : struct task_struct *owner = NULL;
1923 : bool waiter;
1924 964 : bool spin = false;
1925 : unsigned long flags;
1926 :
1927 964 : if (console_trylock())
1928 : return 1;
1929 :
1930 : /*
1931 : * It's unsafe to spin once a panic has begun. If we are the
1932 : * panic CPU, we may have already halted the owner of the
1933 : * console_sem. If we are not the panic CPU, then we should
1934 : * avoid taking console_sem, so the panic CPU has a better
1935 : * chance of cleanly acquiring it later.
1936 : */
1937 0 : if (panic_in_progress())
1938 : return 0;
1939 :
1940 0 : printk_safe_enter_irqsave(flags);
1941 :
1942 0 : raw_spin_lock(&console_owner_lock);
1943 0 : owner = READ_ONCE(console_owner);
1944 0 : waiter = READ_ONCE(console_waiter);
1945 0 : if (!waiter && owner && owner != current) {
1946 0 : WRITE_ONCE(console_waiter, true);
1947 0 : spin = true;
1948 : }
1949 0 : raw_spin_unlock(&console_owner_lock);
1950 :
1951 : /*
1952 : * If there is an active printk() writing to the
1953 : * consoles, instead of having it write our data too,
1954 : * see if we can offload that load from the active
1955 : * printer, and do some printing ourselves.
1956 : * Go into a spin only if there isn't already a waiter
1957 : * spinning, and there is an active printer, and
1958 : * that active printer isn't us (recursive printk?).
1959 : */
1960 0 : if (!spin) {
1961 0 : printk_safe_exit_irqrestore(flags);
1962 0 : return 0;
1963 : }
1964 :
1965 : /* We spin waiting for the owner to release us */
1966 : spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1967 : /* Owner will clear console_waiter on hand off */
1968 0 : while (READ_ONCE(console_waiter))
1969 : cpu_relax();
1970 : spin_release(&console_owner_dep_map, _THIS_IP_);
1971 :
1972 0 : printk_safe_exit_irqrestore(flags);
1973 : /*
1974 : * The owner passed the console lock to us.
1975 : * Since we did not spin on console lock, annotate
1976 : * this as a trylock. Otherwise lockdep will
1977 : * complain.
1978 : */
1979 : mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
1980 :
1981 0 : return 1;
1982 : }
1983 :
1984 : /*
1985 : * Recursion is tracked separately on each CPU. If NMIs are supported, an
1986 : * additional NMI context per CPU is also separately tracked. Until per-CPU
1987 : * is available, a separate "early tracking" is performed.
1988 : */
1989 : static DEFINE_PER_CPU(u8, printk_count);
1990 : static u8 printk_count_early;
1991 : #ifdef CONFIG_HAVE_NMI
1992 : static DEFINE_PER_CPU(u8, printk_count_nmi);
1993 : static u8 printk_count_nmi_early;
1994 : #endif
1995 :
1996 : /*
1997 : * Recursion is limited to keep the output sane. printk() should not require
1998 : * more than 1 level of recursion (allowing, for example, printk() to trigger
1999 : * a WARN), but a higher value is used in case some printk-internal errors
2000 : * exist, such as the ringbuffer validation checks failing.
2001 : */
2002 : #define PRINTK_MAX_RECURSION 3
2003 :
2004 : /*
2005 : * Return a pointer to the dedicated counter for the CPU+context of the
2006 : * caller.
2007 : */
2008 : static u8 *__printk_recursion_counter(void)
2009 : {
2010 : #ifdef CONFIG_HAVE_NMI
2011 : if (in_nmi()) {
2012 : if (printk_percpu_data_ready())
2013 : return this_cpu_ptr(&printk_count_nmi);
2014 : return &printk_count_nmi_early;
2015 : }
2016 : #endif
2017 964 : if (printk_percpu_data_ready())
2018 : return this_cpu_ptr(&printk_count);
2019 : return &printk_count_early;
2020 : }
2021 :
2022 : /*
2023 : * Enter recursion tracking. Interrupts are disabled to simplify tracking.
2024 : * The caller must check the boolean return value to see if the recursion is
2025 : * allowed. On failure, interrupts are not disabled.
2026 : *
2027 : * @recursion_ptr must be a variable of type (u8 *) and is the same variable
2028 : * that is passed to printk_exit_irqrestore().
2029 : */
2030 : #define printk_enter_irqsave(recursion_ptr, flags) \
2031 : ({ \
2032 : bool success = true; \
2033 : \
2034 : typecheck(u8 *, recursion_ptr); \
2035 : local_irq_save(flags); \
2036 : (recursion_ptr) = __printk_recursion_counter(); \
2037 : if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \
2038 : local_irq_restore(flags); \
2039 : success = false; \
2040 : } else { \
2041 : (*(recursion_ptr))++; \
2042 : } \
2043 : success; \
2044 : })
2045 :
2046 : /* Exit recursion tracking, restoring interrupts. */
2047 : #define printk_exit_irqrestore(recursion_ptr, flags) \
2048 : do { \
2049 : typecheck(u8 *, recursion_ptr); \
2050 : (*(recursion_ptr))--; \
2051 : local_irq_restore(flags); \
2052 : } while (0)
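/*
 * A sketch of the intended pairing (vprintk_store() below is the real
 * user): the enter macro both checks the recursion limit and disables
 * interrupts, so the matching exit must run in the same context.
 *
 *	u8 *recursion_ptr;
 *	unsigned long irqflags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, irqflags))
 *		return 0;	(too deep: drop this message)
 *	... format and store the record ...
 *	printk_exit_irqrestore(recursion_ptr, irqflags);
 */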
2053 :
2054 : int printk_delay_msec __read_mostly;
2055 :
2056 : static inline void printk_delay(int level)
2057 : {
2058 964 : boot_delay_msec(level);
2059 :
2060 964 : if (unlikely(printk_delay_msec)) {
2061 : int m = printk_delay_msec;
2062 :
2063 0 : while (m--) {
2064 : mdelay(1);
2065 : touch_nmi_watchdog();
2066 : }
2067 : }
2068 : }
2069 :
2070 : static inline u32 printk_caller_id(void)
2071 : {
2072 1928 : return in_task() ? task_pid_nr(current) :
2073 : 0x80000000 + smp_processor_id();
2074 : }
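/*
 * Example: a caller id of 0x80000002 marks a message emitted from
 * interrupt or NMI context on CPU 2, while values below 0x80000000 are
 * the PID of the printing task.
 */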
2075 :
2076 : /**
2077 : * printk_parse_prefix - Parse level and control flags.
2078 : *
2079 : * @text: The terminated text message.
2080 : * @level: A pointer to the current level value, will be updated.
2081 : * @flags: A pointer to the current printk_info flags, will be updated.
2082 : *
2083 : * @level may be NULL if the caller is not interested in the parsed value.
2084 : * Otherwise the variable pointed to by @level must be set to
2085 : * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
2086 : *
2087 : * @flags may be NULL if the caller is not interested in the parsed value.
2088 : * Otherwise the variable pointed to by @flags will be OR'd with the parsed
2089 : * value.
2090 : *
2091 : * Return: The length of the parsed level and control flags.
2092 : */
2093 1928 : u16 printk_parse_prefix(const char *text, int *level,
2094 : enum printk_info_flags *flags)
2095 : {
2096 1928 : u16 prefix_len = 0;
2097 : int kern_level;
2098 :
2099 5358 : while (*text) {
2100 3430 : kern_level = printk_get_level(text);
2101 3430 : if (!kern_level)
2102 : break;
2103 :
2104 1502 : switch (kern_level) {
2105 : case '0' ... '7':
2106 1346 : if (level && *level == LOGLEVEL_DEFAULT)
2107 673 : *level = kern_level - '0';
2108 : break;
2109 : case 'c': /* KERN_CONT */
2110 156 : if (flags)
2111 78 : *flags |= LOG_CONT;
2112 : }
2113 :
2114 1502 : prefix_len += 2;
2115 1502 : text += 2;
2116 : }
2117 :
2118 1928 : return prefix_len;
2119 : }
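/*
 * Example: KERN_WARNING "disk full\n" arrives here as "\0014disk full\n";
 * the call returns a prefix length of 2 and, if @level was
 * LOGLEVEL_DEFAULT, sets *level to 4. A KERN_CONT prefix ("\001c") sets
 * LOG_CONT in *flags instead.
 */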
2120 :
2121 : __printf(5, 0)
2122 964 : static u16 printk_sprint(char *text, u16 size, int facility,
2123 : enum printk_info_flags *flags, const char *fmt,
2124 : va_list args)
2125 : {
2126 : u16 text_len;
2127 :
2128 964 : text_len = vscnprintf(text, size, fmt, args);
2129 :
2130 : /* Mark and strip a trailing newline. */
2131 964 : if (text_len && text[text_len - 1] == '\n') {
2132 411 : text_len--;
2133 411 : *flags |= LOG_NEWLINE;
2134 : }
2135 :
2136 : /* Strip log level and control flags. */
2137 964 : if (facility == 0) {
2138 : u16 prefix_len;
2139 :
2140 964 : prefix_len = printk_parse_prefix(text, NULL, NULL);
2141 964 : if (prefix_len) {
2142 751 : text_len -= prefix_len;
2143 751 : memmove(text, text + prefix_len, text_len);
2144 : }
2145 : }
2146 :
2147 964 : trace_console(text, text_len);
2148 :
2149 964 : return text_len;
2150 : }
2151 :
2152 : __printf(4, 0)
2153 964 : int vprintk_store(int facility, int level,
2154 : const struct dev_printk_info *dev_info,
2155 : const char *fmt, va_list args)
2156 : {
2157 : struct prb_reserved_entry e;
2158 964 : enum printk_info_flags flags = 0;
2159 : struct printk_record r;
2160 : unsigned long irqflags;
2161 964 : u16 trunc_msg_len = 0;
2162 : char prefix_buf[8];
2163 : u8 *recursion_ptr;
2164 : u16 reserve_size;
2165 : va_list args2;
2166 : u32 caller_id;
2167 : u16 text_len;
2168 964 : int ret = 0;
2169 : u64 ts_nsec;
2170 :
2171 2892 : if (!printk_enter_irqsave(recursion_ptr, irqflags))
2172 : return 0;
2173 :
2174 : /*
2175 : * Since the duration of printk() can vary depending on the message
2176 : * and state of the ringbuffer, grab the timestamp now so that it is
2177 : * close to the call of printk(). This provides a more deterministic
2178 : * timestamp with respect to the caller.
2179 : */
2180 964 : ts_nsec = local_clock();
2181 :
2182 964 : caller_id = printk_caller_id();
2183 :
2184 : /*
2185 : * The sprintf needs to come first since the syslog prefix might be
2186 : * passed in as a parameter. An extra byte must be reserved so that
2187 : * later the vscnprintf() into the reserved buffer has room for the
2188 : * terminating '\0', which is not counted by vsnprintf().
2189 : */
2190 964 : va_copy(args2, args);
2191 964 : reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
2192 964 : va_end(args2);
2193 :
2194 964 : if (reserve_size > PRINTKRB_RECORD_MAX)
2195 0 : reserve_size = PRINTKRB_RECORD_MAX;
2196 :
2197 : /* Extract log level or control flags. */
2198 964 : if (facility == 0)
2199 964 : printk_parse_prefix(&prefix_buf[0], &level, &flags);
2200 :
2201 964 : if (level == LOGLEVEL_DEFAULT)
2202 291 : level = default_message_loglevel;
2203 :
2204 964 : if (dev_info)
2205 0 : flags |= LOG_NEWLINE;
2206 :
2207 964 : if (flags & LOG_CONT) {
2208 156 : prb_rec_init_wr(&r, reserve_size);
2209 78 : if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) {
2210 63 : text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
2211 : facility, &flags, fmt, args);
2212 63 : r.info->text_len += text_len;
2213 :
2214 63 : if (flags & LOG_NEWLINE) {
2215 16 : r.info->flags |= LOG_NEWLINE;
2216 16 : prb_final_commit(&e);
2217 : } else {
2218 47 : prb_commit(&e);
2219 : }
2220 :
2221 63 : ret = text_len;
2222 63 : goto out;
2223 : }
2224 : }
2225 :
2226 : /*
2227 : * Explicitly initialize the record before every prb_reserve() call.
2228 : * prb_reserve_in_last() and prb_reserve() purposely invalidate the
2229 : * structure when they fail.
2230 : */
2231 1802 : prb_rec_init_wr(&r, reserve_size);
2232 901 : if (!prb_reserve(&e, prb, &r)) {
2233 : /* truncate the message if it is too long for empty buffer */
2234 0 : truncate_msg(&reserve_size, &trunc_msg_len);
2235 :
2236 0 : prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
2237 0 : if (!prb_reserve(&e, prb, &r))
2238 : goto out;
2239 : }
2240 :
2241 : /* fill message */
2242 901 : text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
2243 901 : if (trunc_msg_len)
2244 0 : memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
2245 901 : r.info->text_len = text_len + trunc_msg_len;
2246 901 : r.info->facility = facility;
2247 901 : r.info->level = level & 7;
2248 901 : r.info->flags = flags & 0x1f;
2249 901 : r.info->ts_nsec = ts_nsec;
2250 901 : r.info->caller_id = caller_id;
2251 901 : if (dev_info)
2252 0 : memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
2253 :
2254 : /* A message without a trailing newline can be continued. */
2255 901 : if (!(flags & LOG_NEWLINE))
2256 506 : prb_commit(&e);
2257 : else
2258 395 : prb_final_commit(&e);
2259 :
2260 901 : ret = text_len + trunc_msg_len;
2261 : out:
2262 1928 : printk_exit_irqrestore(recursion_ptr, irqflags);
2263 964 : return ret;
2264 : }
2265 :
2266 964 : asmlinkage int vprintk_emit(int facility, int level,
2267 : const struct dev_printk_info *dev_info,
2268 : const char *fmt, va_list args)
2269 : {
2270 : int printed_len;
2271 964 : bool in_sched = false;
2272 :
2273 : /* Suppress unimportant messages after panic happens */
2274 964 : if (unlikely(suppress_printk))
2275 : return 0;
2276 :
2277 964 : if (unlikely(suppress_panic_printk) &&
2278 0 : atomic_read(&panic_cpu) != raw_smp_processor_id())
2279 : return 0;
2280 :
2281 964 : if (level == LOGLEVEL_SCHED) {
2282 0 : level = LOGLEVEL_DEFAULT;
2283 0 : in_sched = true;
2284 : }
2285 :
2286 1928 : printk_delay(level);
2287 :
2288 964 : printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2289 :
2290 : /* If called from the scheduler, we can not call up(). */
2291 964 : if (!in_sched) {
2292 : /*
2293 : * The caller may be holding system-critical or
2294 : * timing-sensitive locks. Disable preemption during
2295 : * printing of all remaining records to all consoles so that
2296 : * this context can return as soon as possible. Hopefully
2297 : * another printk() caller will take over the printing.
2298 : */
2299 964 : preempt_disable();
2300 : /*
2301 : * Try to acquire and then immediately release the console
2302 : * semaphore. The release will print out buffers. With the
2303 : * spinning variant, this context tries to take over the
2304 : * printing from another printing context.
2305 : */
2306 964 : if (console_trylock_spinning())
2307 964 : console_unlock();
2308 964 : preempt_enable();
2309 : }
2310 :
2311 : wake_up_klogd();
2312 964 : return printed_len;
2313 : }
2314 : EXPORT_SYMBOL(vprintk_emit);
2315 :
2316 964 : int vprintk_default(const char *fmt, va_list args)
2317 : {
2318 964 : return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
2319 : }
2320 : EXPORT_SYMBOL_GPL(vprintk_default);
2321 :
2322 963 : asmlinkage __visible int _printk(const char *fmt, ...)
2323 : {
2324 : va_list args;
2325 : int r;
2326 :
2327 963 : va_start(args, fmt);
2328 963 : r = vprintk(fmt, args);
2329 963 : va_end(args);
2330 :
2331 963 : return r;
2332 : }
2333 : EXPORT_SYMBOL(_printk);
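/*
 * For reference, the pr_*() helpers end up here with the loglevel encoded
 * as a prefix that vprintk_store() parses back out; e.g. with the
 * pr_fmt() defined at the top of this file,
 *
 *	pr_warn("%d bad sectors\n", nbad);
 *
 * is roughly _printk("\0014" KBUILD_MODNAME ": %d bad sectors\n", nbad).
 */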
2334 :
2335 : static bool pr_flush(int timeout_ms, bool reset_on_progress);
2336 : static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2337 :
2338 : #else /* CONFIG_PRINTK */
2339 :
2340 : #define printk_time false
2341 :
2342 : #define prb_read_valid(rb, seq, r) false
2343 : #define prb_first_valid_seq(rb) 0
2344 : #define prb_next_seq(rb) 0
2345 :
2346 : static u64 syslog_seq;
2347 :
2348 : static size_t record_print_text(const struct printk_record *r,
2349 : bool syslog, bool time)
2350 : {
2351 : return 0;
2352 : }
2353 : static ssize_t info_print_ext_header(char *buf, size_t size,
2354 : struct printk_info *info)
2355 : {
2356 : return 0;
2357 : }
2358 : static ssize_t msg_print_ext_body(char *buf, size_t size,
2359 : char *text, size_t text_len,
2360 : struct dev_printk_info *dev_info) { return 0; }
2361 : static void console_lock_spinning_enable(void) { }
2362 : static int console_lock_spinning_disable_and_check(int cookie) { return 0; }
2363 : static bool suppress_message_printing(int level) { return false; }
2364 : static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
2365 : static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2366 :
2367 : #endif /* CONFIG_PRINTK */
2368 :
2369 : #ifdef CONFIG_EARLY_PRINTK
2370 : struct console *early_console;
2371 :
2372 0 : asmlinkage __visible void early_printk(const char *fmt, ...)
2373 : {
2374 : va_list ap;
2375 : char buf[512];
2376 : int n;
2377 :
2378 0 : if (!early_console)
2379 0 : return;
2380 :
2381 0 : va_start(ap, fmt);
2382 0 : n = vscnprintf(buf, sizeof(buf), fmt, ap);
2383 0 : va_end(ap);
2384 :
2385 0 : early_console->write(early_console, buf, n);
2386 : }
2387 : #endif
2388 :
2389 : static void set_user_specified(struct console_cmdline *c, bool user_specified)
2390 : {
2391 1 : if (!user_specified)
2392 : return;
2393 :
2394 : /*
2395 : * @c console was defined by the user on the command line.
2396 : * Do not clear it when the console is also added by SPCR or the device tree.
2397 : */
2398 1 : c->user_specified = true;
2399 : /* At least one console defined by the user on the command line. */
2400 1 : console_set_on_cmdline = 1;
2401 : }
2402 :
2403 1 : static int __add_preferred_console(char *name, int idx, char *options,
2404 : char *brl_options, bool user_specified)
2405 : {
2406 : struct console_cmdline *c;
2407 : int i;
2408 :
2409 : /*
2410 : * See if this tty is not yet registered, and
2411 : * if we have a slot free.
2412 : */
2413 2 : for (i = 0, c = console_cmdline;
2414 1 : i < MAX_CMDLINECONSOLES && c->name[0];
2415 0 : i++, c++) {
2416 0 : if (strcmp(c->name, name) == 0 && c->index == idx) {
2417 0 : if (!brl_options)
2418 0 : preferred_console = i;
2419 0 : set_user_specified(c, user_specified);
2420 : return 0;
2421 : }
2422 : }
2423 1 : if (i == MAX_CMDLINECONSOLES)
2424 : return -E2BIG;
2425 1 : if (!brl_options)
2426 1 : preferred_console = i;
2427 1 : strscpy(c->name, name, sizeof(c->name));
2428 1 : c->options = options;
2429 2 : set_user_specified(c, user_specified);
2430 1 : braille_set_options(c, brl_options);
2431 :
2432 1 : c->index = idx;
2433 1 : return 0;
2434 : }
2435 :
2436 0 : static int __init console_msg_format_setup(char *str)
2437 : {
2438 0 : if (!strcmp(str, "syslog"))
2439 0 : console_msg_format = MSG_FORMAT_SYSLOG;
2440 0 : if (!strcmp(str, "default"))
2441 0 : console_msg_format = MSG_FORMAT_DEFAULT;
2442 0 : return 1;
2443 : }
2444 : __setup("console_msg_format=", console_msg_format_setup);
2445 :
2446 : /*
2447 : * Set up a console. Called via do_early_param() in init/main.c
2448 : * for each "console=" parameter in the boot command line.
2449 : */
2450 1 : static int __init console_setup(char *str)
2451 : {
2452 : char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */
2453 1 : char *s, *options, *brl_options = NULL;
2454 : int idx;
2455 :
2456 : /*
2457 : * console="" or console=null have been suggested as a way to
2458 : * disable console output. Use ttynull that has been created
2459 : * for exactly this purpose.
2460 : */
2461 1 : if (str[0] == 0 || strcmp(str, "null") == 0) {
2462 0 : __add_preferred_console("ttynull", 0, NULL, NULL, true);
2463 0 : return 1;
2464 : }
2465 :
2466 1 : if (_braille_console_setup(&str, &brl_options))
2467 : return 1;
2468 :
2469 : /*
2470 : * Decode str into name, index, options.
2471 : */
2472 1 : if (str[0] >= '0' && str[0] <= '9') {
2473 0 : strcpy(buf, "ttyS");
2474 0 : strncpy(buf + 4, str, sizeof(buf) - 5);
2475 : } else {
2476 1 : strncpy(buf, str, sizeof(buf) - 1);
2477 : }
2478 1 : buf[sizeof(buf) - 1] = 0;
2479 1 : options = strchr(str, ',');
2480 1 : if (options)
2481 0 : *(options++) = 0;
2482 : #ifdef __sparc__
2483 : if (!strcmp(str, "ttya"))
2484 : strcpy(buf, "ttyS0");
2485 : if (!strcmp(str, "ttyb"))
2486 : strcpy(buf, "ttyS1");
2487 : #endif
2488 4 : for (s = buf; *s; s++)
2489 6 : if (isdigit(*s) || *s == ',')
2490 : break;
2491 1 : idx = simple_strtoul(s, NULL, 10);
2492 1 : *s = 0;
2493 :
2494 1 : __add_preferred_console(buf, idx, options, brl_options, true);
2495 1 : return 1;
2496 : }
2497 : __setup("console=", console_setup);
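/*
 * Example: "console=ttyS0,115200n8" is decoded to name "ttyS", idx 0 and
 * options "115200n8"; a bare "console=5" is shorthand for "ttyS5". The
 * decoded triple is what __add_preferred_console() records above.
 */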
2498 :
2499 : /**
2500 : * add_preferred_console - add a device to the list of preferred consoles.
2501 : * @name: device name
2502 : * @idx: device index
2503 : * @options: options for this console
2504 : *
2505 : * The last preferred console added will be used for kernel messages
2506 : * and stdin/out/err for init. Normally this is used by console_setup
2507 : * above to handle user-supplied console arguments; however it can also
2508 : * be used by arch-specific code either to override the user or more
2509 : * commonly to provide a default console (i.e. from PROM variables) when
2510 : * the user has not supplied one.
2511 : */
2512 0 : int add_preferred_console(char *name, int idx, char *options)
2513 : {
2514 0 : return __add_preferred_console(name, idx, options, NULL, false);
2515 : }
2516 :
2517 : bool console_suspend_enabled = true;
2518 : EXPORT_SYMBOL(console_suspend_enabled);
2519 :
2520 0 : static int __init console_suspend_disable(char *str)
2521 : {
2522 0 : console_suspend_enabled = false;
2523 0 : return 1;
2524 : }
2525 : __setup("no_console_suspend", console_suspend_disable);
2526 : module_param_named(console_suspend, console_suspend_enabled,
2527 : bool, S_IRUGO | S_IWUSR);
2528 : MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2529 : " and hibernate operations");
2530 :
2531 : static bool printk_console_no_auto_verbose;
2532 :
2533 0 : void console_verbose(void)
2534 : {
2535 0 : if (console_loglevel && !printk_console_no_auto_verbose)
2536 0 : console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
2537 0 : }
2538 : EXPORT_SYMBOL_GPL(console_verbose);
2539 :
2540 : module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
2541 : MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
2542 :
2543 : /**
2544 : * suspend_console - suspend the console subsystem
2545 : *
2546 : * This disables printk() while we go into suspend states
2547 : */
2548 0 : void suspend_console(void)
2549 : {
2550 0 : if (!console_suspend_enabled)
2551 : return;
2552 0 : pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2553 0 : pr_flush(1000, true);
2554 : console_lock();
2555 0 : console_suspended = 1;
2556 0 : up_console_sem();
2557 : }
2558 :
2559 0 : void resume_console(void)
2560 : {
2561 0 : if (!console_suspend_enabled)
2562 : return;
2563 0 : down_console_sem();
2564 0 : console_suspended = 0;
2565 0 : console_unlock();
2566 : pr_flush(1000, true);
2567 : }
2568 :
2569 : /**
2570 : * console_cpu_notify - print deferred console messages after CPU hotplug
2571 : * @cpu: unused
2572 : *
2573 : * If printk() is called from a CPU that is not online yet, the messages
2574 : * will be printed on the console only if there are CON_ANYTIME consoles.
2575 : * This function is called when a new CPU comes online (or fails to come
2576 : * up) or goes offline.
2577 : */
2578 0 : static int console_cpu_notify(unsigned int cpu)
2579 : {
2580 : if (!cpuhp_tasks_frozen) {
2581 : /* If trylock fails, someone else is doing the printing */
2582 0 : if (console_trylock())
2583 0 : console_unlock();
2584 : }
2585 0 : return 0;
2586 : }
2587 :
2588 : /**
2589 : * console_lock - block the console subsystem from printing
2590 : *
2591 : * Acquires a lock which guarantees that no consoles will
2592 : * be in or enter their write() callback.
2593 : *
2594 : * Can sleep, returns nothing.
2595 : */
2596 0 : void console_lock(void)
2597 : {
2598 : might_sleep();
2599 :
2600 0 : down_console_sem();
2601 0 : if (console_suspended)
2602 : return;
2603 0 : console_locked = 1;
2604 0 : console_may_schedule = 1;
2605 : }
2606 : EXPORT_SYMBOL(console_lock);
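/*
 * A sketch of the usual pattern for code that must keep every console
 * driver out of its write() callback while shared state changes:
 *
 *	console_lock();
 *	... update state the consoles depend on (e.g. the foreground VT) ...
 *	console_unlock();	(also prints records buffered meanwhile)
 */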
2607 :
2608 : /**
2609 : * console_trylock - try to block the console subsystem from printing
2610 : *
2611 : * Try to acquire a lock which guarantees that no consoles will
2612 : * be in or enter their write() callback.
2613 : *
2614 : * returns 1 on success, and 0 on failure to acquire the lock.
2615 : */
2616 964 : int console_trylock(void)
2617 : {
2618 964 : if (down_trylock_console_sem())
2619 : return 0;
2620 964 : if (console_suspended) {
2621 0 : up_console_sem();
2622 0 : return 0;
2623 : }
2624 964 : console_locked = 1;
2625 964 : console_may_schedule = 0;
2626 964 : return 1;
2627 : }
2628 : EXPORT_SYMBOL(console_trylock);
2629 :
2630 0 : int is_console_locked(void)
2631 : {
2632 0 : return console_locked;
2633 : }
2634 : EXPORT_SYMBOL(is_console_locked);
2635 :
2636 : /*
2637 : * Return true when this CPU should unlock console_sem without pushing all
2638 : * messages to the console. This reduces the chance that the console is
2639 : * locked when the panic CPU tries to use it.
2640 : */
2641 : static bool abandon_console_lock_in_panic(void)
2642 : {
2643 1693 : if (!panic_in_progress())
2644 : return false;
2645 :
2646 : /*
2647 : * We can use raw_smp_processor_id() here because it is impossible for
2648 : * the task to be migrated to the panic_cpu, or away from it. If
2649 : * panic_cpu has already been set, and we're not currently executing on
2650 : * that CPU, then we never will be.
2651 : */
2652 0 : return atomic_read(&panic_cpu) != raw_smp_processor_id();
2653 : }
2654 :
2655 : /*
2656 : * Check if the given console is currently capable and allowed to print
2657 : * records.
2658 : *
2659 : * Requires the console_srcu_read_lock.
2660 : */
2661 : static inline bool console_is_usable(struct console *con)
2662 : {
2663 3366 : short flags = console_srcu_read_flags(con);
2664 :
2665 3366 : if (!(flags & CON_ENABLED))
2666 : return false;
2667 :
2668 3366 : if (!con->write)
2669 : return false;
2670 :
2671 : /*
2672 : * Console drivers may assume that per-cpu resources have been
2673 : * allocated. So unless they're explicitly marked as being able to
2674 : * cope (CON_ANYTIME) don't call them until this CPU is officially up.
2675 : */
2676 3366 : if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
2677 : return false;
2678 :
2679 : return true;
2680 : }
2681 :
2682 : static void __console_unlock(void)
2683 : {
2684 964 : console_locked = 0;
2685 964 : up_console_sem();
2686 : }
2687 :
2688 : /*
2689 : * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This
2690 : * is achieved by shifting the existing message over and inserting the dropped
2691 : * message.
2692 : *
2693 : * @pmsg is the printk message to prepend.
2694 : *
2695 : * @dropped is the dropped count to report in the dropped message.
2696 : *
2697 : * If the message text in @pmsg->pbufs->outbuf does not have enough space for
2698 : * the dropped message, the message text will be sufficiently truncated.
2699 : *
2700 : * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
2701 : */
2702 : #ifdef CONFIG_PRINTK
2703 0 : static void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
2704 : {
2705 0 : struct printk_buffers *pbufs = pmsg->pbufs;
2706 0 : const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2707 0 : const size_t outbuf_sz = sizeof(pbufs->outbuf);
2708 0 : char *scratchbuf = &pbufs->scratchbuf[0];
2709 0 : char *outbuf = &pbufs->outbuf[0];
2710 : size_t len;
2711 :
2712 0 : len = scnprintf(scratchbuf, scratchbuf_sz,
2713 : "** %lu printk messages dropped **\n", dropped);
2714 :
2715 : /*
2716 : * Make sure outbuf is sufficiently large before prepending.
2717 : * Keep at least the prefix when the message must be truncated.
2718 : * It is a rather theoretical problem when someone tries to
2719 : * use a minimalist buffer.
2720 : */
2721 0 : if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz))
2722 : return;
2723 :
2724 0 : if (pmsg->outbuf_len + len >= outbuf_sz) {
2725 : /* Truncate the message, but keep it terminated. */
2726 0 : pmsg->outbuf_len = outbuf_sz - (len + 1);
2727 0 : outbuf[pmsg->outbuf_len] = 0;
2728 : }
2729 :
2730 0 : memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1);
2731 0 : memcpy(outbuf, scratchbuf, len);
2732 0 : pmsg->outbuf_len += len;
2733 : }
2734 : #else
2735 : #define console_prepend_dropped(pmsg, dropped)
2736 : #endif /* CONFIG_PRINTK */
2737 :
2738 : /*
2739 : * Read and format the specified record (or a later record if the specified
2740 : * record is not available).
2741 : *
2742 : * @pmsg will contain the formatted result. @pmsg->pbufs must point to a
2743 : * struct printk_buffers.
2744 : *
2745 : * @seq is the record to read and format. If it is not available, the next
2746 : * valid record is read.
2747 : *
2748 : * @is_extended specifies if the message should be formatted for extended
2749 : * console output.
2750 : *
2751 : * @may_suppress specifies if records may be skipped based on loglevel.
2752 : *
2753 : * Returns false if no record is available. Otherwise true and all fields
2754 : * of @pmsg are valid. (See the documentation of struct printk_message
2755 : * for information about the @pmsg fields.)
2756 : */
2757 3366 : static bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
2758 : bool is_extended, bool may_suppress)
2759 : {
2760 : static int panic_console_dropped;
2761 :
2762 3366 : struct printk_buffers *pbufs = pmsg->pbufs;
2763 3366 : const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2764 3366 : const size_t outbuf_sz = sizeof(pbufs->outbuf);
2765 3366 : char *scratchbuf = &pbufs->scratchbuf[0];
2766 3366 : char *outbuf = &pbufs->outbuf[0];
2767 : struct printk_info info;
2768 : struct printk_record r;
2769 3366 : size_t len = 0;
2770 :
2771 : /*
2772 : * Formatting extended messages requires a separate buffer, so use the
2773 : * scratch buffer to read in the ringbuffer text.
2774 : *
2775 : * Formatting normal messages is done in-place, so read the ringbuffer
2776 : * text directly into the output buffer.
2777 : */
2778 3366 : if (is_extended)
2779 0 : prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz);
2780 : else
2781 3366 : prb_rec_init_rd(&r, &info, outbuf, outbuf_sz);
2782 :
2783 3366 : if (!prb_read_valid(prb, seq, &r))
2784 : return false;
2785 :
2786 1693 : pmsg->seq = r.info->seq;
2787 1693 : pmsg->dropped = r.info->seq - seq;
2788 :
2789 : /*
2790 : * Check for dropped messages in panic here so that printk
2791 : * suppression can occur as early as possible if necessary.
2792 : */
2793 1693 : if (pmsg->dropped &&
2794 0 : panic_in_progress() &&
2795 0 : panic_console_dropped++ > 10) {
2796 0 : suppress_panic_printk = 1;
2797 0 : pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
2798 : }
2799 :
2800 : /* Skip record that has level above the console loglevel. */
2801 3386 : if (may_suppress && suppress_message_printing(r.info->level))
2802 : goto out;
2803 :
2804 1691 : if (is_extended) {
2805 0 : len = info_print_ext_header(outbuf, outbuf_sz, r.info);
2806 0 : len += msg_print_ext_body(outbuf + len, outbuf_sz - len,
2807 0 : &r.text_buf[0], r.info->text_len, &r.info->dev_info);
2808 : } else {
2809 1691 : len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
2810 : }
2811 : out:
2812 1693 : pmsg->outbuf_len = len;
2813 1693 : return true;
2814 : }
2815 :
2816 : /*
2817 : * Print one record for the given console. The record printed is whatever
2818 : * record is the next available record for the given console.
2819 : *
2820 : * @handover will be set to true if a printk waiter has taken over the
2821 : * console_lock, in which case the caller is no longer holding both the
2822 : * console_lock and the SRCU read lock. Otherwise it is set to false.
2823 : *
2824 : * @cookie is the cookie from the SRCU read lock.
2825 : *
2826 : * Returns false if the given console has no next record to print, otherwise
2827 : * true.
2828 : *
2829 : * Requires the console_lock and the SRCU read lock.
2830 : */
2831 3366 : static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
2832 : {
2833 : static struct printk_buffers pbufs;
2834 :
2835 3366 : bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
2836 3366 : char *outbuf = &pbufs.outbuf[0];
2837 3366 : struct printk_message pmsg = {
2838 : .pbufs = &pbufs,
2839 : };
2840 : unsigned long flags;
2841 :
2842 3366 : *handover = false;
2843 :
2844 3366 : if (!printk_get_next_message(&pmsg, con->seq, is_extended, true))
2845 : return false;
2846 :
2847 1693 : con->dropped += pmsg.dropped;
2848 :
2849 : /* Skip messages of formatted length 0. */
2850 1693 : if (pmsg.outbuf_len == 0) {
2851 2 : con->seq = pmsg.seq + 1;
2852 2 : goto skip;
2853 : }
2854 :
2855 1691 : if (con->dropped && !is_extended) {
2856 0 : console_prepend_dropped(&pmsg, con->dropped);
2857 0 : con->dropped = 0;
2858 : }
2859 :
2860 : /*
2861 : * While actively printing out messages, if another printk()
2862 : * were to occur on another CPU, it may wait for this one to
2863 : * finish. This task can not be preempted if there is a
2864 : * waiter waiting to take over.
2865 : *
2866 : * Interrupts are disabled because the hand over to a waiter
2867 : * must not be interrupted until the hand over is completed
2868 : * (@console_waiter is cleared).
2869 : */
2870 1691 : printk_safe_enter_irqsave(flags);
2871 1691 : console_lock_spinning_enable();
2872 :
2873 : /* Do not trace print latency. */
2874 : stop_critical_timings();
2875 :
2876 : /* Write everything out to the hardware. */
2877 1691 : con->write(con, outbuf, pmsg.outbuf_len);
2878 :
2879 : start_critical_timings();
2880 :
2881 1691 : con->seq = pmsg.seq + 1;
2882 :
2883 1691 : *handover = console_lock_spinning_disable_and_check(cookie);
2884 1691 : printk_safe_exit_irqrestore(flags);
2885 : skip:
2886 : return true;
2887 : }
2888 :
2889 : /*
2890 : * Print out all remaining records to all consoles.
2891 : *
2892 : * @do_cond_resched is set by the caller. It can be true only in schedulable
2893 : * context.
2894 : *
2895 : * @next_seq is set to the sequence number after the last available record.
2896 : * The value is valid only when this function returns true. It means that all
2897 : * usable consoles are completely flushed.
2898 : *
2899 : * @handover will be set to true if a printk waiter has taken over the
2900 : * console_lock, in which case the caller is no longer holding the
2901 : * console_lock. Otherwise it is set to false.
2902 : *
2903 : * Returns true when there was at least one usable console and all messages
2904 : * were flushed to all usable consoles. A returned false informs the caller
2905 : * that everything was not flushed (either there were no usable consoles or
2906 : * another context has taken over printing or it is a panic situation and this
2907 : * is not the panic CPU). Regardless of the reason, the caller should assume it
2908 : * is not useful to immediately try again.
2909 : *
2910 : * Requires the console_lock.
2911 : */
2912 964 : static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
2913 : {
2914 964 : bool any_usable = false;
2915 : struct console *con;
2916 : bool any_progress;
2917 : int cookie;
2918 :
2919 964 : *next_seq = 0;
2920 964 : *handover = false;
2921 :
2922 : do {
2923 1865 : any_progress = false;
2924 :
2925 1865 : cookie = console_srcu_read_lock();
2926 5231 : for_each_console_srcu(con) {
2927 : bool progress;
2928 :
2929 3366 : if (!console_is_usable(con))
2930 0 : continue;
2931 3366 : any_usable = true;
2932 :
2933 3366 : progress = console_emit_next_record(con, handover, cookie);
2934 :
2935 : /*
2936 : * If a handover has occurred, the SRCU read lock
2937 : * is already released.
2938 : */
2939 3366 : if (*handover)
2940 : return false;
2941 :
2942 : /* Track the next of the highest seq flushed. */
2943 3366 : if (con->seq > *next_seq)
2944 1006 : *next_seq = con->seq;
2945 :
2946 3366 : if (!progress)
2947 1673 : continue;
2948 1693 : any_progress = true;
2949 :
2950 : /* Allow panic_cpu to take over the consoles safely. */
2951 1693 : if (abandon_console_lock_in_panic())
2952 : goto abandon;
2953 :
2954 1693 : if (do_cond_resched)
2955 0 : cond_resched();
2956 : }
2957 1865 : console_srcu_read_unlock(cookie);
2958 1865 : } while (any_progress);
2959 :
2960 : return any_usable;
2961 :
2962 : abandon:
2963 0 : console_srcu_read_unlock(cookie);
2964 0 : return false;
2965 : }
2966 :
2967 : /**
2968 : * console_unlock - unblock the console subsystem from printing
2969 : *
2970 : * Releases the console_lock which the caller holds to block printing of
2971 : * the console subsystem.
2972 : *
2973 : * While the console_lock was held, console output may have been buffered
2974 : * by printk(). If this is the case, console_unlock() emits
2975 : * the output prior to releasing the lock.
2976 : *
2977 : * console_unlock() may be called from any context.
2978 : */
2979 964 : void console_unlock(void)
2980 : {
2981 : bool do_cond_resched;
2982 : bool handover;
2983 : bool flushed;
2984 : u64 next_seq;
2985 :
2986 964 : if (console_suspended) {
2987 0 : up_console_sem();
2988 0 : return;
2989 : }
2990 :
2991 : /*
2992 : * Console drivers are called with interrupts disabled, so
2993 : * @console_may_schedule should be cleared before; however, we may
2994 : * end up dumping a lot of lines, for example, if called from
2995 : * console registration path, and should invoke cond_resched()
2996 : * between lines if allowable. Not doing so can cause a very long
2997 : * scheduling stall on a slow console leading to RCU stall and
2998 : * softlockup warnings which exacerbate the issue with more
2999 : * messages practically incapacitating the system. Therefore, create
3000 : * a local to use for the printing loop.
3001 : */
3002 964 : do_cond_resched = console_may_schedule;
3003 :
3004 : do {
3005 964 : console_may_schedule = 0;
3006 :
3007 964 : flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
3008 964 : if (!handover)
3009 : __console_unlock();
3010 :
3011 : /*
3012 : * Abort if there was a failure to flush all messages to all
3013 : * usable consoles. Either it is not possible to flush (in
3014 : * which case it would be an infinite loop of retrying) or
3015 : * another context has taken over printing.
3016 : */
3017 964 : if (!flushed)
3018 : break;
3019 :
3020 : /*
3021 : * Some context may have added new records after
3022 : * console_flush_all() but before unlocking the console.
3023 : * Re-check if there is a new record to flush. If the trylock
3024 : * fails, another context is already handling the printing.
3025 : */
3026 837 : } while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
3027 : }
3028 : EXPORT_SYMBOL(console_unlock);
3029 :
3030 : /**
3031 : * console_conditional_schedule - yield the CPU if required
3032 : *
3033 : * If the console code is currently allowed to sleep, and
3034 : * if this CPU should yield the CPU to another task, do
3035 : * so here.
3036 : *
3037 : * Must be called while holding console_lock().
3038 : */
3039 0 : void __sched console_conditional_schedule(void)
3040 : {
3041 0 : if (console_may_schedule)
3042 0 : cond_resched();
3043 0 : }
3044 : EXPORT_SYMBOL(console_conditional_schedule);
3045 :
3046 0 : void console_unblank(void)
3047 : {
3048 : struct console *c;
3049 : int cookie;
3050 :
3051 : /*
3052 : * Stop console printing because the unblank() callback may
3053 : * assume the console is not within its write() callback.
3054 : *
3055 : * If @oops_in_progress is set, this may be an atomic context.
3056 : * In that case, attempt a trylock as best-effort.
3057 : */
3058 0 : if (oops_in_progress) {
3059 0 : if (down_trylock_console_sem() != 0)
3060 : return;
3061 : } else
3062 : console_lock();
3063 :
3064 0 : console_locked = 1;
3065 0 : console_may_schedule = 0;
3066 :
3067 0 : cookie = console_srcu_read_lock();
3068 0 : for_each_console_srcu(c) {
3069 0 : if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank)
3070 0 : c->unblank();
3071 : }
3072 0 : console_srcu_read_unlock(cookie);
3073 :
3074 0 : console_unlock();
3075 :
3076 0 : if (!oops_in_progress)
3077 : pr_flush(1000, true);
3078 : }
3079 :
3080 : /**
3081 : * console_flush_on_panic - flush console content on panic
3082 : * @mode: flush all messages in buffer or just the pending ones
3083 : *
3084 : * Immediately output all pending messages no matter what.
3085 : */
3086 0 : void console_flush_on_panic(enum con_flush_mode mode)
3087 : {
3088 : /*
3089 : * If someone else is holding the console lock, trylock will fail
3090 : * and may_schedule may be set. Ignore and proceed to unlock so
3091 : * that messages are flushed out. As this can be called from any
3092 : * context and we don't want to get preempted while flushing,
3093 : * ensure may_schedule is cleared.
3094 : */
3095 0 : console_trylock();
3096 0 : console_may_schedule = 0;
3097 :
3098 0 : if (mode == CONSOLE_REPLAY_ALL) {
3099 : struct console *c;
3100 : int cookie;
3101 : u64 seq;
3102 :
3103 0 : seq = prb_first_valid_seq(prb);
3104 :
3105 0 : cookie = console_srcu_read_lock();
3106 0 : for_each_console_srcu(c) {
3107 : /*
3108 : * If the above console_trylock() failed, this is an
3109 : * unsynchronized assignment. But in that case, the
3110 : * kernel is in "hope and pray" mode anyway.
3111 : */
3112 0 : c->seq = seq;
3113 : }
3114 : console_srcu_read_unlock(cookie);
3115 : }
3116 0 : console_unlock();
3117 0 : }
3118 :
3119 : /*
3120 : * Return the console tty driver structure and its associated index
3121 : */
3122 0 : struct tty_driver *console_device(int *index)
3123 : {
3124 : struct console *c;
3125 0 : struct tty_driver *driver = NULL;
3126 : int cookie;
3127 :
3128 : /*
3129 : * Take console_lock to serialize device() callback with
3130 : * other console operations. For example, fg_console is
3131 : * modified under console_lock when switching vt.
3132 : */
3133 : console_lock();
3134 :
3135 0 : cookie = console_srcu_read_lock();
3136 0 : for_each_console_srcu(c) {
3137 0 : if (!c->device)
3138 0 : continue;
3139 0 : driver = c->device(c, index);
3140 0 : if (driver)
3141 : break;
3142 : }
3143 0 : console_srcu_read_unlock(cookie);
3144 :
3145 0 : console_unlock();
3146 0 : return driver;
3147 : }
3148 :
3149 : /*
3150 : * Prevent further output on the passed console device so that (for example)
3151 : * serial drivers can disable console output before suspending a port, and can
3152 : * re-enable output afterwards.
3153 : */
3154 0 : void console_stop(struct console *console)
3155 : {
3156 0 : __pr_flush(console, 1000, true);
3157 : console_list_lock();
3158 0 : console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
3159 : console_list_unlock();
3160 :
3161 : /*
3162 : * Ensure that all SRCU list walks have completed. All contexts must
3163 : * be able to see that this console is disabled so that (for example)
3164 : * the caller can suspend the port without risk of another context
3165 : * using the port.
3166 : */
3167 0 : synchronize_srcu(&console_srcu);
3168 0 : }
3169 : EXPORT_SYMBOL(console_stop);
3170 :
3171 0 : void console_start(struct console *console)
3172 : {
3173 : console_list_lock();
3174 0 : console_srcu_write_flags(console, console->flags | CON_ENABLED);
3175 : console_list_unlock();
3176 0 : __pr_flush(console, 1000, true);
3177 0 : }
3178 : EXPORT_SYMBOL(console_start);
3179 :
3180 : static int __read_mostly keep_bootcon;
3181 :
3182 0 : static int __init keep_bootcon_setup(char *str)
3183 : {
3184 0 : keep_bootcon = 1;
3185 0 : pr_info("debug: skip boot console de-registration.\n");
3186 :
3187 0 : return 0;
3188 : }
3189 :
3190 : early_param("keep_bootcon", keep_bootcon_setup);
3191 :
3192 : /*
3193 : * This is called by register_console() to try to match
3194 : * the newly registered console with any of the ones selected
3195 : * by either the command line or add_preferred_console() and
3196 : * setup/enable it.
3197 : *
3198 : * Care needs to be taken with consoles that are statically
3199 : * enabled, such as netconsole.
3200 : */
3201 3 : static int try_enable_preferred_console(struct console *newcon,
3202 : bool user_specified)
3203 : {
3204 : struct console_cmdline *c;
3205 : int i, err;
3206 :
3207 8 : for (i = 0, c = console_cmdline;
3208 5 : i < MAX_CMDLINECONSOLES && c->name[0];
3209 2 : i++, c++) {
3210 3 : if (c->user_specified != user_specified)
3211 1 : continue;
3212 2 : if (!newcon->match ||
3213 0 : newcon->match(newcon, c->name, c->index, c->options) != 0) {
3214 : /* default matching */
3215 : BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
3216 2 : if (strcmp(c->name, newcon->name) != 0)
3217 1 : continue;
3218 1 : if (newcon->index >= 0 &&
3219 0 : newcon->index != c->index)
3220 0 : continue;
3221 1 : if (newcon->index < 0)
3222 1 : newcon->index = c->index;
3223 :
3224 1 : if (_braille_register_console(newcon, c))
3225 : return 0;
3226 :
3227 2 : if (newcon->setup &&
3228 1 : (err = newcon->setup(newcon, c->options)) != 0)
3229 : return err;
3230 : }
3231 1 : newcon->flags |= CON_ENABLED;
3232 1 : if (i == preferred_console)
3233 1 : newcon->flags |= CON_CONSDEV;
3234 : return 0;
3235 : }
3236 :
3237 : /*
3238 : * Some consoles, such as pstore and netconsole, can be enabled even
3239 : * without matching. Accept the pre-enabled consoles only when match()
3240 : * and setup() had a chance to be called.
3241 : */
3242 2 : if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
3243 : return 0;
3244 :
3245 1 : return -ENOENT;
3246 : }
3247 :
3248 : /* Try to enable the console unconditionally */
3249 0 : static void try_enable_default_console(struct console *newcon)
3250 : {
3251 0 : if (newcon->index < 0)
3252 0 : newcon->index = 0;
3253 :
3254 0 : if (newcon->setup && newcon->setup(newcon, NULL) != 0)
3255 : return;
3256 :
3257 0 : newcon->flags |= CON_ENABLED;
3258 :
3259 0 : if (newcon->device)
3260 0 : newcon->flags |= CON_CONSDEV;
3261 : }
3262 :
3263 : #define con_printk(lvl, con, fmt, ...) \
3264 : printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \
3265 : (con->flags & CON_BOOT) ? "boot" : "", \
3266 : con->name, con->index, ##__VA_ARGS__)
3267 :
3268 2 : static void console_init_seq(struct console *newcon, bool bootcon_registered)
3269 : {
3270 : struct console *con;
3271 : bool handover;
3272 :
3273 2 : if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
3274 : /* Get a consistent copy of @syslog_seq. */
3275 1 : mutex_lock(&syslog_lock);
3276 1 : newcon->seq = syslog_seq;
3277 1 : mutex_unlock(&syslog_lock);
3278 : } else {
3279 : /* Begin with next message added to ringbuffer. */
3280 1 : newcon->seq = prb_next_seq(prb);
3281 :
3282 : /*
3283 : * If any enabled boot consoles are due to be unregistered
3284 : * shortly, some may not be caught up and may be the same
3285 : * device as @newcon. Since it is not known which boot console
3286 : * is the same device, flush all consoles and, if necessary,
3287 : * start with the message of the enabled boot console that is
3288 : * the furthest behind.
3289 : */
3290 1 : if (bootcon_registered && !keep_bootcon) {
3291 : /*
3292 : * Hold the console_lock to stop console printing and
3293 : * guarantee safe access to console->seq.
3294 : */
3295 : console_lock();
3296 :
3297 : /*
3298 : * Flush all consoles and set the console to start at
3299 : * the next unprinted sequence number.
3300 : */
3301 0 : if (!console_flush_all(true, &newcon->seq, &handover)) {
3302 : /*
3303 : * Flushing failed. Just choose the lowest
3304 : * sequence of the enabled boot consoles.
3305 : */
3306 :
3307 : /*
3308 : * If there was a handover, this context no
3309 : * longer holds the console_lock.
3310 : */
3311 0 : if (handover)
3312 : console_lock();
3313 :
3314 0 : newcon->seq = prb_next_seq(prb);
3315 0 : for_each_console(con) {
3316 0 : if ((con->flags & CON_BOOT) &&
3317 0 : (con->flags & CON_ENABLED) &&
3318 0 : con->seq < newcon->seq) {
3319 0 : newcon->seq = con->seq;
3320 : }
3321 : }
3322 : }
3323 :
3324 0 : console_unlock();
3325 : }
3326 : }
3327 2 : }
3328 :
3329 : #define console_first() \
3330 : hlist_entry(console_list.first, struct console, node)
3331 :
3332 : static int unregister_console_locked(struct console *console);
3333 :
3334 : /*
3335 : * The console driver calls this routine during kernel initialization
3336 : * to register the console printing procedure with printk() and to
3337 : * print any messages that were printed by the kernel before the
3338 : * console driver was initialized.
3339 : *
3340 : * This can happen pretty early during the boot process (because of
3341 : * early_printk) - sometimes before setup_arch() completes - be careful
3342 : * of what kernel features are used - they may not be initialised yet.
3343 : *
3344 : * There are two types of consoles - bootconsoles (early_printk) and
3345 : * "real" consoles (everything which is not a bootconsole) which are
3346 : * handled differently.
3347 : * - Any number of bootconsoles can be registered at any time.
3348 : * - As soon as a "real" console is registered, all bootconsoles
3349 : * will be unregistered automatically.
3350 : * - Once a "real" console is registered, any attempt to register a
3351 : * bootconsole will be rejected.
3352 : */
3353 2 : void register_console(struct console *newcon)
3354 : {
3355 : struct console *con;
3356 2 : bool bootcon_registered = false;
3357 2 : bool realcon_registered = false;
3358 : int err;
3359 :
3360 : console_list_lock();
3361 :
3362 3 : for_each_console(con) {
3363 1 : if (WARN(con == newcon, "console '%s%d' already registered\n",
3364 : con->name, con->index)) {
3365 : goto unlock;
3366 : }
3367 :
3368 1 : if (con->flags & CON_BOOT)
3369 : bootcon_registered = true;
3370 : else
3371 1 : realcon_registered = true;
3372 : }
3373 :
3374 : /* Do not register boot consoles when there already is a real one. */
3375 2 : if ((newcon->flags & CON_BOOT) && realcon_registered) {
3376 0 : pr_info("Too late to register bootconsole %s%d\n",
3377 : newcon->name, newcon->index);
3378 0 : goto unlock;
3379 : }
3380 :
3381 : /*
3382 : * See if we want to enable this console driver by default.
3383 : *
3384 : * Nope when a console is preferred by the command line, device
3385 : * tree, or SPCR.
3386 : *
3387 : * The first real console with tty binding (driver) wins. More
3388 : * consoles might get enabled before the right one is found.
3389 : *
3390 : * Note that a console with tty binding will have CON_CONSDEV
3391 : * flag set and will be first in the list.
3392 : */
3393 2 : if (preferred_console < 0) {
3394 0 : if (hlist_empty(&console_list) || !console_first()->device ||
3395 0 : console_first()->flags & CON_BOOT) {
3396 0 : try_enable_default_console(newcon);
3397 : }
3398 : }
3399 :
3400 : /* See if this console matches one we selected on the command line */
3401 2 : err = try_enable_preferred_console(newcon, true);
3402 :
3403 : /* If not, try to match against the platform default(s) */
3404 2 : if (err == -ENOENT)
3405 1 : err = try_enable_preferred_console(newcon, false);
3406 :
3407 : /* printk() messages are not printed to the Braille console. */
3408 2 : if (err || newcon->flags & CON_BRL)
3409 : goto unlock;
3410 :
3411 : /*
3412 : * If we have a bootconsole and are switching to a real console,
3413 : * don't print everything out again: when the boot console and the
3414 : * real console are the same physical device, it is annoying to see
3415 : * the early boot messages twice.
3416 : */
3417 2 : if (bootcon_registered &&
3418 : ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
3419 0 : newcon->flags &= ~CON_PRINTBUFFER;
3420 : }
3421 :
3422 2 : newcon->dropped = 0;
3423 2 : console_init_seq(newcon, bootcon_registered);
3424 :
3425 : /*
3426 : * Put this console in the list - keep the
3427 : * preferred driver at the head of the list.
3428 : */
3429 2 : if (hlist_empty(&console_list)) {
3430 : /* Ensure CON_CONSDEV is always set for the head. */
3431 1 : newcon->flags |= CON_CONSDEV;
3432 1 : hlist_add_head_rcu(&newcon->node, &console_list);
3433 :
3434 1 : } else if (newcon->flags & CON_CONSDEV) {
3435 : /* Only the new head can have CON_CONSDEV set. */
3436 0 : console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV);
3437 0 : hlist_add_head_rcu(&newcon->node, &console_list);
3438 :
3439 : } else {
3440 1 : hlist_add_behind_rcu(&newcon->node, console_list.first);
3441 : }
3442 :
3443 : /*
3444 : * No need to synchronize SRCU here! The caller does not rely
3445 : * on all contexts being able to see the new console before
3446 : * register_console() completes.
3447 : */
3448 :
3449 2 : console_sysfs_notify();
3450 :
3451 : /*
3452 : * By unregistering the bootconsoles after we enable the real console
3453 : * we get the "console xxx enabled" message on all the consoles -
3454 : * boot consoles, real consoles, etc - this is to ensure that end
3455 : * users know there might be something in the kernel's log buffer that
3456 : * went to the bootconsole (that they do not see on the real console)
3457 : */
3458 2 : con_printk(KERN_INFO, newcon, "enabled\n");
3459 2 : if (bootcon_registered &&
3460 0 : ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
3461 0 : !keep_bootcon) {
3462 : struct hlist_node *tmp;
3463 :
3464 0 : hlist_for_each_entry_safe(con, tmp, &console_list, node) {
3465 0 : if (con->flags & CON_BOOT)
3466 0 : unregister_console_locked(con);
3467 : }
3468 : }
3469 : unlock:
3470 : console_list_unlock();
3471 2 : }
3472 : EXPORT_SYMBOL(register_console);
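/*
 * Minimal driver-side sketch (hypothetical "examplecon"; the field set
 * mirrors what register_console() expects from a real console driver):
 *
 *	static void examplecon_write(struct console *con, const char *s,
 *				     unsigned int n)
 *	{
 *		... push n bytes of s to the hardware ...
 *	}
 *
 *	static struct console examplecon = {
 *		.name	= "examplecon",
 *		.write	= examplecon_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&examplecon);
 */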
3473 :
3474 : /* Must be called under console_list_lock(). */
3475 1 : static int unregister_console_locked(struct console *console)
3476 : {
3477 : int res;
3478 :
3479 : lockdep_assert_console_list_lock_held();
3480 :
3481 1 : con_printk(KERN_INFO, console, "disabled\n");
3482 :
3483 1 : res = _braille_unregister_console(console);
3484 : if (res < 0)
3485 : return res;
3486 : if (res > 0)
3487 : return 0;
3488 :
3489 : /* Disable it unconditionally */
3490 2 : console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
3491 :
3492 1 : if (!console_is_registered_locked(console))
3493 : return -ENODEV;
3494 :
3495 0 : hlist_del_init_rcu(&console->node);
3496 :
3497 : /*
3498 : * <HISTORICAL>
3499 : * If this isn't the last console and it has CON_CONSDEV set, we
3500 : * need to set it on the next preferred console.
3501 : * </HISTORICAL>
3502 : *
3503 : * The above makes no sense as there is no guarantee that the next
3504 : * console has any device attached. Oh well....
3505 : */
3506 0 : if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV)
3507 0 : console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV);
3508 :
3509 : /*
3510 : * Ensure that all SRCU list walks have completed. No context may
3511 : * still be able to see this console in the list, so that any
3512 : * exit/cleanup routines can be performed safely.
3513 : */
3514 0 : synchronize_srcu(&console_srcu);
3515 :
3516 0 : console_sysfs_notify();
3517 :
3518 0 : if (console->exit)
3519 0 : res = console->exit(console);
3520 :
3521 : return res;
3522 : }
3523 :
3524 1 : int unregister_console(struct console *console)
3525 : {
3526 : int res;
3527 :
3528 : console_list_lock();
3529 1 : res = unregister_console_locked(console);
3530 : console_list_unlock();
3531 1 : return res;
3532 : }
3533 : EXPORT_SYMBOL(unregister_console);
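/*
 * Illustrative sketch, not part of this file: a minimal console that a
 * driver might register and later unregister. The names "foo_console",
 * "foo_console_write" and "foocon" are hypothetical.
 */
static void foo_console_write(struct console *con, const char *text,
                              unsigned int len)
{
        /* Push @len bytes of @text to the hardware here. */
}

static struct console foo_console = {
        .name   = "foocon",
        .write  = foo_console_write,
        .flags  = CON_PRINTBUFFER,
        .index  = -1,
};

/* Typically called from the driver's probe and remove paths. */
static void foo_console_example(void)
{
        register_console(&foo_console);
        /* ... emit messages via printk() as usual ... */
        unregister_console(&foo_console);
}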
3534 :
3535 : /**
3536 : * console_force_preferred_locked - force a registered console preferred
3537 : * @con: The registered console to force preferred.
3538 : *
3539 : * Must be called under console_list_lock().
3540 : */
3541 0 : void console_force_preferred_locked(struct console *con)
3542 : {
3543 : struct console *cur_pref_con;
3544 :
3545 0 : if (!console_is_registered_locked(con))
3546 : return;
3547 :
3548 0 : cur_pref_con = console_first();
3549 :
3550 : /* Already preferred? */
3551 0 : if (cur_pref_con == con)
3552 : return;
3553 :
3554 : /*
3555 : * Delete, but do not re-initialize the entry. This allows the console
3556 : * to continue to appear registered (via any hlist_unhashed_lockless()
3557 : * checks), even though it was briefly removed from the console list.
3558 : */
3559 0 : hlist_del_rcu(&con->node);
3560 :
3561 : /*
3562 : * Ensure that all SRCU list walks have completed so that the console
3563 : * can be added to the beginning of the console list and its forward
3564 : * list pointer can be re-initialized.
3565 : */
3566 0 : synchronize_srcu(&console_srcu);
3567 :
3568 0 : con->flags |= CON_CONSDEV;
3569 0 : WARN_ON(!con->device);
3570 :
3571 : /* Only the new head can have CON_CONSDEV set. */
3572 0 : console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV);
3573 0 : hlist_add_head_rcu(&con->node, &console_list);
3574 : }
3575 : EXPORT_SYMBOL(console_force_preferred_locked);
3576 :
3577 : /*
3578 : * Initialize the console device. This is called *early*, so
3579 : * we can't necessarily depend on lots of kernel help here.
3580 : * Just do some early initializations, and do the complex setup
3581 : * later.
3582 : */
3583 1 : void __init console_init(void)
3584 : {
3585 : int ret;
3586 : initcall_t call;
3587 : initcall_entry_t *ce;
3588 :
3589 : /* Setup the default TTY line discipline. */
3590 1 : n_tty_init();
3591 :
3592 : /*
3593 : * Set up the console device so that later boot sequences can
3594 : * report problems, etc.
3595 : */
3596 1 : ce = __con_initcall_start;
3597 1 : trace_initcall_level("console");
3598 2 : while (ce < __con_initcall_end) {
3599 1 : call = initcall_from_entry(ce);
3600 : trace_initcall_start(call);
3601 1 : ret = call();
3602 1 : trace_initcall_finish(call, ret);
3603 1 : ce++;
3604 : }
3605 1 : }
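/*
 * Illustrative sketch, not part of this file: an early (boot) console can
 * hook into the loop above via console_initcall() from <linux/init.h>,
 * which places the entry in the __con_initcall section that console_init()
 * walks. "foo_early_console" is a hypothetical console with CON_BOOT set,
 * along the lines of the register_console() sketch earlier in this file.
 */
static int __init foo_early_console_setup(void)
{
        register_console(&foo_early_console);
        return 0;
}
console_initcall(foo_early_console_setup);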
3606 :
3607 : /*
3608 : * Some boot consoles access data that is in the init section and which will
3609 : * be discarded after the initcalls have been run. To make sure that no code
3610 : * will access this data, unregister the boot consoles in a late initcall.
3611 : *
3612 : * If for some reason, such as deferred probe or the driver being a loadable
3613 : * module, the real console hasn't registered yet at this point, there will
3614 : * be a brief interval in which no messages are logged to the console, which
3615 : * makes it difficult to diagnose problems that occur during this time.
3616 : *
3617 : * To mitigate this problem somewhat, only unregister consoles whose memory
3618 : * intersects with the init section. Note that all other boot consoles will
3619 : * get unregistered when the real preferred console is registered.
3620 : */
3621 1 : static int __init printk_late_init(void)
3622 : {
3623 : struct hlist_node *tmp;
3624 : struct console *con;
3625 : int ret;
3626 :
3627 : console_list_lock();
3628 3 : hlist_for_each_entry_safe(con, tmp, &console_list, node) {
3629 2 : if (!(con->flags & CON_BOOT))
3630 2 : continue;
3631 :
3632 : /* Check addresses that might be used for enabled consoles. */
3633 0 : if (init_section_intersects(con, sizeof(*con)) ||
3634 0 : init_section_contains(con->write, 0) ||
3635 0 : init_section_contains(con->read, 0) ||
3636 0 : init_section_contains(con->device, 0) ||
3637 0 : init_section_contains(con->unblank, 0) ||
3638 0 : init_section_contains(con->data, 0)) {
3639 : /*
3640 : * Please, consider moving the reported consoles out
3641 : * of the init section.
3642 : */
3643 0 : pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
3644 : con->name, con->index);
3645 0 : unregister_console_locked(con);
3646 : }
3647 : }
3648 1 : console_list_unlock();
3649 :
3650 1 : ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
3651 : console_cpu_notify);
3652 1 : WARN_ON(ret < 0);
3653 1 : ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
3654 : console_cpu_notify, NULL);
3655 1 : WARN_ON(ret < 0);
3656 1 : printk_sysctl_init();
3657 1 : return 0;
3658 : }
3659 : late_initcall(printk_late_init);
3660 :
3661 : #if defined CONFIG_PRINTK
3662 : /* If @con is specified, only wait for that console. Otherwise wait for all. */
3663 0 : static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
3664 : {
3665 0 : int remaining = timeout_ms;
3666 : struct console *c;
3667 0 : u64 last_diff = 0;
3668 : u64 printk_seq;
3669 : int cookie;
3670 : u64 diff;
3671 : u64 seq;
3672 :
3673 : might_sleep();
3674 :
3675 0 : seq = prb_next_seq(prb);
3676 :
3677 : for (;;) {
3678 0 : diff = 0;
3679 :
3680 : /*
3681 : * Hold the console_lock to guarantee safe access to
3682 : * console->seq and to prevent changes to @console_suspended
3683 : * until all consoles have been processed.
3684 : */
3685 : console_lock();
3686 :
3687 0 : cookie = console_srcu_read_lock();
3688 0 : for_each_console_srcu(c) {
3689 0 : if (con && con != c)
3690 0 : continue;
3691 0 : if (!console_is_usable(c))
3692 0 : continue;
3693 0 : printk_seq = c->seq;
3694 0 : if (printk_seq < seq)
3695 0 : diff += seq - printk_seq;
3696 : }
3697 0 : console_srcu_read_unlock(cookie);
3698 :
3699 : /*
3700 : * If consoles are suspended, it cannot be expected that they
3701 : * make forward progress, so timeout immediately. @diff is
3702 : * still used to return a valid flush status.
3703 : */
3704 0 : if (console_suspended)
3705 : remaining = 0;
3706 0 : else if (diff != last_diff && reset_on_progress)
3707 0 : remaining = timeout_ms;
3708 :
3709 0 : console_unlock();
3710 :
3711 0 : if (diff == 0 || remaining == 0)
3712 : break;
3713 :
3714 0 : if (remaining < 0) {
3715 : /* no timeout limit */
3716 0 : msleep(100);
3717 0 : } else if (remaining < 100) {
3718 0 : msleep(remaining);
3719 0 : remaining = 0;
3720 : } else {
3721 0 : msleep(100);
3722 0 : remaining -= 100;
3723 : }
3724 :
3725 : last_diff = diff;
3726 : }
3727 :
3728 0 : return (diff == 0);
3729 : }
3730 :
3731 : /**
3732 : * pr_flush() - Wait for printing threads to catch up.
3733 : *
3734 : * @timeout_ms: The maximum time (in ms) to wait.
3735 : * @reset_on_progress: Reset the timeout if forward progress is seen.
3736 : *
3737 : * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
3738 : * represents infinite waiting.
3739 : *
3740 : * If @reset_on_progress is true, the timeout will be reset whenever any
3741 : * printer has been seen to make some forward progress.
3742 : *
3743 : * Context: Process context. May sleep while acquiring console lock.
3744 : * Return: true if all enabled printers are caught up.
3745 : */
3746 : static bool pr_flush(int timeout_ms, bool reset_on_progress)
3747 : {
3748 0 : return __pr_flush(NULL, timeout_ms, reset_on_progress);
3749 : }
3750 :
3751 : /*
3752 : * Delayed printk version, for scheduler-internal messages:
3753 : */
3754 : #define PRINTK_PENDING_WAKEUP 0x01
3755 : #define PRINTK_PENDING_OUTPUT 0x02
3756 :
3757 : static DEFINE_PER_CPU(int, printk_pending);
3758 :
3759 0 : static void wake_up_klogd_work_func(struct irq_work *irq_work)
3760 : {
3761 0 : int pending = this_cpu_xchg(printk_pending, 0);
3762 :
3763 0 : if (pending & PRINTK_PENDING_OUTPUT) {
3764 : /* If trylock fails, someone else is doing the printing */
3765 0 : if (console_trylock())
3766 0 : console_unlock();
3767 : }
3768 :
3769 0 : if (pending & PRINTK_PENDING_WAKEUP)
3770 0 : wake_up_interruptible(&log_wait);
3771 0 : }
3772 :
3773 : static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
3774 : IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
3775 :
3776 964 : static void __wake_up_klogd(int val)
3777 : {
3778 964 : if (!printk_percpu_data_ready())
3779 : return;
3780 :
3781 947 : preempt_disable();
3782 : /*
3783 : * Guarantee any new records can be seen by tasks preparing to wait
3784 : * before this context checks if the wait queue is empty.
3785 : *
3786 : * The full memory barrier within wq_has_sleeper() pairs with the full
3787 : * memory barrier within set_current_state() of
3788 : * prepare_to_wait_event(), which is called after ___wait_event() adds
3789 : * the waiter but before it has checked the wait condition.
3790 : *
3791 : * This pairs with devkmsg_read:A and syslog_print:A.
3792 : */
3793 1894 : if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
3794 947 : (val & PRINTK_PENDING_OUTPUT)) {
3795 0 : this_cpu_or(printk_pending, val);
3796 0 : irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
3797 : }
3798 947 : preempt_enable();
3799 : }
3800 :
3801 0 : void wake_up_klogd(void)
3802 : {
3803 964 : __wake_up_klogd(PRINTK_PENDING_WAKEUP);
3804 0 : }
3805 :
3806 0 : void defer_console_output(void)
3807 : {
3808 : /*
3809 : * New messages may have been added directly to the ringbuffer
3810 : * using vprintk_store(), so wake any waiters as well.
3811 : */
3812 0 : __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
3813 0 : }
3814 :
3815 0 : void printk_trigger_flush(void)
3816 : {
3817 : defer_console_output();
3818 0 : }
3819 :
3820 0 : int vprintk_deferred(const char *fmt, va_list args)
3821 : {
3822 : int r;
3823 :
3824 0 : r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
3825 : defer_console_output();
3826 :
3827 0 : return r;
3828 : }
3829 :
3830 0 : int _printk_deferred(const char *fmt, ...)
3831 : {
3832 : va_list args;
3833 : int r;
3834 :
3835 0 : va_start(args, fmt);
3836 0 : r = vprintk_deferred(fmt, args);
3837 0 : va_end(args);
3838 :
3839 0 : return r;
3840 : }
3841 :
3842 : /*
3843 : * printk rate limiting, lifted from the networking subsystem.
3844 : *
3845 : * This enforces a rate limit: not more than 10 kernel messages
3846 : * every 5s, to mitigate printk-based denial-of-service flooding.
3847 : */
3848 : DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
3849 :
3850 0 : int __printk_ratelimit(const char *func)
3851 : {
3852 0 : return ___ratelimit(&printk_ratelimit_state, func);
3853 : }
3854 : EXPORT_SYMBOL(__printk_ratelimit);
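/*
 * Illustrative sketch, not part of this file: typical use of the
 * printk_ratelimit() wrapper from <linux/printk.h>, which expands to
 * __printk_ratelimit(__func__). Note that this shares one global state;
 * printk_ratelimited() with its per-callsite state is usually preferred.
 */
static void example_noisy_error_path(void)
{
        if (printk_ratelimit())
                pr_warn("example: dropping malformed request\n");
}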
3855 :
3856 : /**
3857 : * printk_timed_ratelimit - caller-controlled printk ratelimiting
3858 : * @caller_jiffies: pointer to caller's state
3859 : * @interval_msecs: minimum interval between prints
3860 : *
3861 : * printk_timed_ratelimit() returns true if more than @interval_msecs
3862 : * milliseconds have elapsed since the last time printk_timed_ratelimit()
3863 : * returned true.
3864 : */
3865 0 : bool printk_timed_ratelimit(unsigned long *caller_jiffies,
3866 : unsigned int interval_msecs)
3867 : {
3868 0 : unsigned long elapsed = jiffies - *caller_jiffies;
3869 :
3870 0 : if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
3871 : return false;
3872 :
3873 0 : *caller_jiffies = jiffies;
3874 0 : return true;
3875 : }
3876 : EXPORT_SYMBOL(printk_timed_ratelimit);
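/*
 * Illustrative sketch, not part of this file: caller-controlled rate
 * limiting with printk_timed_ratelimit(). The static variable holds the
 * per-callsite timestamp; the 5000 ms interval is an arbitrary example.
 */
static void example_timed_warning(void)
{
        static unsigned long last_warned;

        if (printk_timed_ratelimit(&last_warned, 5000))
                pr_warn("example: device still not responding\n");
}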
3877 :
3878 : static DEFINE_SPINLOCK(dump_list_lock);
3879 : static LIST_HEAD(dump_list);
3880 :
3881 : /**
3882 : * kmsg_dump_register - register a kernel log dumper.
3883 : * @dumper: pointer to the kmsg_dumper structure
3884 : *
3885 : * Adds a kernel log dumper to the system. The dump callback in the
3886 : * structure must be set; it will be called when the kernel oopses or
3887 : * panics. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
3888 : */
3889 1 : int kmsg_dump_register(struct kmsg_dumper *dumper)
3890 : {
3891 : unsigned long flags;
3892 1 : int err = -EBUSY;
3893 :
3894 : /* The dump callback needs to be set */
3895 1 : if (!dumper->dump)
3896 : return -EINVAL;
3897 :
3898 1 : spin_lock_irqsave(&dump_list_lock, flags);
3899 : /* Don't allow registering multiple times */
3900 1 : if (!dumper->registered) {
3901 1 : dumper->registered = 1;
3902 2 : list_add_tail_rcu(&dumper->list, &dump_list);
3903 1 : err = 0;
3904 : }
3905 1 : spin_unlock_irqrestore(&dump_list_lock, flags);
3906 :
3907 1 : return err;
3908 : }
3909 : EXPORT_SYMBOL_GPL(kmsg_dump_register);
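/*
 * Illustrative sketch, not part of this file: registering a dumper that is
 * invoked only for panics. "foo_dump", "foo_dumper" and "foo_dumper_init"
 * are hypothetical; the callback would walk the records as shown in the
 * sketches after kmsg_dump_get_line()/kmsg_dump_get_buffer() below.
 */
static void foo_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
        /* Retrieve records here with kmsg_dump_get_line()/_get_buffer(). */
}

static struct kmsg_dumper foo_dumper = {
        .dump           = foo_dump,
        .max_reason     = KMSG_DUMP_PANIC,
};

static int __init foo_dumper_init(void)
{
        return kmsg_dump_register(&foo_dumper);
}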
3910 :
3911 : /**
3912 : * kmsg_dump_unregister - unregister a kmsg dumper.
3913 : * @dumper: pointer to the kmsg_dumper structure
3914 : *
3915 : * Removes a kernel log dumper from the system. Returns zero on success and
3916 : * %-EINVAL otherwise.
3917 : */
3918 0 : int kmsg_dump_unregister(struct kmsg_dumper *dumper)
3919 : {
3920 : unsigned long flags;
3921 0 : int err = -EINVAL;
3922 :
3923 0 : spin_lock_irqsave(&dump_list_lock, flags);
3924 0 : if (dumper->registered) {
3925 0 : dumper->registered = 0;
3926 0 : list_del_rcu(&dumper->list);
3927 0 : err = 0;
3928 : }
3929 0 : spin_unlock_irqrestore(&dump_list_lock, flags);
3930 0 : synchronize_rcu();
3931 :
3932 0 : return err;
3933 : }
3934 : EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
3935 :
3936 : static bool always_kmsg_dump;
3937 : module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
3938 :
3939 0 : const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
3940 : {
3941 : switch (reason) {
3942 : case KMSG_DUMP_PANIC:
3943 : return "Panic";
3944 : case KMSG_DUMP_OOPS:
3945 : return "Oops";
3946 : case KMSG_DUMP_EMERG:
3947 : return "Emergency";
3948 : case KMSG_DUMP_SHUTDOWN:
3949 : return "Shutdown";
3950 : default:
3951 : return "Unknown";
3952 : }
3953 : }
3954 : EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
3955 :
3956 : /**
3957 : * kmsg_dump - dump kernel log to kernel message dumpers.
3958 : * @reason: the reason (oops, panic etc) for dumping
3959 : *
3960 : * Call each of the registered dumper's dump() callback, which can
3961 : * retrieve the kmsg records with kmsg_dump_get_line() or
3962 : * kmsg_dump_get_buffer().
3963 : */
3964 1 : void kmsg_dump(enum kmsg_dump_reason reason)
3965 : {
3966 : struct kmsg_dumper *dumper;
3967 :
3968 : rcu_read_lock();
3969 2 : list_for_each_entry_rcu(dumper, &dump_list, list) {
3970 1 : enum kmsg_dump_reason max_reason = dumper->max_reason;
3971 :
3972 : /*
3973 : * If client has not provided a specific max_reason, default
3974 : * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
3975 : */
3976 1 : if (max_reason == KMSG_DUMP_UNDEF) {
3977 1 : max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
3978 : KMSG_DUMP_OOPS;
3979 : }
3980 1 : if (reason > max_reason)
3981 1 : continue;
3982 :
3983 : /* invoke dumper which will iterate over records */
3984 0 : dumper->dump(dumper, reason);
3985 : }
3986 : rcu_read_unlock();
3987 1 : }
3988 :
3989 : /**
3990 : * kmsg_dump_get_line - retrieve one kmsg log line
3991 : * @iter: kmsg dump iterator
3992 : * @syslog: include the "<4>" prefixes
3993 : * @line: buffer to copy the line to
3994 : * @size: maximum size of the buffer
3995 : * @len: length of line placed into buffer
3996 : *
3997 : * Start at the beginning of the kmsg buffer, with the oldest kmsg
3998 : * record, and copy one record into the provided buffer.
3999 : *
4000 : * Consecutive calls will return the next available record moving
4001 : * towards the end of the buffer with the youngest messages.
4002 : *
4003 : * A return value of FALSE indicates that there are no more records to
4004 : * read.
4005 : */
4006 0 : bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
4007 : char *line, size_t size, size_t *len)
4008 : {
4009 0 : u64 min_seq = latched_seq_read_nolock(&clear_seq);
4010 : struct printk_info info;
4011 : unsigned int line_count;
4012 : struct printk_record r;
4013 0 : size_t l = 0;
4014 0 : bool ret = false;
4015 :
4016 0 : if (iter->cur_seq < min_seq)
4017 0 : iter->cur_seq = min_seq;
4018 :
4019 0 : prb_rec_init_rd(&r, &info, line, size);
4020 :
4021 : /* Read text or count text lines? */
4022 0 : if (line) {
4023 0 : if (!prb_read_valid(prb, iter->cur_seq, &r))
4024 : goto out;
4025 0 : l = record_print_text(&r, syslog, printk_time);
4026 : } else {
4027 0 : if (!prb_read_valid_info(prb, iter->cur_seq,
4028 : &info, &line_count)) {
4029 : goto out;
4030 : }
4031 0 : l = get_record_print_text_size(&info, line_count, syslog,
4032 : printk_time);
4033 :
4034 : }
4035 :
4036 0 : iter->cur_seq = r.info->seq + 1;
4037 0 : ret = true;
4038 : out:
4039 0 : if (len)
4040 0 : *len = l;
4041 0 : return ret;
4042 : }
4043 : EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
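/*
 * Illustrative sketch, not part of this file: a dump() callback walking the
 * log line by line, oldest record first. A real dumper would persist each
 * line; here only the total length is accumulated to keep the sketch
 * self-contained. "foo_dump_lines" is a hypothetical name.
 */
static void foo_dump_lines(struct kmsg_dumper *dumper,
                           enum kmsg_dump_reason reason)
{
        struct kmsg_dump_iter iter;
        size_t total = 0;
        char line[256];
        size_t len;

        kmsg_dump_rewind(&iter);
        while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
                total += len;   /* a real dumper would persist @line here */
}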
4044 :
4045 : /**
4046 : * kmsg_dump_get_buffer - copy kmsg log lines
4047 : * @iter: kmsg dump iterator
4048 : * @syslog: include the "<4>" prefixes
4049 : * @buf: buffer to copy the lines to
4050 : * @size: maximum size of the buffer
4051 : * @len_out: length of text placed into buffer
4052 : *
4053 : * Start at the end of the kmsg buffer and fill the provided buffer
4054 : * with as many of the *youngest* kmsg records as fit into it.
4055 : * If the buffer is large enough, all available kmsg records will be
4056 : * copied with a single call.
4057 : *
4058 : * Consecutive calls will fill the buffer with the next block of
4059 : * available older records, not including the earlier retrieved ones.
4060 : *
4061 : * A return value of FALSE indicates that there are no more records to
4062 : * read.
4063 : */
4064 0 : bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
4065 : char *buf, size_t size, size_t *len_out)
4066 : {
4067 0 : u64 min_seq = latched_seq_read_nolock(&clear_seq);
4068 : struct printk_info info;
4069 : struct printk_record r;
4070 : u64 seq;
4071 : u64 next_seq;
4072 0 : size_t len = 0;
4073 0 : bool ret = false;
4074 0 : bool time = printk_time;
4075 :
4076 0 : if (!buf || !size)
4077 : goto out;
4078 :
4079 0 : if (iter->cur_seq < min_seq)
4080 0 : iter->cur_seq = min_seq;
4081 :
4082 0 : if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
4083 0 : if (info.seq != iter->cur_seq) {
4084 : /* messages are gone, move to first available one */
4085 0 : iter->cur_seq = info.seq;
4086 : }
4087 : }
4088 :
4089 : /* last entry */
4090 0 : if (iter->cur_seq >= iter->next_seq)
4091 : goto out;
4092 :
4093 : /*
4094 : * Find first record that fits, including all following records,
4095 : * into the user-provided buffer for this dump. Pass in size-1
4096 : * because this function (by way of record_print_text()) will
4097 : * not write more than size-1 bytes of text into @buf.
4098 : */
4099 0 : seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
4100 : size - 1, syslog, time);
4101 :
4102 : /*
4103 : * Next kmsg_dump_get_buffer() invocation will dump block of
4104 : * older records stored right before this one.
4105 : */
4106 0 : next_seq = seq;
4107 :
4108 0 : prb_rec_init_rd(&r, &info, buf, size);
4109 :
4110 0 : len = 0;
4111 0 : prb_for_each_record(seq, prb, seq, &r) {
4112 0 : if (r.info->seq >= iter->next_seq)
4113 : break;
4114 :
4115 0 : len += record_print_text(&r, syslog, time);
4116 :
4117 : /* Adjust record to store to remaining buffer space. */
4118 0 : prb_rec_init_rd(&r, &info, buf + len, size - len);
4119 : }
4120 :
4121 0 : iter->next_seq = next_seq;
4122 0 : ret = true;
4123 : out:
4124 0 : if (len_out)
4125 0 : *len_out = len;
4126 0 : return ret;
4127 : }
4128 : EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
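/*
 * Illustrative sketch, not part of this file: grabbing the youngest records
 * in one go, as pstore-style backends do. "foo_dump_buf" is a hypothetical
 * persistent buffer and its 4 KiB size is arbitrary.
 */
static char foo_dump_buf[4096];

static void foo_dump_newest(struct kmsg_dumper *dumper,
                            enum kmsg_dump_reason reason)
{
        struct kmsg_dump_iter iter;
        size_t len;

        kmsg_dump_rewind(&iter);
        if (!kmsg_dump_get_buffer(&iter, false, foo_dump_buf,
                                  sizeof(foo_dump_buf), &len))
                return;

        /* A real backend would now write @len bytes of foo_dump_buf to storage. */
}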
4129 :
4130 : /**
4131 : * kmsg_dump_rewind - reset the iterator
4132 : * @iter: kmsg dump iterator
4133 : *
4134 : * Reset the dumper's iterator so that kmsg_dump_get_line() and
4135 : * kmsg_dump_get_buffer() can be called again and used multiple
4136 : * times within the same dumper.dump() callback.
4137 : */
4138 0 : void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
4139 : {
4140 0 : iter->cur_seq = latched_seq_read_nolock(&clear_seq);
4141 0 : iter->next_seq = prb_next_seq(prb);
4142 0 : }
4143 : EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
4144 :
4145 : #endif
4146 :
4147 : #ifdef CONFIG_SMP
4148 : static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
4149 : static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
4150 :
4151 : /**
4152 : * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
4153 : * spinning lock is not owned by any CPU.
4154 : *
4155 : * Context: Any context.
4156 : */
4157 : void __printk_cpu_sync_wait(void)
4158 : {
4159 : do {
4160 : cpu_relax();
4161 : } while (atomic_read(&printk_cpu_sync_owner) != -1);
4162 : }
4163 : EXPORT_SYMBOL(__printk_cpu_sync_wait);
4164 :
4165 : /**
4166 : * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
4167 : * spinning lock.
4168 : *
4169 : * If no processor has the lock, the calling processor takes the lock and
4170 : * becomes the owner. If the calling processor is already the owner of the
4171 : * lock, this function succeeds immediately.
4172 : *
4173 : * Context: Any context. Expects interrupts to be disabled.
4174 : * Return: 1 on success, otherwise 0.
4175 : */
4176 : int __printk_cpu_sync_try_get(void)
4177 : {
4178 : int cpu;
4179 : int old;
4180 :
4181 : cpu = smp_processor_id();
4182 :
4183 : /*
4184 : * Guarantee loads and stores from this CPU when it is the lock owner
4185 : * are _not_ visible to the previous lock owner. This pairs with
4186 : * __printk_cpu_sync_put:B.
4187 : *
4188 : * Memory barrier involvement:
4189 : *
4190 : * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
4191 : * then __printk_cpu_sync_put:A can never read from
4192 : * __printk_cpu_sync_try_get:B.
4193 : *
4194 : * Relies on:
4195 : *
4196 : * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
4197 : * of the previous CPU
4198 : * matching
4199 : * ACQUIRE from __printk_cpu_sync_try_get:A to
4200 : * __printk_cpu_sync_try_get:B of this CPU
4201 : */
4202 : old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
4203 : cpu); /* LMM(__printk_cpu_sync_try_get:A) */
4204 : if (old == -1) {
4205 : /*
4206 : * This CPU is now the owner and begins loading/storing
4207 : * data: LMM(__printk_cpu_sync_try_get:B)
4208 : */
4209 : return 1;
4210 :
4211 : } else if (old == cpu) {
4212 : /* This CPU is already the owner. */
4213 : atomic_inc(&printk_cpu_sync_nested);
4214 : return 1;
4215 : }
4216 :
4217 : return 0;
4218 : }
4219 : EXPORT_SYMBOL(__printk_cpu_sync_try_get);
4220 :
4221 : /**
4222 : * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
4223 : *
4224 : * The calling processor must be the owner of the lock.
4225 : *
4226 : * Context: Any context. Expects interrupts to be disabled.
4227 : */
4228 : void __printk_cpu_sync_put(void)
4229 : {
4230 : if (atomic_read(&printk_cpu_sync_nested)) {
4231 : atomic_dec(&printk_cpu_sync_nested);
4232 : return;
4233 : }
4234 :
4235 : /*
4236 : * This CPU is finished loading/storing data:
4237 : * LMM(__printk_cpu_sync_put:A)
4238 : */
4239 :
4240 : /*
4241 : * Guarantee loads and stores from this CPU when it was the
4242 : * lock owner are visible to the next lock owner. This pairs
4243 : * with __printk_cpu_sync_try_get:A.
4244 : *
4245 : * Memory barrier involvement:
4246 : *
4247 : * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
4248 : * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
4249 : *
4250 : * Relies on:
4251 : *
4252 : * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
4253 : * of this CPU
4254 : * matching
4255 : * ACQUIRE from __printk_cpu_sync_try_get:A to
4256 : * __printk_cpu_sync_try_get:B of the next CPU
4257 : */
4258 : atomic_set_release(&printk_cpu_sync_owner,
4259 : -1); /* LMM(__printk_cpu_sync_put:B) */
4260 : }
4261 : EXPORT_SYMBOL(__printk_cpu_sync_put);
4262 : #endif /* CONFIG_SMP */
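/*
 * Illustrative sketch, not part of this file: keeping a multi-line
 * diagnostic dump from interleaving with output of other CPUs by using the
 * printk_cpu_sync_get_irqsave()/printk_cpu_sync_put_irqrestore() wrappers
 * from <linux/printk.h>, which are built on the helpers above.
 */
static void example_dump_cpu_state(void)
{
        unsigned long flags;

        printk_cpu_sync_get_irqsave(flags);
        pr_emerg("example: begin CPU state dump\n");
        /* ... emit the lines that must stay together ... */
        pr_emerg("example: end CPU state dump\n");
        printk_cpu_sync_put_irqrestore(flags);
}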