Linux Kernel 3.7.1
printk.c
1 /*
2  * linux/kernel/printk.c
3  *
4  * Copyright (C) 1991, 1992 Linus Torvalds
5  *
6  * Modified to make sys_syslog() more flexible: added commands to
7  * return the last 4k of kernel messages, regardless of whether
8  * they've been read or not. Added option to suppress kernel printk's
9  * to the console. Added hook for sending the console messages
10  * elsewhere, in preparation for a serial line console (someday).
11  * Ted Ts'o, 2/11/93.
12  * Modified for sysctl support, 1/8/97, Chris Horn.
13  * Fixed SMP synchronization, 08/08/99, Manfred Spraul
15  * Rewrote bits to get rid of console_lock
16  * 01Mar01 Andrew Morton
17  */
18 
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/tty.h>
22 #include <linux/tty_driver.h>
23 #include <linux/console.h>
24 #include <linux/init.h>
25 #include <linux/jiffies.h>
26 #include <linux/nmi.h>
27 #include <linux/module.h>
28 #include <linux/moduleparam.h>
29 #include <linux/interrupt.h> /* For in_interrupt() */
30 #include <linux/delay.h>
31 #include <linux/smp.h>
32 #include <linux/security.h>
33 #include <linux/bootmem.h>
34 #include <linux/memblock.h>
35 #include <linux/syscalls.h>
36 #include <linux/kexec.h>
37 #include <linux/kdb.h>
38 #include <linux/ratelimit.h>
39 #include <linux/kmsg_dump.h>
40 #include <linux/syslog.h>
41 #include <linux/cpu.h>
42 #include <linux/notifier.h>
43 #include <linux/rculist.h>
44 #include <linux/poll.h>
45 
46 #include <asm/uaccess.h>
47 
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/printk.h>
50 
51 /*
52  * Architectures can override it:
53  */
54 void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
55 {
56 }
57 
58 /* printk's without a loglevel use this.. */
59 #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
60 
61 /* We show everything that is MORE important than this.. */
62 #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
63 #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
64 
65 DECLARE_WAIT_QUEUE_HEAD(log_wait);
66 
67 int console_printk[4] = {
68  DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */
69  DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */
70  MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */
71  DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
72 };
73 
74 /*
75  * Low level drivers may need that to know if they can schedule in
76  * their unblank() callback or not. So let's export it.
77  */
78 int oops_in_progress;
79 EXPORT_SYMBOL(oops_in_progress);
80 
81 /*
82  * console_sem protects the console_drivers list, and also
83  * provides serialisation for access to the entire console
84  * driver system.
85  */
86 static DEFINE_SEMAPHORE(console_sem);
87 struct console *console_drivers;
88 EXPORT_SYMBOL_GPL(console_drivers);
89 
90 /*
91  * This is used for debugging the mess that is the VT code by
92  * keeping track if we have the console semaphore held. It's
93  * definitely not the perfect debug tool (we don't know if _WE_
94  * hold it and are racing), but it helps track those weird code
95  * paths in the console code where we end up in places I want
96  * locked without the console semaphore held.
97  */
98 static int console_locked, console_suspended;
99 
100 /*
101  * If exclusive_console is non-NULL then only this console is to be printed to.
102  */
103 static struct console *exclusive_console;
104 
105 /*
106  * Array of consoles built from command line options (console=)
107  */
108 struct console_cmdline
109 {
110  char name[8]; /* Name of the driver */
111  int index; /* Minor dev. to use */
112  char *options; /* Options for the driver */
113 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
114  char *brl_options; /* Options for braille driver */
115 #endif
116 };
117 
118 #define MAX_CMDLINECONSOLES 8
119 
120 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
121 static int selected_console = -1;
122 static int preferred_console = -1;
123 int console_set_on_cmdline;
124 EXPORT_SYMBOL(console_set_on_cmdline);
125 
126 /* Flag: console code may call schedule() */
127 static int console_may_schedule;
128 
129 /*
130  * The printk log buffer consists of a chain of concatenated variable
131  * length records. Every record starts with a record header, containing
132  * the overall length of the record.
133  *
134  * The heads to the first and last entry in the buffer, as well as the
135  * sequence numbers of these both entries are maintained when messages
136  * are stored.
137  *
138  * If the heads indicate available messages, the length in the header
139  * tells the start of the next message. A length == 0 for the next message
140  * indicates a wrap-around to the beginning of the buffer.
141  *
142  * Every record carries the monotonic timestamp in microseconds, as well as
143  * the standard userspace syslog level and syslog facility. The usual
144  * kernel messages use LOG_KERN; userspace-injected messages always carry
145  * a matching syslog facility, by default LOG_USER. The origin of every
146  * message can be reliably determined that way.
147  *
148  * The human readable log message directly follows the message header. The
149  * length of the message text is stored in the header, the stored message
150  * is not terminated.
151  *
152  * Optionally, a message can carry a dictionary of properties (key/value pairs),
153  * to provide userspace with a machine-readable message context.
154  *
155  * Examples for well-defined, commonly used property names are:
156  * DEVICE=b12:8 device identifier
157  * b12:8 block dev_t
158  * c127:3 char dev_t
159  * n8 netdev ifindex
160  * +sound:card0 subsystem:devname
161  * SUBSYSTEM=pci driver-core subsystem name
162  *
163  * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value
164  * follows directly after a '=' character. Every property is terminated by
165  * a '\0' character. The last property is not terminated.
166  *
167  * Example of a message structure:
168  * 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec
169  * 0008 34 00 record is 52 bytes long
170  * 000a 0b 00 text is 11 bytes long
171  * 000c 1f 00 dictionary is 23 bytes long
172  * 000e 03 00 LOG_KERN (facility) LOG_ERR (level)
173  * 0010 69 74 27 73 20 61 20 6c "it's a l"
174  * 69 6e 65 "ine"
175  * 001b 44 45 56 49 43 "DEVIC"
176  * 45 3d 62 38 3a 32 00 44 "E=b8:2\0D"
177  * 52 49 56 45 52 3d 62 75 "RIVER=bu"
178  * 67 "g"
179  * 0032 00 00 00 padding to next message header
180  *
181  * The 'struct log' buffer header must never be directly exported to
182  * userspace, it is a kernel-private implementation detail that might
183  * need to be changed in the future, when the requirements change.
184  *
185  * /dev/kmsg exports the structured data in the following line format:
186  * "level,sequnum,timestamp;<message text>\n"
187  *
188  * The optional key/value pairs are attached as continuation lines starting
189  * with a space character and terminated by a newline. All possible
190  * non-printable characters are escaped in the "\xff" notation.
191  *
192  * Users of the export format should ignore possible additional values
193  * separated by ',', and find the message after the ';' character.
194  */
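
The export format described above can be consumed directly from userspace. The following is a minimal sketch (not part of printk.c; error handling and the key/value continuation lines are left out) that reads one record per read() and splits the comma-separated prefix from the message text:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char rec[8192];
	int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;

	for (;;) {
		/* each read() returns exactly one record */
		ssize_t n = read(fd, rec, sizeof(rec) - 1);
		unsigned int prefix;
		unsigned long long seq, ts_usec;
		char *text;

		if (n <= 0)
			break;			/* EAGAIN: caught up with the buffer */
		rec[n] = '\0';

		/* "level,seqnum,timestamp[,flag];<message text>\n" */
		text = strchr(rec, ';');
		if (!text || sscanf(rec, "%u,%llu,%llu", &prefix, &seq, &ts_usec) != 3)
			continue;		/* ignore records we cannot parse */
		printf("seq=%llu facility=%u level=%u: %s",
		       seq, prefix >> 3, prefix & 7, text + 1);
	}
	close(fd);
	return 0;
}
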
195 
196 enum log_flags {
197  LOG_NOCONS = 1, /* already flushed, do not print to console */
198  LOG_NEWLINE = 2, /* text ended with a newline */
199  LOG_PREFIX = 4, /* text started with a prefix */
200  LOG_CONT = 8, /* text is a fragment of a continuation line */
201 };
202 
203 struct log {
204  u64 ts_nsec; /* timestamp in nanoseconds */
205  u16 len; /* length of entire record */
206  u16 text_len; /* length of text buffer */
207  u16 dict_len; /* length of dictionary buffer */
208  u8 facility; /* syslog facility */
209  u8 flags:5; /* internal record flags */
210  u8 level:3; /* syslog level */
211 };
212 
213 /*
214  * The logbuf_lock protects kmsg buffer, indices, counters. It is also
215  * used in interesting ways to provide interlocking in console_unlock();
216  */
217 static DEFINE_RAW_SPINLOCK(logbuf_lock);
218 
219 #ifdef CONFIG_PRINTK
220 /* the next printk record to read by syslog(READ) or /proc/kmsg */
221 static u64 syslog_seq;
222 static u32 syslog_idx;
223 static enum log_flags syslog_prev;
224 static size_t syslog_partial;
225 
226 /* index and sequence number of the first record stored in the buffer */
227 static u64 log_first_seq;
228 static u32 log_first_idx;
229 
230 /* index and sequence number of the next record to store in the buffer */
231 static u64 log_next_seq;
232 static u32 log_next_idx;
233 
234 /* the next printk record to write to the console */
235 static u64 console_seq;
236 static u32 console_idx;
237 static enum log_flags console_prev;
238 
239 /* the next printk record to read after the last 'clear' command */
240 static u64 clear_seq;
241 static u32 clear_idx;
242 
243 #define PREFIX_MAX 32
244 #define LOG_LINE_MAX (1024 - PREFIX_MAX)
245 
246 /* record buffer */
247 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
248 #define LOG_ALIGN 4
249 #else
250 #define LOG_ALIGN __alignof__(struct log)
251 #endif
252 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
253 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
254 static char *log_buf = __log_buf;
255 static u32 log_buf_len = __LOG_BUF_LEN;
256 
257 /* cpu currently holding logbuf_lock */
258 static volatile unsigned int logbuf_cpu = UINT_MAX;
259 
260 /* human readable text of the record */
261 static char *log_text(const struct log *msg)
262 {
263  return (char *)msg + sizeof(struct log);
264 }
265 
266 /* optional key/value pair dictionary attached to the record */
267 static char *log_dict(const struct log *msg)
268 {
269  return (char *)msg + sizeof(struct log) + msg->text_len;
270 }
271 
272 /* get record by index; idx must point to valid msg */
273 static struct log *log_from_idx(u32 idx)
274 {
275  struct log *msg = (struct log *)(log_buf + idx);
276 
277  /*
278  * A length == 0 record is the end of buffer marker. Wrap around and
279  * read the message at the start of the buffer.
280  */
281  if (!msg->len)
282  return (struct log *)log_buf;
283  return msg;
284 }
285 
286 /* get next record; idx must point to valid msg */
287 static u32 log_next(u32 idx)
288 {
289  struct log *msg = (struct log *)(log_buf + idx);
290 
291  /* length == 0 indicates the end of the buffer; wrap */
292  /*
293  * A length == 0 record is the end of buffer marker. Wrap around and
294  * read the message at the start of the buffer as *this* one, and
295  * return the one after that.
296  */
297  if (!msg->len) {
298  msg = (struct log *)log_buf;
299  return msg->len;
300  }
301  return idx + msg->len;
302 }
303 
304 /* insert record into the buffer, discard old ones, update heads */
305 static void log_store(int facility, int level,
306  enum log_flags flags, u64 ts_nsec,
307  const char *dict, u16 dict_len,
308  const char *text, u16 text_len)
309 {
310  struct log *msg;
311  u32 size, pad_len;
312 
313  /* number of '\0' padding bytes to next message */
314  size = sizeof(struct log) + text_len + dict_len;
315  pad_len = (-size) & (LOG_ALIGN - 1);
316  size += pad_len;
317 
318  while (log_first_seq < log_next_seq) {
319  u32 free;
320 
321  if (log_next_idx > log_first_idx)
322  free = max(log_buf_len - log_next_idx, log_first_idx);
323  else
324  free = log_first_idx - log_next_idx;
325 
326  if (free > size + sizeof(struct log))
327  break;
328 
329  /* drop old messages until we have enough continuous space */
330  log_first_idx = log_next(log_first_idx);
331  log_first_seq++;
332  }
333 
334  if (log_next_idx + size + sizeof(struct log) >= log_buf_len) {
335  /*
336  * This message + an additional empty header does not fit
337  * at the end of the buffer. Add an empty header with len == 0
338  * to signify a wrap around.
339  */
340  memset(log_buf + log_next_idx, 0, sizeof(struct log));
341  log_next_idx = 0;
342  }
343 
344  /* fill message */
345  msg = (struct log *)(log_buf + log_next_idx);
346  memcpy(log_text(msg), text, text_len);
347  msg->text_len = text_len;
348  memcpy(log_dict(msg), dict, dict_len);
349  msg->dict_len = dict_len;
350  msg->facility = facility;
351  msg->level = level & 7;
352  msg->flags = flags & 0x1f;
353  if (ts_nsec > 0)
354  msg->ts_nsec = ts_nsec;
355  else
356  msg->ts_nsec = local_clock();
357  memset(log_dict(msg) + dict_len, 0, pad_len);
358  msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
359 
360  /* insert message */
361  log_next_idx += msg->len;
362  log_next_seq++;
363 }
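
As a worked example of the size/padding arithmetic in log_store() (assuming sizeof(struct log) is 16 bytes and LOG_ALIGN is 4, as on a typical 64-bit build): text_len = 11 and dict_len = 23 give size = 16 + 11 + 23 = 50, pad_len = (-50) & 3 = 2, so msg->len = 52 -- matching the "record is 52 bytes long" example in the format comment at the top of this file.
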
364 
365 /* /dev/kmsg - userspace message inject/listen interface */
366 struct devkmsg_user {
367  u64 seq;
368  u32 idx;
369  enum log_flags prev;
370  struct mutex lock;
371  char buf[8192];
372 };
373 
374 static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
375  unsigned long count, loff_t pos)
376 {
377  char *buf, *line;
378  int i;
379  int level = default_message_loglevel;
380  int facility = 1; /* LOG_USER */
381  size_t len = iov_length(iv, count);
382  ssize_t ret = len;
383 
384  if (len > LOG_LINE_MAX)
385  return -EINVAL;
386  buf = kmalloc(len+1, GFP_KERNEL);
387  if (buf == NULL)
388  return -ENOMEM;
389 
390  line = buf;
391  for (i = 0; i < count; i++) {
392  if (copy_from_user(line, iv[i].iov_base, iv[i].iov_len)) {
393  ret = -EFAULT;
394  goto out;
395  }
396  line += iv[i].iov_len;
397  }
398 
399  /*
400  * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
401  * the decimal value represents 32bit, the lower 3 bit are the log
402  * level, the rest are the log facility.
403  *
404  * If no prefix or no userspace facility is specified, we
405  * enforce LOG_USER, to be able to reliably distinguish
406  * kernel-generated messages from userspace-injected ones.
407  */
408  line = buf;
409  if (line[0] == '<') {
410  char *endp = NULL;
411 
412  i = simple_strtoul(line+1, &endp, 10);
413  if (endp && endp[0] == '>') {
414  level = i & 7;
415  if (i >> 3)
416  facility = i >> 3;
417  endp++;
418  len -= endp - line;
419  line = endp;
420  }
421  }
422  line[len] = '\0';
423 
424  printk_emit(facility, level, NULL, 0, "%s", line);
425 out:
426  kfree(buf);
427  return ret;
428 }
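
A minimal userspace sketch (not part of printk.c) that feeds devkmsg_writev() above: the "<14>" prefix encodes facility LOG_USER (1) and level LOG_INFO (6), since (1 << 3) | 6 == 14, which is exactly what the prefix parsing extracts.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "<14>hello from userspace\n";
	int fd = open("/dev/kmsg", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, msg, strlen(msg)) < 0)	/* injected with facility LOG_USER */
		return 1;
	close(fd);
	return 0;
}
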
429 
430 static ssize_t devkmsg_read(struct file *file, char __user *buf,
431  size_t count, loff_t *ppos)
432 {
433  struct devkmsg_user *user = file->private_data;
434  struct log *msg;
435  u64 ts_usec;
436  size_t i;
437  char cont = '-';
438  size_t len;
439  ssize_t ret;
440 
441  if (!user)
442  return -EBADF;
443 
444  ret = mutex_lock_interruptible(&user->lock);
445  if (ret)
446  return ret;
447  raw_spin_lock_irq(&logbuf_lock);
448  while (user->seq == log_next_seq) {
449  if (file->f_flags & O_NONBLOCK) {
450  ret = -EAGAIN;
451  raw_spin_unlock_irq(&logbuf_lock);
452  goto out;
453  }
454 
455  raw_spin_unlock_irq(&logbuf_lock);
456  ret = wait_event_interruptible(log_wait,
457  user->seq != log_next_seq);
458  if (ret)
459  goto out;
460  raw_spin_lock_irq(&logbuf_lock);
461  }
462 
463  if (user->seq < log_first_seq) {
464  /* our last seen message is gone, return error and reset */
465  user->idx = log_first_idx;
466  user->seq = log_first_seq;
467  ret = -EPIPE;
468  raw_spin_unlock_irq(&logbuf_lock);
469  goto out;
470  }
471 
472  msg = log_from_idx(user->idx);
473  ts_usec = msg->ts_nsec;
474  do_div(ts_usec, 1000);
475 
476  /*
477  * If we couldn't merge continuation line fragments during the print,
478  * export the stored flags to allow an optional external merge of the
479  * records. Merging the records isn't necessarily correct, like
480  * when we hit a race during printing. In most cases though, it produces
481  * more readable output. 'c' in the record flags marks the first
482  * fragment of a line, '+' the following ones.
483  */
484  if (msg->flags & LOG_CONT && !(user->prev & LOG_CONT))
485  cont = 'c';
486  else if ((msg->flags & LOG_CONT) ||
487  ((user->prev & LOG_CONT) && !(msg->flags & LOG_PREFIX)))
488  cont = '+';
489 
490  len = sprintf(user->buf, "%u,%llu,%llu,%c;",
491  (msg->facility << 3) | msg->level,
492  user->seq, ts_usec, cont);
493  user->prev = msg->flags;
494 
495  /* escape non-printable characters */
496  for (i = 0; i < msg->text_len; i++) {
497  unsigned char c = log_text(msg)[i];
498 
499  if (c < ' ' || c >= 127 || c == '\\')
500  len += sprintf(user->buf + len, "\\x%02x", c);
501  else
502  user->buf[len++] = c;
503  }
504  user->buf[len++] = '\n';
505 
506  if (msg->dict_len) {
507  bool line = true;
508 
509  for (i = 0; i < msg->dict_len; i++) {
510  unsigned char c = log_dict(msg)[i];
511 
512  if (line) {
513  user->buf[len++] = ' ';
514  line = false;
515  }
516 
517  if (c == '\0') {
518  user->buf[len++] = '\n';
519  line = true;
520  continue;
521  }
522 
523  if (c < ' ' || c >= 127 || c == '\\') {
524  len += sprintf(user->buf + len, "\\x%02x", c);
525  continue;
526  }
527 
528  user->buf[len++] = c;
529  }
530  user->buf[len++] = '\n';
531  }
532 
533  user->idx = log_next(user->idx);
534  user->seq++;
535  raw_spin_unlock_irq(&logbuf_lock);
536 
537  if (len > count) {
538  ret = -EINVAL;
539  goto out;
540  }
541 
542  if (copy_to_user(buf, user->buf, len)) {
543  ret = -EFAULT;
544  goto out;
545  }
546  ret = len;
547 out:
548  mutex_unlock(&user->lock);
549  return ret;
550 }
551 
552 static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
553 {
554  struct devkmsg_user *user = file->private_data;
555  loff_t ret = 0;
556 
557  if (!user)
558  return -EBADF;
559  if (offset)
560  return -ESPIPE;
561 
562  raw_spin_lock_irq(&logbuf_lock);
563  switch (whence) {
564  case SEEK_SET:
565  /* the first record */
566  user->idx = log_first_idx;
567  user->seq = log_first_seq;
568  break;
569  case SEEK_DATA:
570  /*
571  * The first record after the last SYSLOG_ACTION_CLEAR,
572  * like issued by 'dmesg -c'. Reading /dev/kmsg itself
573  * changes no global state, and does not clear anything.
574  */
575  user->idx = clear_idx;
576  user->seq = clear_seq;
577  break;
578  case SEEK_END:
579  /* after the last record */
580  user->idx = log_next_idx;
581  user->seq = log_next_seq;
582  break;
583  default:
584  ret = -EINVAL;
585  }
586  raw_spin_unlock_irq(&logbuf_lock);
587  return ret;
588 }
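
A minimal sketch (not part of printk.c; SEEK_DATA needs _GNU_SOURCE on glibc) exercising the three whence values handled above from a /dev/kmsg reader's point of view:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	lseek(fd, 0, SEEK_SET);		/* oldest record in the buffer */
	lseek(fd, 0, SEEK_DATA);	/* first record after the last SYSLOG_ACTION_CLEAR */
	lseek(fd, 0, SEEK_END);		/* only records logged from now on */
	close(fd);
	return 0;
}
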
589 
590 static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
591 {
592  struct devkmsg_user *user = file->private_data;
593  int ret = 0;
594 
595  if (!user)
596  return POLLERR|POLLNVAL;
597 
598  poll_wait(file, &log_wait, wait);
599 
600  raw_spin_lock_irq(&logbuf_lock);
601  if (user->seq < log_next_seq) {
602  /* return error when data has vanished underneath us */
603  if (user->seq < log_first_seq)
604  ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
     else
605  ret = POLLIN|POLLRDNORM;
606  }
607  raw_spin_unlock_irq(&logbuf_lock);
608 
609  return ret;
610 }
611 
612 static int devkmsg_open(struct inode *inode, struct file *file)
613 {
614  struct devkmsg_user *user;
615  int err;
616 
617  /* write-only does not need any file context */
618  if ((file->f_flags & O_ACCMODE) == O_WRONLY)
619  return 0;
620 
621  err = security_syslog(SYSLOG_ACTION_READ_ALL);
622  if (err)
623  return err;
624 
625  user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
626  if (!user)
627  return -ENOMEM;
628 
629  mutex_init(&user->lock);
630 
631  raw_spin_lock_irq(&logbuf_lock);
632  user->idx = log_first_idx;
633  user->seq = log_first_seq;
634  raw_spin_unlock_irq(&logbuf_lock);
635 
636  file->private_data = user;
637  return 0;
638 }
639 
640 static int devkmsg_release(struct inode *inode, struct file *file)
641 {
642  struct devkmsg_user *user = file->private_data;
643 
644  if (!user)
645  return 0;
646 
647  mutex_destroy(&user->lock);
648  kfree(user);
649  return 0;
650 }
651 
652 const struct file_operations kmsg_fops = {
653  .open = devkmsg_open,
654  .read = devkmsg_read,
655  .aio_write = devkmsg_writev,
656  .llseek = devkmsg_llseek,
657  .poll = devkmsg_poll,
658  .release = devkmsg_release,
659 };
660 
661 #ifdef CONFIG_KEXEC
662 /*
663  * This appends the listed symbols to /proc/vmcoreinfo
664  *
665  * /proc/vmcoreinfo is used by various utilities, like crash and makedumpfile to
666  * obtain access to symbols that are otherwise very difficult to locate. These
667  * symbols are specifically used so that utilities can access and extract the
668  * dmesg log from a vmcore file after a crash.
669  */
670 void log_buf_kexec_setup(void)
671 {
672  VMCOREINFO_SYMBOL(log_buf);
673  VMCOREINFO_SYMBOL(log_buf_len);
674  VMCOREINFO_SYMBOL(log_first_idx);
675  VMCOREINFO_SYMBOL(log_next_idx);
676  /*
677  * Export struct log size and field offsets. User space tools can
678  * parse it and detect any changes to structure down the line.
679  */
680  VMCOREINFO_STRUCT_SIZE(log);
681  VMCOREINFO_OFFSET(log, ts_nsec);
682  VMCOREINFO_OFFSET(log, len);
683  VMCOREINFO_OFFSET(log, text_len);
684  VMCOREINFO_OFFSET(log, dict_len);
685 }
686 #endif
687 
688 /* requested log_buf_len from kernel cmdline */
689 static unsigned long __initdata new_log_buf_len;
690 
691 /* save requested log_buf_len since it's too early to process it */
692 static int __init log_buf_len_setup(char *str)
693 {
694  unsigned size = memparse(str, &str);
695 
696  if (size)
697  size = roundup_pow_of_two(size);
698  if (size > log_buf_len)
699  new_log_buf_len = size;
700 
701  return 0;
702 }
703 early_param("log_buf_len", log_buf_len_setup);
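
For example, booting with log_buf_len=1M requests a 1 MiB buffer (illustrative value); the size is rounded up to a power of two, so log_buf_len=300K would become 512 KiB, and the request is only recorded when it exceeds the built-in __LOG_BUF_LEN.
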
704 
705 void __init setup_log_buf(int early)
706 {
707  unsigned long flags;
708  char *new_log_buf;
709  int free;
710 
711  if (!new_log_buf_len)
712  return;
713 
714  if (early) {
715  unsigned long mem;
716 
717  mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
718  if (!mem)
719  return;
720  new_log_buf = __va(mem);
721  } else {
722  new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
723  }
724 
725  if (unlikely(!new_log_buf)) {
726  pr_err("log_buf_len: %ld bytes not available\n",
727  new_log_buf_len);
728  return;
729  }
730 
731  raw_spin_lock_irqsave(&logbuf_lock, flags);
732  log_buf_len = new_log_buf_len;
733  log_buf = new_log_buf;
734  new_log_buf_len = 0;
735  free = __LOG_BUF_LEN - log_next_idx;
736  memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
737  raw_spin_unlock_irqrestore(&logbuf_lock, flags);
738 
739  pr_info("log_buf_len: %d\n", log_buf_len);
740  pr_info("early log buf free: %d(%d%%)\n",
741  free, (free * 100) / __LOG_BUF_LEN);
742 }
743 
744 #ifdef CONFIG_BOOT_PRINTK_DELAY
745 
746 static int boot_delay; /* msecs delay after each printk during bootup */
747 static unsigned long long loops_per_msec; /* based on boot_delay */
748 
749 static int __init boot_delay_setup(char *str)
750 {
751  unsigned long lpj;
752 
753  lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
754  loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
755 
756  get_option(&str, &boot_delay);
757  if (boot_delay > 10 * 1000)
758  boot_delay = 0;
759 
760  pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
761  "HZ: %d, loops_per_msec: %llu\n",
762  boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
763  return 1;
764 }
765 __setup("boot_delay=", boot_delay_setup);
766 
767 static void boot_delay_msec(void)
768 {
769  unsigned long long k;
770  unsigned long timeout;
771 
772  if (boot_delay == 0 || system_state != SYSTEM_BOOTING)
773  return;
774 
775  k = (unsigned long long)loops_per_msec * boot_delay;
776 
777  timeout = jiffies + msecs_to_jiffies(boot_delay);
778  while (k) {
779  k--;
780  cpu_relax();
781  /*
782  * use (volatile) jiffies to prevent
783  * compiler reduction; loop termination via jiffies
784  * is secondary and may or may not happen.
785  */
786  if (time_after(jiffies, timeout))
787  break;
788  touch_nmi_watchdog();
789  }
790 }
791 #else
792 static inline void boot_delay_msec(void)
793 {
794 }
795 #endif
796 
797 #ifdef CONFIG_SECURITY_DMESG_RESTRICT
798 int dmesg_restrict = 1;
799 #else
800 int dmesg_restrict;
801 #endif
802 
803 static int syslog_action_restricted(int type)
804 {
805  if (dmesg_restrict)
806  return 1;
807  /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
808  return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
809 }
810 
811 static int check_syslog_permissions(int type, bool from_file)
812 {
813  /*
814  * If this is from /proc/kmsg and we've already opened it, then we've
815  * already done the capabilities checks at open time.
816  */
817  if (from_file && type != SYSLOG_ACTION_OPEN)
818  return 0;
819 
820  if (syslog_action_restricted(type)) {
821  if (capable(CAP_SYSLOG))
822  return 0;
823  /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
824  if (capable(CAP_SYS_ADMIN)) {
825  printk_once(KERN_WARNING "%s (%d): "
826  "Attempt to access syslog with CAP_SYS_ADMIN "
827  "but no CAP_SYSLOG (deprecated).\n",
828  current->comm, task_pid_nr(current));
829  return 0;
830  }
831  return -EPERM;
832  }
833  return 0;
834 }
835 
836 #if defined(CONFIG_PRINTK_TIME)
837 static bool printk_time = 1;
838 #else
839 static bool printk_time;
840 #endif
841 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
842 
843 static size_t print_time(u64 ts, char *buf)
844 {
845  unsigned long rem_nsec;
846 
847  if (!printk_time)
848  return 0;
849 
850  if (!buf)
851  return 15;
852 
853  rem_nsec = do_div(ts, 1000000000);
854  return sprintf(buf, "[%5lu.%06lu] ",
855  (unsigned long)ts, rem_nsec / 1000);
856 }
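
As a worked example: a record stamped ts = 5006000123 ns splits into ts = 5 and rem_nsec = 6000123 above, producing "[    5.006000] " -- exactly the 15 characters the buf == NULL branch reserves for the common case.
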
857 
858 static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
859 {
860  size_t len = 0;
861  unsigned int prefix = (msg->facility << 3) | msg->level;
862 
863  if (syslog) {
864  if (buf) {
865  len += sprintf(buf, "<%u>", prefix);
866  } else {
867  len += 3;
868  if (prefix > 999)
869  len += 3;
870  else if (prefix > 99)
871  len += 2;
872  else if (prefix > 9)
873  len++;
874  }
875  }
876 
877  len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
878  return len;
879 }
880 
881 static size_t msg_print_text(const struct log *msg, enum log_flags prev,
882  bool syslog, char *buf, size_t size)
883 {
884  const char *text = log_text(msg);
885  size_t text_size = msg->text_len;
886  bool prefix = true;
887  bool newline = true;
888  size_t len = 0;
889 
890  if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
891  prefix = false;
892 
893  if (msg->flags & LOG_CONT) {
894  if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
895  prefix = false;
896 
897  if (!(msg->flags & LOG_NEWLINE))
898  newline = false;
899  }
900 
901  do {
902  const char *next = memchr(text, '\n', text_size);
903  size_t text_len;
904 
905  if (next) {
906  text_len = next - text;
907  next++;
908  text_size -= next - text;
909  } else {
910  text_len = text_size;
911  }
912 
913  if (buf) {
914  if (print_prefix(msg, syslog, NULL) +
915  text_len + 1 >= size - len)
916  break;
917 
918  if (prefix)
919  len += print_prefix(msg, syslog, buf + len);
920  memcpy(buf + len, text, text_len);
921  len += text_len;
922  if (next || newline)
923  buf[len++] = '\n';
924  } else {
925  /* SYSLOG_ACTION_* buffer size only calculation */
926  if (prefix)
927  len += print_prefix(msg, syslog, NULL);
928  len += text_len;
929  if (next || newline)
930  len++;
931  }
932 
933  prefix = true;
934  text = next;
935  } while (text);
936 
937  return len;
938 }
939 
940 static int syslog_print(char __user *buf, int size)
941 {
942  char *text;
943  struct log *msg;
944  int len = 0;
945 
946  text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
947  if (!text)
948  return -ENOMEM;
949 
950  while (size > 0) {
951  size_t n;
952  size_t skip;
953 
954  raw_spin_lock_irq(&logbuf_lock);
955  if (syslog_seq < log_first_seq) {
956  /* messages are gone, move to first one */
957  syslog_seq = log_first_seq;
958  syslog_idx = log_first_idx;
959  syslog_prev = 0;
960  syslog_partial = 0;
961  }
962  if (syslog_seq == log_next_seq) {
963  raw_spin_unlock_irq(&logbuf_lock);
964  break;
965  }
966 
967  skip = syslog_partial;
968  msg = log_from_idx(syslog_idx);
969  n = msg_print_text(msg, syslog_prev, true, text,
970  LOG_LINE_MAX + PREFIX_MAX);
971  if (n - syslog_partial <= size) {
972  /* message fits into buffer, move forward */
973  syslog_idx = log_next(syslog_idx);
974  syslog_seq++;
975  syslog_prev = msg->flags;
976  n -= syslog_partial;
977  syslog_partial = 0;
978  } else if (!len) {
979  /* partial read(), remember position */
980  n = size;
981  syslog_partial += n;
982  } else
983  n = 0;
984  raw_spin_unlock_irq(&logbuf_lock);
985 
986  if (!n)
987  break;
988 
989  if (copy_to_user(buf, text + skip, n)) {
990  if (!len)
991  len = -EFAULT;
992  break;
993  }
994 
995  len += n;
996  size -= n;
997  buf += n;
998  }
999 
1000  kfree(text);
1001  return len;
1002 }
1003 
1004 static int syslog_print_all(char __user *buf, int size, bool clear)
1005 {
1006  char *text;
1007  int len = 0;
1008 
1009  text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
1010  if (!text)
1011  return -ENOMEM;
1012 
1013  raw_spin_lock_irq(&logbuf_lock);
1014  if (buf) {
1015  u64 next_seq;
1016  u64 seq;
1017  u32 idx;
1018  enum log_flags prev;
1019 
1020  if (clear_seq < log_first_seq) {
1021  /* messages are gone, move to first available one */
1022  clear_seq = log_first_seq;
1023  clear_idx = log_first_idx;
1024  }
1025 
1026  /*
1027  * Find first record that fits, including all following records,
1028  * into the user-provided buffer for this dump.
1029  */
1030  seq = clear_seq;
1031  idx = clear_idx;
1032  prev = 0;
1033  while (seq < log_next_seq) {
1034  struct log *msg = log_from_idx(idx);
1035 
1036  len += msg_print_text(msg, prev, true, NULL, 0);
1037  prev = msg->flags;
1038  idx = log_next(idx);
1039  seq++;
1040  }
1041 
1042  /* move first record forward until length fits into the buffer */
1043  seq = clear_seq;
1044  idx = clear_idx;
1045  prev = 0;
1046  while (len > size && seq < log_next_seq) {
1047  struct log *msg = log_from_idx(idx);
1048 
1049  len -= msg_print_text(msg, prev, true, NULL, 0);
1050  prev = msg->flags;
1051  idx = log_next(idx);
1052  seq++;
1053  }
1054 
1055  /* last message fitting into this dump */
1056  next_seq = log_next_seq;
1057 
1058  len = 0;
1059  prev = 0;
1060  while (len >= 0 && seq < next_seq) {
1061  struct log *msg = log_from_idx(idx);
1062  int textlen;
1063 
1064  textlen = msg_print_text(msg, prev, true, text,
1065  LOG_LINE_MAX + PREFIX_MAX);
1066  if (textlen < 0) {
1067  len = textlen;
1068  break;
1069  }
1070  idx = log_next(idx);
1071  seq++;
1072  prev = msg->flags;
1073 
1074  raw_spin_unlock_irq(&logbuf_lock);
1075  if (copy_to_user(buf + len, text, textlen))
1076  len = -EFAULT;
1077  else
1078  len += textlen;
1079  raw_spin_lock_irq(&logbuf_lock);
1080 
1081  if (seq < log_first_seq) {
1082  /* messages are gone, move to next one */
1083  seq = log_first_seq;
1084  idx = log_first_idx;
1085  prev = 0;
1086  }
1087  }
1088  }
1089 
1090  if (clear) {
1091  clear_seq = log_next_seq;
1092  clear_idx = log_next_idx;
1093  }
1094  raw_spin_unlock_irq(&logbuf_lock);
1095 
1096  kfree(text);
1097  return len;
1098 }
1099 
1100 int do_syslog(int type, char __user *buf, int len, bool from_file)
1101 {
1102  bool clear = false;
1103  static int saved_console_loglevel = -1;
1104  int error;
1105 
1106  error = check_syslog_permissions(type, from_file);
1107  if (error)
1108  goto out;
1109 
1110  error = security_syslog(type);
1111  if (error)
1112  return error;
1113 
1114  switch (type) {
1115  case SYSLOG_ACTION_CLOSE: /* Close log */
1116  break;
1117  case SYSLOG_ACTION_OPEN: /* Open log */
1118  break;
1119  case SYSLOG_ACTION_READ: /* Read from log */
1120  error = -EINVAL;
1121  if (!buf || len < 0)
1122  goto out;
1123  error = 0;
1124  if (!len)
1125  goto out;
1126  if (!access_ok(VERIFY_WRITE, buf, len)) {
1127  error = -EFAULT;
1128  goto out;
1129  }
1130  error = wait_event_interruptible(log_wait,
1131  syslog_seq != log_next_seq);
1132  if (error)
1133  goto out;
1134  error = syslog_print(buf, len);
1135  break;
1136  /* Read/clear last kernel messages */
1137  case SYSLOG_ACTION_READ_CLEAR:
1138  clear = true;
1139  /* FALL THRU */
1140  /* Read last kernel messages */
1141  case SYSLOG_ACTION_READ_ALL:
1142  error = -EINVAL;
1143  if (!buf || len < 0)
1144  goto out;
1145  error = 0;
1146  if (!len)
1147  goto out;
1148  if (!access_ok(VERIFY_WRITE, buf, len)) {
1149  error = -EFAULT;
1150  goto out;
1151  }
1152  error = syslog_print_all(buf, len, clear);
1153  break;
1154  /* Clear ring buffer */
1155  case SYSLOG_ACTION_CLEAR:
1156  syslog_print_all(NULL, 0, true);
1157  break;
1158  /* Disable logging to console */
1159  case SYSLOG_ACTION_CONSOLE_OFF:
1160  if (saved_console_loglevel == -1)
1161  saved_console_loglevel = console_loglevel;
1162  console_loglevel = minimum_console_loglevel;
1163  break;
1164  /* Enable logging to console */
1165  case SYSLOG_ACTION_CONSOLE_ON:
1166  if (saved_console_loglevel != -1) {
1167  console_loglevel = saved_console_loglevel;
1168  saved_console_loglevel = -1;
1169  }
1170  break;
1171  /* Set level of messages printed to console */
1172  case SYSLOG_ACTION_CONSOLE_LEVEL:
1173  error = -EINVAL;
1174  if (len < 1 || len > 8)
1175  goto out;
1176  if (len < minimum_console_loglevel)
1177  len = minimum_console_loglevel;
1178  console_loglevel = len;
1179  /* Implicitly re-enable logging to console */
1180  saved_console_loglevel = -1;
1181  error = 0;
1182  break;
1183  /* Number of chars in the log buffer */
1184  case SYSLOG_ACTION_SIZE_UNREAD:
1185  raw_spin_lock_irq(&logbuf_lock);
1186  if (syslog_seq < log_first_seq) {
1187  /* messages are gone, move to first one */
1188  syslog_seq = log_first_seq;
1189  syslog_idx = log_first_idx;
1190  syslog_prev = 0;
1191  syslog_partial = 0;
1192  }
1193  if (from_file) {
1194  /*
1195  * Short-cut for poll() on /proc/kmsg, which simply checks
1196  * for pending data, not the size; return the count of
1197  * records, not the length.
1198  */
1199  error = log_next_idx - syslog_idx;
1200  } else {
1201  u64 seq = syslog_seq;
1202  u32 idx = syslog_idx;
1203  enum log_flags prev = syslog_prev;
1204 
1205  error = 0;
1206  while (seq < log_next_seq) {
1207  struct log *msg = log_from_idx(idx);
1208 
1209  error += msg_print_text(msg, prev, true, NULL, 0);
1210  idx = log_next(idx);
1211  seq++;
1212  prev = msg->flags;
1213  }
1214  error -= syslog_partial;
1215  }
1216  raw_spin_unlock_irq(&logbuf_lock);
1217  break;
1218  /* Size of the log buffer */
1219  case SYSLOG_ACTION_SIZE_BUFFER:
1220  error = log_buf_len;
1221  break;
1222  default:
1223  error = -EINVAL;
1224  break;
1225  }
1226 out:
1227  return error;
1228 }
1229 
1230 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1231 {
1232  return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
1233 }
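
The syscall above is normally reached through glibc's klogctl() wrapper; the following minimal userspace sketch (not part of printk.c) sizes the buffer and dumps it, using the SYSLOG_ACTION_SIZE_BUFFER (10) and SYSLOG_ACTION_READ_ALL (3) commands handled by do_syslog():

#include <stdio.h>
#include <stdlib.h>
#include <sys/klog.h>

int main(void)
{
	int size = klogctl(10, NULL, 0);	/* SYSLOG_ACTION_SIZE_BUFFER */
	char *buf;
	int len;

	if (size <= 0)
		return 1;
	buf = malloc(size);
	if (!buf)
		return 1;
	len = klogctl(3, buf, size);		/* SYSLOG_ACTION_READ_ALL */
	if (len > 0)
		fwrite(buf, 1, len, stdout);
	free(buf);
	return 0;
}
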
1234 
1235 static bool __read_mostly ignore_loglevel;
1236 
1237 static int __init ignore_loglevel_setup(char *str)
1238 {
1239  ignore_loglevel = 1;
1240  printk(KERN_INFO "debug: ignoring loglevel setting.\n");
1241 
1242  return 0;
1243 }
1244 
1245 early_param("ignore_loglevel", ignore_loglevel_setup);
1246 module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
1247 MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to "
1248  "print all kernel messages to the console.");
1249 
1250 /*
1251  * Call the console drivers, asking them to write out
1252  * log_buf[start] to log_buf[end - 1].
1253  * The console_lock must be held.
1254  */
1255 static void call_console_drivers(int level, const char *text, size_t len)
1256 {
1257  struct console *con;
1258 
1259  trace_console(text, 0, len, len);
1260 
1261  if (level >= console_loglevel && !ignore_loglevel)
1262  return;
1263  if (!console_drivers)
1264  return;
1265 
1266  for_each_console(con) {
1267  if (exclusive_console && con != exclusive_console)
1268  continue;
1269  if (!(con->flags & CON_ENABLED))
1270  continue;
1271  if (!con->write)
1272  continue;
1273  if (!cpu_online(smp_processor_id()) &&
1274  !(con->flags & CON_ANYTIME))
1275  continue;
1276  con->write(con, text, len);
1277  }
1278 }
1279 
1280 /*
1281  * Zap console related locks when oopsing. Only zap at most once
1282  * every 10 seconds, to leave time for slow consoles to print a
1283  * full oops.
1284  */
1285 static void zap_locks(void)
1286 {
1287  static unsigned long oops_timestamp;
1288 
1289  if (time_after_eq(jiffies, oops_timestamp) &&
1290  !time_after(jiffies, oops_timestamp + 30 * HZ))
1291  return;
1292 
1293  oops_timestamp = jiffies;
1294 
1295  debug_locks_off();
1296  /* If a crash is occurring, make sure we can't deadlock */
1297  raw_spin_lock_init(&logbuf_lock);
1298  /* And make sure that we print immediately */
1299  sema_init(&console_sem, 1);
1300 }
1301 
1302 /* Check if we have any console registered that can be called early in boot. */
1303 static int have_callable_console(void)
1304 {
1305  struct console *con;
1306 
1307  for_each_console(con)
1308  if (con->flags & CON_ANYTIME)
1309  return 1;
1310 
1311  return 0;
1312 }
1313 
1314 /*
1315  * Can we actually use the console at this time on this cpu?
1316  *
1317  * Console drivers may assume that per-cpu resources have
1318  * been allocated. So unless they're explicitly marked as
1319  * being able to cope (CON_ANYTIME) don't call them until
1320  * this CPU is officially up.
1321  */
1322 static inline int can_use_console(unsigned int cpu)
1323 {
1324  return cpu_online(cpu) || have_callable_console();
1325 }
1326 
1327 /*
1328  * Try to get console ownership to actually show the kernel
1329  * messages from a 'printk'. Return true (and with the
1330  * console_lock held, and 'console_locked' set) if it
1331  * is successful, false otherwise.
1332  *
1333  * This gets called with the 'logbuf_lock' spinlock held and
1334  * interrupts disabled. It should return with 'logbuf_lock'
1335  * released but interrupts still disabled.
1336  */
1337 static int console_trylock_for_printk(unsigned int cpu)
1338  __releases(&logbuf_lock)
1339 {
1340  int retval = 0, wake = 0;
1341 
1342  if (console_trylock()) {
1343  retval = 1;
1344 
1345  /*
1346  * If we can't use the console, we need to release
1347  * the console semaphore by hand to avoid flushing
1348  * the buffer. We need to hold the console semaphore
1349  * in order to do this test safely.
1350  */
1351  if (!can_use_console(cpu)) {
1352  console_locked = 0;
1353  wake = 1;
1354  retval = 0;
1355  }
1356  }
1357  logbuf_cpu = UINT_MAX;
1358  if (wake)
1359  up(&console_sem);
1360  raw_spin_unlock(&logbuf_lock);
1361  return retval;
1362 }
1363 
1364 int printk_delay_msec __read_mostly;
1365 
1366 static inline void printk_delay(void)
1367 {
1368  if (unlikely(printk_delay_msec)) {
1369  int m = printk_delay_msec;
1370 
1371  while (m--) {
1372  mdelay(1);
1373  touch_nmi_watchdog();
1374  }
1375  }
1376 }
1377 
1378 /*
1379  * Continuation lines are buffered, and not committed to the record buffer
1380  * until the line is complete, or a race forces it. The line fragments
1381  * though, are printed immediately to the consoles to ensure everything has
1382  * reached the console in case of a kernel crash.
1383  */
1384 static struct cont {
1385  char buf[LOG_LINE_MAX];
1386  size_t len; /* length == 0 means unused buffer */
1387  size_t cons; /* bytes written to console */
1388  struct task_struct *owner; /* task of first print */
1389  u64 ts_nsec; /* time of first print */
1390  u8 level; /* log level of first message */
1391  u8 facility; /* log facility of first message */
1392  enum log_flags flags; /* prefix, newline flags */
1393  bool flushed:1; /* buffer sealed and committed */
1394 } cont;
1395 
1396 static void cont_flush(enum log_flags flags)
1397 {
1398  if (cont.flushed)
1399  return;
1400  if (cont.len == 0)
1401  return;
1402 
1403  if (cont.cons) {
1404  /*
1405  * If a fragment of this line was directly flushed to the
1406  * console; wait for the console to pick up the rest of the
1407  * line. LOG_NOCONS suppresses a duplicated output.
1408  */
1409  log_store(cont.facility, cont.level, flags | LOG_NOCONS,
1410  cont.ts_nsec, NULL, 0, cont.buf, cont.len);
1411  cont.flags = flags;
1412  cont.flushed = true;
1413  } else {
1414  /*
1415  * If no fragment of this line ever reached the console,
1416  * just submit it to the store and free the buffer.
1417  */
1418  log_store(cont.facility, cont.level, flags, 0,
1419  NULL, 0, cont.buf, cont.len);
1420  cont.len = 0;
1421  }
1422 }
1423 
1424 static bool cont_add(int facility, int level, const char *text, size_t len)
1425 {
1426  if (cont.len && cont.flushed)
1427  return false;
1428 
1429  if (cont.len + len > sizeof(cont.buf)) {
1430  /* the line gets too long, split it up in separate records */
1431  cont_flush(LOG_CONT);
1432  return false;
1433  }
1434 
1435  if (!cont.len) {
1436  cont.facility = facility;
1437  cont.level = level;
1438  cont.owner = current;
1439  cont.ts_nsec = local_clock();
1440  cont.flags = 0;
1441  cont.cons = 0;
1442  cont.flushed = false;
1443  }
1444 
1445  memcpy(cont.buf + cont.len, text, len);
1446  cont.len += len;
1447 
1448  if (cont.len > (sizeof(cont.buf) * 80) / 100)
1449  cont_flush(LOG_CONT);
1450 
1451  return true;
1452 }
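
An illustrative (hypothetical) caller of the continuation machinery: fragments printed without a trailing newline by the same task are gathered by cont_add() and end up in the record buffer as a single line once the final KERN_CONT piece supplies the '\n'.

static void __maybe_unused cont_example(const int *vals, int n)
{
	int i;

	printk(KERN_INFO "values:");
	for (i = 0; i < n; i++)
		printk(KERN_CONT " %d", vals[i]);
	printk(KERN_CONT "\n");	/* completes and stores the buffered line */
}
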
1453 
1454 static size_t cont_print_text(char *text, size_t size)
1455 {
1456  size_t textlen = 0;
1457  size_t len;
1458 
1459  if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) {
1460  textlen += print_time(cont.ts_nsec, text);
1461  size -= textlen;
1462  }
1463 
1464  len = cont.len - cont.cons;
1465  if (len > 0) {
1466  if (len+1 > size)
1467  len = size-1;
1468  memcpy(text + textlen, cont.buf + cont.cons, len);
1469  textlen += len;
1470  cont.cons = cont.len;
1471  }
1472 
1473  if (cont.flushed) {
1474  if (cont.flags & LOG_NEWLINE)
1475  text[textlen++] = '\n';
1476  /* got everything, release buffer */
1477  cont.len = 0;
1478  }
1479  return textlen;
1480 }
1481 
1482 asmlinkage int vprintk_emit(int facility, int level,
1483  const char *dict, size_t dictlen,
1484  const char *fmt, va_list args)
1485 {
1486  static int recursion_bug;
1487  static char textbuf[LOG_LINE_MAX];
1488  char *text = textbuf;
1489  size_t text_len;
1490  enum log_flags lflags = 0;
1491  unsigned long flags;
1492  int this_cpu;
1493  int printed_len = 0;
1494 
1495  boot_delay_msec();
1496  printk_delay();
1497 
1498  /* This stops the holder of console_sem just where we want him */
1499  local_irq_save(flags);
1500  this_cpu = smp_processor_id();
1501 
1502  /*
1503  * Ouch, printk recursed into itself!
1504  */
1505  if (unlikely(logbuf_cpu == this_cpu)) {
1506  /*
1507  * If a crash is occurring during printk() on this CPU,
1508  * then try to get the crash message out but make sure
1509  * we can't deadlock. Otherwise just return to avoid the
1510  * recursion and return - but flag the recursion so that
1511  * it can be printed at the next appropriate moment:
1512  */
1513  if (!oops_in_progress && !lockdep_recursing(current)) {
1514  recursion_bug = 1;
1515  goto out_restore_irqs;
1516  }
1517  zap_locks();
1518  }
1519 
1520  lockdep_off();
1521  raw_spin_lock(&logbuf_lock);
1522  logbuf_cpu = this_cpu;
1523 
1524  if (recursion_bug) {
1525  static const char recursion_msg[] =
1526  "BUG: recent printk recursion!";
1527 
1528  recursion_bug = 0;
1529  printed_len += strlen(recursion_msg);
1530  /* emit KERN_CRIT message */
1531  log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
1532  NULL, 0, recursion_msg, printed_len);
1533  }
1534 
1535  /*
1536  * The printf needs to come first; we need the syslog
1537  * prefix which might be passed-in as a parameter.
1538  */
1539  text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
1540 
1541  /* mark and strip a trailing newline */
1542  if (text_len && text[text_len-1] == '\n') {
1543  text_len--;
1544  lflags |= LOG_NEWLINE;
1545  }
1546 
1547  /* strip kernel syslog prefix and extract log level or control flags */
1548  if (facility == 0) {
1549  int kern_level = printk_get_level(text);
1550 
1551  if (kern_level) {
1552  const char *end_of_header = printk_skip_level(text);
1553  switch (kern_level) {
1554  case '0' ... '7':
1555  if (level == -1)
1556  level = kern_level - '0';
1557  case 'd': /* KERN_DEFAULT */
1558  lflags |= LOG_PREFIX;
1559  case 'c': /* KERN_CONT */
1560  break;
1561  }
1562  text_len -= end_of_header - text;
1563  text = (char *)end_of_header;
1564  }
1565  }
1566 
1567  if (level == -1)
1568  level = default_message_loglevel;
1569 
1570  if (dict)
1571  lflags |= LOG_PREFIX|LOG_NEWLINE;
1572 
1573  if (!(lflags & LOG_NEWLINE)) {
1574  /*
1575  * Flush the conflicting buffer. An earlier newline was missing,
1576  * or another task also prints continuation lines.
1577  */
1578  if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
1579  cont_flush(LOG_NEWLINE);
1580 
1581  /* buffer line if possible, otherwise store it right away */
1582  if (!cont_add(facility, level, text, text_len))
1583  log_store(facility, level, lflags | LOG_CONT, 0,
1584  dict, dictlen, text, text_len);
1585  } else {
1586  bool stored = false;
1587 
1588  /*
1589  * If an earlier newline was missing and it was the same task,
1590  * either merge it with the current buffer and flush, or if
1591  * there was a race with interrupts (prefix == true) then just
1592  * flush it out and store this line separately.
1593  */
1594  if (cont.len && cont.owner == current) {
1595  if (!(lflags & LOG_PREFIX))
1596  stored = cont_add(facility, level, text, text_len);
1597  cont_flush(LOG_NEWLINE);
1598  }
1599 
1600  if (!stored)
1601  log_store(facility, level, lflags, 0,
1602  dict, dictlen, text, text_len);
1603  }
1604  printed_len += text_len;
1605 
1606  /*
1607  * Try to acquire and then immediately release the console semaphore.
1608  * The release will print out buffers and wake up /dev/kmsg and syslog()
1609  * users.
1610  *
1611  * The console_trylock_for_printk() function will release 'logbuf_lock'
1612  * regardless of whether it actually gets the console semaphore or not.
1613  */
1614  if (console_trylock_for_printk(this_cpu))
1615  console_unlock();
1616 
1617  lockdep_on();
1618 out_restore_irqs:
1619  local_irq_restore(flags);
1620 
1621  return printed_len;
1622 }
1623 EXPORT_SYMBOL(vprintk_emit);
1624 
1625 asmlinkage int vprintk(const char *fmt, va_list args)
1626 {
1627  return vprintk_emit(0, -1, NULL, 0, fmt, args);
1628 }
1629 EXPORT_SYMBOL(vprintk);
1630 
1631 asmlinkage int printk_emit(int facility, int level,
1632  const char *dict, size_t dictlen,
1633  const char *fmt, ...)
1634 {
1635  va_list args;
1636  int r;
1637 
1638  va_start(args, fmt);
1639  r = vprintk_emit(facility, level, dict, dictlen, fmt, args);
1640  va_end(args);
1641 
1642  return r;
1643 }
1644 EXPORT_SYMBOL(printk_emit);
1645 
1667 asmlinkage int printk(const char *fmt, ...)
1668 {
1669  va_list args;
1670  int r;
1671 
1672 #ifdef CONFIG_KGDB_KDB
1673  if (unlikely(kdb_trap_printk)) {
1674  va_start(args, fmt);
1675  r = vkdb_printf(fmt, args);
1676  va_end(args);
1677  return r;
1678  }
1679 #endif
1680  va_start(args, fmt);
1681  r = vprintk_emit(0, -1, NULL, 0, fmt, args);
1682  va_end(args);
1683 
1684  return r;
1685 }
1686 EXPORT_SYMBOL(printk);
1687 
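
Illustrative (hypothetical) callers of printk(): an explicit KERN_* prefix is parsed out by vprintk_emit() above, while a message without one falls back to default_message_loglevel; the pr_*() helpers from <linux/printk.h> expand to such prefixed calls.

static void __maybe_unused printk_example(int err)
{
	printk(KERN_ERR "something failed: %d\n", err);	/* level 3 */
	printk("no prefix, default message loglevel\n");
	pr_warn("equivalent to printk(KERN_WARNING ...)\n");
}
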
1688 #else /* CONFIG_PRINTK */
1689 
1690 #define LOG_LINE_MAX 0
1691 #define PREFIX_MAX 0
1693 static u64 syslog_seq;
1694 static u32 syslog_idx;
1695 static u64 console_seq;
1696 static u32 console_idx;
1697 static enum log_flags syslog_prev;
1698 static u64 log_first_seq;
1699 static u32 log_first_idx;
1700 static u64 log_next_seq;
1701 static enum log_flags console_prev;
1702 static struct cont {
1703  size_t len;
1704  size_t cons;
1705  u8 level;
1706  bool flushed:1;
1707 } cont;
1708 static struct log *log_from_idx(u32 idx) { return NULL; }
1709 static u32 log_next(u32 idx) { return 0; }
1710 static void call_console_drivers(int level, const char *text, size_t len) {}
1711 static size_t msg_print_text(const struct log *msg, enum log_flags prev,
1712  bool syslog, char *buf, size_t size) { return 0; }
1713 static size_t cont_print_text(char *text, size_t size) { return 0; }
1714 
1715 #endif /* CONFIG_PRINTK */
1716 
1717 static int __add_preferred_console(char *name, int idx, char *options,
1718  char *brl_options)
1719 {
1720  struct console_cmdline *c;
1721  int i;
1722 
1723  /*
1724  * See if this tty is not yet registered, and
1725  * if we have a slot free.
1726  */
1727  for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
1728  if (strcmp(console_cmdline[i].name, name) == 0 &&
1729  console_cmdline[i].index == idx) {
1730  if (!brl_options)
1731  selected_console = i;
1732  return 0;
1733  }
1734  if (i == MAX_CMDLINECONSOLES)
1735  return -E2BIG;
1736  if (!brl_options)
1737  selected_console = i;
1738  c = &console_cmdline[i];
1739  strlcpy(c->name, name, sizeof(c->name));
1740  c->options = options;
1741 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
1742  c->brl_options = brl_options;
1743 #endif
1744  c->index = idx;
1745  return 0;
1746 }
1747 /*
1748  * Set up a list of consoles. Called from init/main.c
1749  */
1750 static int __init console_setup(char *str)
1751 {
1752  char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */
1753  char *s, *options, *brl_options = NULL;
1754  int idx;
1755 
1756 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
1757  if (!memcmp(str, "brl,", 4)) {
1758  brl_options = "";
1759  str += 4;
1760  } else if (!memcmp(str, "brl=", 4)) {
1761  brl_options = str + 4;
1762  str = strchr(brl_options, ',');
1763  if (!str) {
1764  printk(KERN_ERR "need port name after brl=\n");
1765  return 1;
1766  }
1767  *(str++) = 0;
1768  }
1769 #endif
1770 
1771  /*
1772  * Decode str into name, index, options.
1773  */
1774  if (str[0] >= '0' && str[0] <= '9') {
1775  strcpy(buf, "ttyS");
1776  strncpy(buf + 4, str, sizeof(buf) - 5);
1777  } else {
1778  strncpy(buf, str, sizeof(buf) - 1);
1779  }
1780  buf[sizeof(buf) - 1] = 0;
1781  if ((options = strchr(str, ',')) != NULL)
1782  *(options++) = 0;
1783 #ifdef __sparc__
1784  if (!strcmp(str, "ttya"))
1785  strcpy(buf, "ttyS0");
1786  if (!strcmp(str, "ttyb"))
1787  strcpy(buf, "ttyS1");
1788 #endif
1789  for (s = buf; *s; s++)
1790  if ((*s >= '0' && *s <= '9') || *s == ',')
1791  break;
1792  idx = simple_strtoul(s, NULL, 10);
1793  *s = 0;
1794 
1795  __add_preferred_console(buf, idx, options, brl_options);
1796  console_set_on_cmdline = 1;
1797  return 1;
1798 }
1799 __setup("console=", console_setup);
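
For example, console=ttyS0,115200n8 is decoded above into the driver name "ttyS", index 0, and the option string "115200n8", while a bare console=1 is shorthand for ttyS1 via the leading-digit check (illustrative values).
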
1800 
1814 int add_preferred_console(char *name, int idx, char *options)
1815 {
1816  return __add_preferred_console(name, idx, options, NULL);
1817 }
1818 
1819 int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options)
1820 {
1821  struct console_cmdline *c;
1822  int i;
1823 
1824  for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
1825  if (strcmp(console_cmdline[i].name, name) == 0 &&
1826  console_cmdline[i].index == idx) {
1827  c = &console_cmdline[i];
1828  strlcpy(c->name, name_new, sizeof(c->name));
1829  c->name[sizeof(c->name) - 1] = 0;
1830  c->options = options;
1831  c->index = idx_new;
1832  return i;
1833  }
1834  /* not found */
1835  return -1;
1836 }
1837 
1839 EXPORT_SYMBOL(console_suspend_enabled);
1840 
1841 static int __init console_suspend_disable(char *str)
1842 {
1843  console_suspend_enabled = 0;
1844  return 1;
1845 }
1846 __setup("no_console_suspend", console_suspend_disable);
1847 module_param_named(console_suspend, console_suspend_enabled,
1848  bool, S_IRUGO | S_IWUSR);
1849 MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
1850  " and hibernate operations");
1851 
1857 void suspend_console(void)
1858 {
1859  if (!console_suspend_enabled)
1860  return;
1861  printk("Suspending console(s) (use no_console_suspend to debug)\n");
1862  console_lock();
1863  console_suspended = 1;
1864  up(&console_sem);
1865 }
1866 
1867 void resume_console(void)
1868 {
1869  if (!console_suspend_enabled)
1870  return;
1871  down(&console_sem);
1872  console_suspended = 0;
1873  console_unlock();
1874 }
1875 
1887 static int __cpuinit console_cpu_notify(struct notifier_block *self,
1888  unsigned long action, void *hcpu)
1889 {
1890  switch (action) {
1891  case CPU_ONLINE:
1892  case CPU_DEAD:
1893  case CPU_DOWN_FAILED:
1894  case CPU_UP_CANCELED:
1895  console_lock();
1896  console_unlock();
1897  }
1898  return NOTIFY_OK;
1899 }
1900 
1909 void console_lock(void)
1910 {
1911  BUG_ON(in_interrupt());
1912  down(&console_sem);
1913  if (console_suspended)
1914  return;
1915  console_locked = 1;
1916  console_may_schedule = 1;
1917 }
1918 EXPORT_SYMBOL(console_lock);
1919 
1928 int console_trylock(void)
1929 {
1930  if (down_trylock(&console_sem))
1931  return 0;
1932  if (console_suspended) {
1933  up(&console_sem);
1934  return 0;
1935  }
1936  console_locked = 1;
1937  console_may_schedule = 0;
1938  return 1;
1939 }
1940 EXPORT_SYMBOL(console_trylock);
1941 
1942 int is_console_locked(void)
1943 {
1944  return console_locked;
1945 }
1946 
1947 /*
1948  * Delayed printk version, for scheduler-internal messages:
1949  */
1950 #define PRINTK_BUF_SIZE 512
1951 
1952 #define PRINTK_PENDING_WAKEUP 0x01
1953 #define PRINTK_PENDING_SCHED 0x02
1954 
1955 static DEFINE_PER_CPU(int, printk_pending);
1956 static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
1957 
1958 void printk_tick(void)
1959 {
1960  if (__this_cpu_read(printk_pending)) {
1961  int pending = __this_cpu_xchg(printk_pending, 0);
1962  if (pending & PRINTK_PENDING_SCHED) {
1963  char *buf = __get_cpu_var(printk_sched_buf);
1964  printk(KERN_WARNING "[sched_delayed] %s", buf);
1965  }
1966  if (pending & PRINTK_PENDING_WAKEUP)
1967  wake_up_interruptible(&log_wait);
1968  }
1969 }
1970 
1971 int printk_needs_cpu(int cpu)
1972 {
1973  if (cpu_is_offline(cpu))
1974  printk_tick();
1975  return __this_cpu_read(printk_pending);
1976 }
1977 
1978 void wake_up_klogd(void)
1979 {
1980  if (waitqueue_active(&log_wait))
1981  this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
1982 }
1983 
1984 static void console_cont_flush(char *text, size_t size)
1985 {
1986  unsigned long flags;
1987  size_t len;
1988 
1989  raw_spin_lock_irqsave(&logbuf_lock, flags);
1990 
1991  if (!cont.len)
1992  goto out;
1993 
1994  /*
1995  * We still queue earlier records, likely because the console was
1996  * busy. The earlier ones need to be printed before this one; we
1997  * did not flush any fragment so far, so just let it queue up.
1998  */
1999  if (console_seq < log_next_seq && !cont.cons)
2000  goto out;
2001 
2002  len = cont_print_text(text, size);
2003  raw_spin_unlock(&logbuf_lock);
2004  stop_critical_timings();
2005  call_console_drivers(cont.level, text, len);
2006  start_critical_timings();
2007  local_irq_restore(flags);
2008  return;
2009 out:
2010  raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2011 }
2012 
2027 void console_unlock(void)
2028 {
2029  static char text[LOG_LINE_MAX + PREFIX_MAX];
2030  static u64 seen_seq;
2031  unsigned long flags;
2032  bool wake_klogd = false;
2033  bool retry;
2034 
2035  if (console_suspended) {
2036  up(&console_sem);
2037  return;
2038  }
2039 
2040  console_may_schedule = 0;
2041 
2042  /* flush buffered message fragment immediately to console */
2043  console_cont_flush(text, sizeof(text));
2044 again:
2045  for (;;) {
2046  struct log *msg;
2047  size_t len;
2048  int level;
2049 
2050  raw_spin_lock_irqsave(&logbuf_lock, flags);
2051  if (seen_seq != log_next_seq) {
2052  wake_klogd = true;
2053  seen_seq = log_next_seq;
2054  }
2055 
2056  if (console_seq < log_first_seq) {
2057  /* messages are gone, move to first one */
2058  console_seq = log_first_seq;
2059  console_idx = log_first_idx;
2060  console_prev = 0;
2061  }
2062 skip:
2063  if (console_seq == log_next_seq)
2064  break;
2065 
2066  msg = log_from_idx(console_idx);
2067  if (msg->flags & LOG_NOCONS) {
2068  /*
2069  * Skip record we have buffered and already printed
2070  * directly to the console when we received it.
2071  */
2072  console_idx = log_next(console_idx);
2073  console_seq++;
2074  /*
2075  * We will get here again when we register a new
2076  * CON_PRINTBUFFER console. Clear the flag so we
2077  * will properly dump everything later.
2078  */
2079  msg->flags &= ~LOG_NOCONS;
2080  console_prev = msg->flags;
2081  goto skip;
2082  }
2083 
2084  level = msg->level;
2085  len = msg_print_text(msg, console_prev, false,
2086  text, sizeof(text));
2087  console_idx = log_next(console_idx);
2088  console_seq++;
2089  console_prev = msg->flags;
2090  raw_spin_unlock(&logbuf_lock);
2091 
2092  stop_critical_timings(); /* don't trace print latency */
2093  call_console_drivers(level, text, len);
2094  start_critical_timings();
2095  local_irq_restore(flags);
2096  }
2097  console_locked = 0;
2098 
2099  /* Release the exclusive_console once it is used */
2100  if (unlikely(exclusive_console))
2101  exclusive_console = NULL;
2102 
2103  raw_spin_unlock(&logbuf_lock);
2104 
2105  up(&console_sem);
2106 
2107  /*
2108  * Someone could have filled up the buffer again, so re-check if there's
2109  * something to flush. In case we cannot trylock the console_sem again,
2110  * there's a new owner and the console_unlock() from them will do the
2111  * flush, no worries.
2112  */
2113  raw_spin_lock(&logbuf_lock);
2114  retry = console_seq != log_next_seq;
2115  raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2116 
2117  if (retry && console_trylock())
2118  goto again;
2119 
2120  if (wake_klogd)
2121  wake_up_klogd();
2122 }
2123 EXPORT_SYMBOL(console_unlock);
2124 
2134 void __sched console_conditional_schedule(void)
2135 {
2136  if (console_may_schedule)
2137  cond_resched();
2138 }
2139 EXPORT_SYMBOL(console_conditional_schedule);
2140 
2141 void console_unblank(void)
2142 {
2143  struct console *c;
2144 
2145  /*
2146  * console_unblank can no longer be called in interrupt context unless
2147  * oops_in_progress is set to 1.
2148  */
2149  if (oops_in_progress) {
2150  if (down_trylock(&console_sem) != 0)
2151  return;
2152  } else
2153  console_lock();
2154 
2155  console_locked = 1;
2156  console_may_schedule = 0;
2157  for_each_console(c)
2158  if ((c->flags & CON_ENABLED) && c->unblank)
2159  c->unblank();
2160  console_unlock();
2161 }
2162 
2163 /*
2164  * Return the console tty driver structure and its associated index
2165  */
2166 struct tty_driver *console_device(int *index)
2167 {
2168  struct console *c;
2169  struct tty_driver *driver = NULL;
2170 
2171  console_lock();
2172  for_each_console(c) {
2173  if (!c->device)
2174  continue;
2175  driver = c->device(c, index);
2176  if (driver)
2177  break;
2178  }
2179  console_unlock();
2180  return driver;
2181 }
2182 
2183 /*
2184  * Prevent further output on the passed console device so that (for example)
2185  * serial drivers can disable console output before suspending a port, and can
2186  * re-enable output afterwards.
2187  */
2188 void console_stop(struct console *console)
2189 {
2190  console_lock();
2191  console->flags &= ~CON_ENABLED;
2192  console_unlock();
2193 }
2194 EXPORT_SYMBOL(console_stop);
2195 
2196 void console_start(struct console *console)
2197 {
2198  console_lock();
2199  console->flags |= CON_ENABLED;
2200  console_unlock();
2201 }
2202 EXPORT_SYMBOL(console_start);
2203 
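The comment above describes the intended use of console_stop()/console_start(): a minimal sketch (not part of printk.c) of how a serial driver might bracket a port suspend with these calls. The my_uart_* names are hypothetical.

#include <linux/console.h>

static struct console my_uart_console;	/* assumed to be registered elsewhere */

static int my_uart_port_suspend(void)
{
	/* silence console output before the UART loses power */
	console_stop(&my_uart_console);
	/* ... power the port down ... */
	return 0;
}

static int my_uart_port_resume(void)
{
	/* ... power the port back up ... */
	console_start(&my_uart_console);	/* sets CON_ENABLED again */
	return 0;
}
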
2204 static int __read_mostly keep_bootcon;
2205 
2206 static int __init keep_bootcon_setup(char *str)
2207 {
2208  keep_bootcon = 1;
2209  printk(KERN_INFO "debug: skip boot console de-registration.\n");
2210 
2211  return 0;
2212 }
2213 
2214 early_param("keep_bootcon", keep_bootcon_setup);
2215 
2216 /*
2217  * The console driver calls this routine during kernel initialization
2218  * to register the console printing procedure with printk() and to
2219  * print any messages that were printed by the kernel before the
2220  * console driver was initialized.
2221  *
2222  * This can happen pretty early during the boot process (because of
2223  * early_printk) - sometimes before setup_arch() completes - be careful
2224  * of what kernel features are used - they may not be initialised yet.
2225  *
2226  * There are two types of consoles - bootconsoles (early_printk) and
2227  * "real" consoles (everything which is not a bootconsole) which are
2228  * handled differently.
2229  * - Any number of bootconsoles can be registered at any time.
2230  * - As soon as a "real" console is registered, all bootconsoles
2231  * will be unregistered automatically.
2232  * - Once a "real" console is registered, any attempt to register a
2233  * bootconsole will be rejected.
2234  */
2235 void register_console(struct console *newcon)
2236 {
2237  int i;
2238  unsigned long flags;
2239  struct console *bcon = NULL;
2240 
2241  /*
2242  * before we register a new CON_BOOT console, make sure we don't
2243  * already have a valid console
2244  */
2245  if (console_drivers && newcon->flags & CON_BOOT) {
2246  /* find the last or real console */
2247  for_each_console(bcon) {
2248  if (!(bcon->flags & CON_BOOT)) {
2249  printk(KERN_INFO "Too late to register bootconsole %s%d\n",
2250  newcon->name, newcon->index);
2251  return;
2252  }
2253  }
2254  }
2255 
2256  if (console_drivers && console_drivers->flags & CON_BOOT)
2257  bcon = console_drivers;
2258 
2259  if (preferred_console < 0 || bcon || !console_drivers)
2260  preferred_console = selected_console;
2261 
2262  if (newcon->early_setup)
2263  newcon->early_setup();
2264 
2265  /*
2266  * See if we want to use this console driver. If we
2267  * didn't select a console we take the first one
2268  * that registers here.
2269  */
2270  if (preferred_console < 0) {
2271  if (newcon->index < 0)
2272  newcon->index = 0;
2273  if (newcon->setup == NULL ||
2274  newcon->setup(newcon, NULL) == 0) {
2275  newcon->flags |= CON_ENABLED;
2276  if (newcon->device) {
2277  newcon->flags |= CON_CONSDEV;
2278  preferred_console = 0;
2279  }
2280  }
2281  }
2282 
2283  /*
2284  * See if this console matches one we selected on
2285  * the command line.
2286  */
2287  for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
2288  i++) {
2289  if (strcmp(console_cmdline[i].name, newcon->name) != 0)
2290  continue;
2291  if (newcon->index >= 0 &&
2292  newcon->index != console_cmdline[i].index)
2293  continue;
2294  if (newcon->index < 0)
2295  newcon->index = console_cmdline[i].index;
2296 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
2297  if (console_cmdline[i].brl_options) {
2298  newcon->flags |= CON_BRL;
2299  braille_register_console(newcon,
2300  console_cmdline[i].index,
2301  console_cmdline[i].options,
2302  console_cmdline[i].brl_options);
2303  return;
2304  }
2305 #endif
2306  if (newcon->setup &&
2307  newcon->setup(newcon, console_cmdline[i].options) != 0)
2308  break;
2309  newcon->flags |= CON_ENABLED;
2310  newcon->index = console_cmdline[i].index;
2311  if (i == selected_console) {
2312  newcon->flags |= CON_CONSDEV;
2313  preferred_console = selected_console;
2314  }
2315  break;
2316  }
2317 
2318  if (!(newcon->flags & CON_ENABLED))
2319  return;
2320 
2321  /*
2322  * If we have a bootconsole and are switching to a real console,
2323  * don't print everything out again, since when the boot console and
2324  * the real console are the same physical device, it's annoying to
2325  * see the beginning boot messages twice.
2326  */
2327  if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
2328  newcon->flags &= ~CON_PRINTBUFFER;
2329 
2330  /*
2331  * Put this console in the list - keep the
2332  * preferred driver at the head of the list.
2333  */
2334  console_lock();
2335  if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
2336  newcon->next = console_drivers;
2337  console_drivers = newcon;
2338  if (newcon->next)
2339  newcon->next->flags &= ~CON_CONSDEV;
2340  } else {
2341  newcon->next = console_drivers->next;
2342  console_drivers->next = newcon;
2343  }
2344  if (newcon->flags & CON_PRINTBUFFER) {
2345  /*
2346  * console_unlock() will print out the buffered messages
2347  * for us.
2348  */
2349  raw_spin_lock_irqsave(&logbuf_lock, flags);
2350  console_seq = syslog_seq;
2351  console_idx = syslog_idx;
2352  console_prev = syslog_prev;
2353  raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2354  /*
2355  * We're about to replay the log buffer. Only do this to the
2356  * just-registered console to avoid excessive message spam to
2357  * the already-registered consoles.
2358  */
2359  exclusive_console = newcon;
2360  }
2361  console_unlock();
2362  console_sysfs_notify();
2363 
2364  /*
2365  * By unregistering the bootconsoles after we enable the real console
2366  * we get the "console xxx enabled" message on all the consoles -
2367  * boot consoles, real consoles, etc. - this is to ensure that end
2368  * users know there might be something in the kernel's log buffer that
2369  * went to the bootconsole (that they do not see on the real console).
2370  */
2371  if (bcon &&
2372  ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
2373  !keep_bootcon) {
2374  /* we need to iterate through twice, to make sure we print
2375  * everything out, before we unregister the console(s)
2376  */
2377  printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n",
2378  newcon->name, newcon->index);
2379  for_each_console(bcon)
2380  if (bcon->flags & CON_BOOT)
2381  unregister_console(bcon);
2382  } else {
2383  printk(KERN_INFO "%sconsole [%s%d] enabled\n",
2384  (newcon->flags & CON_BOOT) ? "boot" : "" ,
2385  newcon->name, newcon->index);
2386  }
2387 }
2388 EXPORT_SYMBOL(register_console);
2389 
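To illustrate the registration rules documented above, here is a minimal sketch of a "real" console driver (not taken from the kernel tree); the mycon names and the write routine body are hypothetical.

#include <linux/console.h>
#include <linux/init.h>

static void mycon_write(struct console *con, const char *s, unsigned int count)
{
	/* push 'count' bytes at 's' to the output device here */
}

static struct console mycon = {
	.name	= "mycon",
	.write	= mycon_write,
	.flags	= CON_PRINTBUFFER,	/* replay the log buffer once registered */
	.index	= -1,			/* accept whatever index console= selects */
};

static int __init mycon_init(void)
{
	register_console(&mycon);	/* may unregister any remaining bootconsoles */
	return 0;
}
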
2390 int unregister_console(struct console *console)
2391 {
2392  struct console *a, *b;
2393  int res = 1;
2394 
2395 #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
2396  if (console->flags & CON_BRL)
2397  return braille_unregister_console(console);
2398 #endif
2399 
2400  console_lock();
2401  if (console_drivers == console) {
2402  console_drivers=console->next;
2403  res = 0;
2404  } else if (console_drivers) {
2405  for (a=console_drivers->next, b=console_drivers ;
2406  a; b=a, a=b->next) {
2407  if (a == console) {
2408  b->next = a->next;
2409  res = 0;
2410  break;
2411  }
2412  }
2413  }
2414 
2415  /*
2416  * If this isn't the last console and it has CON_CONSDEV set, we
2417  * need to set it on the next preferred console.
2418  */
2419  if (console_drivers != NULL && console->flags & CON_CONSDEV)
2420  console_drivers->flags |= CON_CONSDEV;
2421 
2422  console_unlock();
2423  console_sysfs_notify();
2424  return res;
2425 }
2426 EXPORT_SYMBOL(unregister_console);
2427 
2428 static int __init printk_late_init(void)
2429 {
2430  struct console *con;
2431 
2432  for_each_console(con) {
2433  if (!keep_bootcon && con->flags & CON_BOOT) {
2434  printk(KERN_INFO "turn off boot console %s%d\n",
2435  con->name, con->index);
2436  unregister_console(con);
2437  }
2438  }
2439  hotcpu_notifier(console_cpu_notify, 0);
2440  return 0;
2441 }
2442 late_initcall(printk_late_init);
2443 
2444 #if defined CONFIG_PRINTK
2445 
2446 int printk_sched(const char *fmt, ...)
2447 {
2448  unsigned long flags;
2449  va_list args;
2450  char *buf;
2451  int r;
2452 
2453  local_irq_save(flags);
2454  buf = __get_cpu_var(printk_sched_buf);
2455 
2456  va_start(args, fmt);
2457  r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
2458  va_end(args);
2459 
2460  __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
2461  local_irq_restore(flags);
2462 
2463  return r;
2464 }
2465 
2466 /*
2467  * printk rate limiting, lifted from the networking subsystem.
2468  *
2469  * This enforces a rate limit: not more than 10 kernel messages
2470  * every 5s to make a denial-of-service attack impossible.
2471  */
2472 DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
2473 
2474 int __printk_ratelimit(const char *func)
2475 {
2476  return ___ratelimit(&printk_ratelimit_state, func);
2477 }
2478 EXPORT_SYMBOL(__printk_ratelimit);
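Callers normally reach this limiter through the printk_ratelimit() wrapper; a short sketch follows, where the mydrv message and the queue parameter are made up for illustration.

#include <linux/printk.h>

static void mydrv_report_drop(int queue)
{
	/* at most ~10 such messages make it to the log every 5 seconds */
	if (printk_ratelimit())
		printk(KERN_WARNING "mydrv: dropped packet on queue %d\n", queue);
}
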
2479 
2489 bool printk_timed_ratelimit(unsigned long *caller_jiffies,
2490  unsigned int interval_msecs)
2491 {
2492  if (*caller_jiffies == 0
2493  || !time_in_range(jiffies, *caller_jiffies,
2494  *caller_jiffies
2495  + msecs_to_jiffies(interval_msecs))) {
2496  *caller_jiffies = jiffies;
2497  return true;
2498  }
2499  return false;
2500 }
2501 EXPORT_SYMBOL(printk_timed_ratelimit);
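A sketch of the intended calling pattern for printk_timed_ratelimit(): the caller keeps one static jiffies timestamp per message site. mydrv_report_stall is a hypothetical name.

#include <linux/printk.h>

static void mydrv_report_stall(void)
{
	static unsigned long last;	/* caller-owned state, must start at 0 */

	/* true at most once per second for this call site */
	if (printk_timed_ratelimit(&last, 1000))
		printk(KERN_WARNING "mydrv: device appears stalled\n");
}
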
2502 
2503 static DEFINE_SPINLOCK(dump_list_lock);
2504 static LIST_HEAD(dump_list);
2505 
2514 int kmsg_dump_register(struct kmsg_dumper *dumper)
2515 {
2516  unsigned long flags;
2517  int err = -EBUSY;
2518 
2519  /* The dump callback needs to be set */
2520  if (!dumper->dump)
2521  return -EINVAL;
2522 
2523  spin_lock_irqsave(&dump_list_lock, flags);
2524  /* Don't allow registering multiple times */
2525  if (!dumper->registered) {
2526  dumper->registered = 1;
2527  list_add_tail_rcu(&dumper->list, &dump_list);
2528  err = 0;
2529  }
2530  spin_unlock_irqrestore(&dump_list_lock, flags);
2531 
2532  return err;
2533 }
2534 EXPORT_SYMBOL_GPL(kmsg_dump_register);
2535 
2543 int kmsg_dump_unregister(struct kmsg_dumper *dumper)
2544 {
2545  unsigned long flags;
2546  int err = -EINVAL;
2547 
2548  spin_lock_irqsave(&dump_list_lock, flags);
2549  if (dumper->registered) {
2550  dumper->registered = 0;
2551  list_del_rcu(&dumper->list);
2552  err = 0;
2553  }
2554  spin_unlock_irqrestore(&dump_list_lock, flags);
2555  synchronize_rcu();
2556 
2557  return err;
2558 }
2559 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
2560 
2561 static bool always_kmsg_dump;
2562 module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
2563 
2572 void kmsg_dump(enum kmsg_dump_reason reason)
2573 {
2574  struct kmsg_dumper *dumper;
2575  unsigned long flags;
2576 
2577  if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
2578  return;
2579 
2580  rcu_read_lock();
2581  list_for_each_entry_rcu(dumper, &dump_list, list) {
2582  if (dumper->max_reason && reason > dumper->max_reason)
2583  continue;
2584 
2585  /* initialize iterator with data about the stored records */
2586  dumper->active = true;
2587 
2588  raw_spin_lock_irqsave(&logbuf_lock, flags);
2589  dumper->cur_seq = clear_seq;
2590  dumper->cur_idx = clear_idx;
2591  dumper->next_seq = log_next_seq;
2592  dumper->next_idx = log_next_idx;
2593  raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2594 
2595  /* invoke dumper which will iterate over records */
2596  dumper->dump(dumper, reason);
2597 
2598  /* reset iterator */
2599  dumper->active = false;
2600  }
2601  rcu_read_unlock();
2602 }
2603 
2623 bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
2624  char *line, size_t size, size_t *len)
2625 {
2626  struct log *msg;
2627  size_t l = 0;
2628  bool ret = false;
2629 
2630  if (!dumper->active)
2631  goto out;
2632 
2633  if (dumper->cur_seq < log_first_seq) {
2634  /* messages are gone, move to first available one */
2635  dumper->cur_seq = log_first_seq;
2636  dumper->cur_idx = log_first_idx;
2637  }
2638 
2639  /* last entry */
2640  if (dumper->cur_seq >= log_next_seq)
2641  goto out;
2642 
2643  msg = log_from_idx(dumper->cur_idx);
2644  l = msg_print_text(msg, 0, syslog, line, size);
2645 
2646  dumper->cur_idx = log_next(dumper->cur_idx);
2647  dumper->cur_seq++;
2648  ret = true;
2649 out:
2650  if (len)
2651  *len = l;
2652  return ret;
2653 }
2654 
2672 bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
2673  char *line, size_t size, size_t *len)
2674 {
2675  unsigned long flags;
2676  bool ret;
2677 
2678  raw_spin_lock_irqsave(&logbuf_lock, flags);
2679  ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
2680  raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2681 
2682  return ret;
2683 }
2684 EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
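Inside a dump callback, the usual pattern is a loop like the following sketch; the buffer size and names are arbitrary.

static void mydump_walk_lines(struct kmsg_dumper *dumper,
			      enum kmsg_dump_reason reason)
{
	static char line[1024];
	size_t len;

	/* oldest to newest; returns false once all records were consumed */
	while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
		/* 'line' holds one record, 'len' bytes, with syslog prefix */
	}
}
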
2685 
2705 bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
2706  char *buf, size_t size, size_t *len)
2707 {
2708  unsigned long flags;
2709  u64 seq;
2710  u32 idx;
2711  u64 next_seq;
2712  u32 next_idx;
2713  enum log_flags prev;
2714  size_t l = 0;
2715  bool ret = false;
2716 
2717  if (!dumper->active)
2718  goto out;
2719 
2720  raw_spin_lock_irqsave(&logbuf_lock, flags);
2721  if (dumper->cur_seq < log_first_seq) {
2722  /* messages are gone, move to first available one */
2723  dumper->cur_seq = log_first_seq;
2724  dumper->cur_idx = log_first_idx;
2725  }
2726 
2727  /* last entry */
2728  if (dumper->cur_seq >= dumper->next_seq) {
2729  raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2730  goto out;
2731  }
2732 
2733  /* calculate length of entire buffer */
2734  seq = dumper->cur_seq;
2735  idx = dumper->cur_idx;
2736  prev = 0;
2737  while (seq < dumper->next_seq) {
2738  struct log *msg = log_from_idx(idx);
2739 
2740  l += msg_print_text(msg, prev, true, NULL, 0);
2741  idx = log_next(idx);
2742  seq++;
2743  prev = msg->flags;
2744  }
2745 
2746  /* move first record forward until length fits into the buffer */
2747  seq = dumper->cur_seq;
2748  idx = dumper->cur_idx;
2749  prev = 0;
2750  while (l > size && seq < dumper->next_seq) {
2751  struct log *msg = log_from_idx(idx);
2752 
2753  l -= msg_print_text(msg, prev, true, NULL, 0);
2754  idx = log_next(idx);
2755  seq++;
2756  prev = msg->flags;
2757  }
2758 
2759  /* last message in next iteration */
2760  next_seq = seq;
2761  next_idx = idx;
2762 
2763  l = 0;
2764  prev = 0;
2765  while (seq < dumper->next_seq) {
2766  struct log *msg = log_from_idx(idx);
2767 
2768  l += msg_print_text(msg, prev, syslog, buf + l, size - l);
2769  idx = log_next(idx);
2770  seq++;
2771  prev = msg->flags;
2772  }
2773 
2774  dumper->next_seq = next_seq;
2775  dumper->next_idx = next_idx;
2776  ret = true;
2777  raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2778 out:
2779  if (len)
2780  *len = l;
2781  return ret;
2782 }
2783 EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
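Alternatively, a dumper can pull whole blocks at once; a sketch follows, with an arbitrarily sized static buffer and hypothetical names.

static char mydump_buf[4096];

static void mydump_walk_blocks(struct kmsg_dumper *dumper,
			       enum kmsg_dump_reason reason)
{
	size_t len;

	/*
	 * Each call fills the buffer with the newest records that still fit,
	 * then steps backwards, so older blocks come out on later calls.
	 */
	while (kmsg_dump_get_buffer(dumper, false, mydump_buf,
				    sizeof(mydump_buf), &len)) {
		/* hand 'len' bytes of mydump_buf to the backing store here */
	}
}
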
2784 
2795 void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
2796 {
2797  dumper->cur_seq = clear_seq;
2798  dumper->cur_idx = clear_idx;
2799  dumper->next_seq = log_next_seq;
2800  dumper->next_idx = log_next_idx;
2801 }
2802 
2811 void kmsg_dump_rewind(struct kmsg_dumper *dumper)
2812 {
2813  unsigned long flags;
2814 
2815  raw_spin_lock_irqsave(&logbuf_lock, flags);
2816  kmsg_dump_rewind_nolock(dumper);
2817  raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2818 }
2819 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
2820 #endif