24 #include <linux/kernel.h>
25 #include <linux/module.h>
29 #include <linux/sched.h>
30 #include <linux/wait.h>
37 #define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
/* Record-header magic word; presumably stamped into each record header
 * and checked when scanning flash -- the writing line is elided here,
 * TODO confirm. */
39 #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
40 #define MTDOOPS_HEADER_SIZE 8
/* Size of one oops record on flash; module parameter.  Must be a
 * multiple of 4096 and at least 4096 (enforced in mtdoops_init). */
42 static unsigned long record_size = 4096;
45 "record size for MTD OOPS pages in bytes (default 4096)");
/* Target MTD device, by name or index; mandatory (mtdoops_init refuses
 * to load when it is empty). */
47 static char mtddev[80];
50 "name or index number of the MTD device to use");
/* Non-zero: persist oopses as well as panics; zero: panics only. */
52 static int dump_oops = 1;
55 "set to 1 to dump oopses, 0 to only dump panics (default 1)");
/* Global state for the single mtdoops instance (oops_cxt). */
57 static struct mtdoops_context {
	/* Bitmap, one bit per record-sized page on the device: set while
	 * the page holds a saved record (see mark_page_used / page_is_used). */
67 unsigned long *oops_page_used;
72 static void mark_page_used(
struct mtdoops_context *cxt,
int page)
74 set_bit(page, cxt->oops_page_used);
77 static void mark_page_unused(
struct mtdoops_context *cxt,
int page)
82 static int page_is_used(
struct mtdoops_context *cxt,
int page)
84 return test_bit(page, cxt->oops_page_used);
/*
 * Erase the whole eraseblock that contains byte @offset, then mark every
 * record page inside that block unused in the bitmap.  erase.priv points
 * at a local waitqueue, so this presumably sleeps until the MTD erase
 * callback fires -- the wait itself is in elided lines, confirm.
 */
93 static int mtdoops_erase_block(
struct mtdoops_context *cxt,
int offset)
/* Round @offset down to the start of its eraseblock (in bytes)... */
96 u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->
erasesize;
/* ...and convert that byte offset into a record-page index. */
97 u32 start_page = start_page_offset / record_size;
/* Completion is signalled through the callback + priv waitqueue pair. */
107 erase.callback = mtdoops_erase_callback;
110 erase.priv = (
u_long)&wait_q;
/* (error path) report the failed erase range and the device name. */
120 (
unsigned long long)erase.addr,
121 (
unsigned long long)erase.len, mtddev);
/* Freshly erased pages no longer hold valid records. */
129 for (page = start_page; page < start_page + erase_pages; page++)
130 mark_page_unused(cxt, page);
/*
 * Advance the write position to the next record page, wrapping at the
 * end of the device, and bump the sequence counter -- skipping
 * 0xffffffff, which erased flash would read back as.
 */
135 static void mtdoops_inc_counter(
struct mtdoops_context *cxt)
/* Wrap back to the first page once past the last record page. */
138 if (cxt->nextpage >= cxt->oops_pages)
/* 0xffffffff is indistinguishable from blank flash -- never use it. */
141 if (cxt->nextcount == 0xffffffff)
/* Landing on a still-used page means its block must be erased first
 * (the action taken here is in elided lines). */
144 if (page_is_used(cxt, cxt->nextpage)) {
150 cxt->nextpage, cxt->nextcount);
/* Body of the deferred-erase work item (the function header line is not
 * visible in this chunk): make room at cxt->nextpage by moving it to a
 * good eraseblock and erasing that block, skipping bad blocks. */
156 struct mtdoops_context *cxt =
/* Byte distance from nextpage back to the start of its eraseblock. */
165 mod = (cxt->nextpage * record_size) % mtd->
erasesize;
/* If mid-block, advance nextpage to the first page of the next block. */
167 cxt->nextpage = cxt->nextpage + ((mtd->
erasesize -
mod) / record_size);
168 if (cxt->nextpage >= cxt->oops_pages)
/* Skip over bad eraseblocks, one whole block at a time. */
172 while ((ret =
mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
175 cxt->nextpage * record_size);
177 cxt->nextpage = cxt->nextpage + (mtd->
erasesize / record_size);
178 if (cxt->nextpage >= cxt->oops_pages)
/* Every eraseblock checked without finding a good one: give up. */
180 if (i == cxt->oops_pages / (mtd->
erasesize / record_size)) {
/* Retry the erase up to three times before surrendering. */
191 for (
j = 0, ret = -1; (
j < 3) && (ret < 0);
j++)
192 ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
196 cxt->nextpage, cxt->nextcount);
/*
 * Write the record buffered in cxt->oops_buf to flash at cxt->nextpage.
 * @panic: non-zero when called from the panic path, where the MTD
 * panic_write primitive must be used (the regular write may sleep).
 */
210 static void mtdoops_write(
struct mtdoops_context *cxt,
int panic)
/* Stamp the record header with the current sequence number. */
219 hdr[0] = cxt->nextcount;
224 record_size, &retlen, cxt->oops_buf);
/* Panic path but the device has no panic_write: nothing we can do. */
226 printk(
KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
/* Normal (non-panic) path uses the regular MTD write. */
230 ret =
mtd_write(mtd, cxt->nextpage * record_size,
231 record_size, &retlen, cxt->oops_buf);
/* A short write counts as a failure too. */
233 if (retlen != record_size || ret < 0)
234 printk(
KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
235 cxt->nextpage * record_size, retlen, record_size, ret);
/* Mark the page occupied and return the buffer to erased (0xff) state. */
236 mark_page_used(cxt, cxt->nextpage)
237 memset(cxt->oops_buf, 0xff, record_size);
/* Advance to the next free slot for the following dump. */
239 mtdoops_inc_counter(cxt);
/* Deferred-write work item: flush the buffered record outside the panic
 * path (panic == 0, so the regular mtd_write path is taken). */
242 static void mtdoops_workfunc_write(
struct work_struct *work)
244 struct mtdoops_context *cxt =
247 mtdoops_write(cxt, 0);
/*
 * Boot-time scan: walk every record page, rebuild the used-page bitmap,
 * and resume after the record with the highest (wrap-aware) sequence
 * number left by the previous boot.
 */
250 static void find_next_position(
struct mtdoops_context *cxt)
/* 0xffffffff doubles as the "nothing found yet" sentinel for maxcount. */
254 u32 count[2], maxcount = 0xffffffff;
257 for (page = 0; page < cxt->oops_pages; page++) {
/* Assume used until the header proves the page is blank. */
261 mark_page_used(cxt, page);
/* Read the two-word record header into count[]. */
263 &retlen, (
u_char *)&count[0]);
/* Corrected bitflips are fine; only hard read errors skip the page. */
265 (ret < 0 && !mtd_is_bitflip(ret))) {
266 printk(
KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
267 page * record_size, retlen,
/* All-ones header means erased flash: the page is actually free. */
272 if (count[0] == 0xffffffff && count[1] == 0xffffffff)
273 mark_page_unused(cxt, page);
274 if (count[0] == 0xffffffff)
276 if (maxcount == 0xffffffff) {
/* Wrap-aware comparisons: a small count beats a huge one when the
 * 32-bit sequence counter has recently wrapped around. */
279 }
else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
282 }
else if (count[0] > maxcount && count[0] < 0xc0000000) {
285 }
else if (count[0] > maxcount && count[0] > 0xc0000000
286 && maxcount > 0x80000000) {
/* No record found at all: start from the beginning of the device. */
291 if (maxcount == 0xffffffff) {
/* Otherwise resume from the newest record and step past it. */
298 cxt->nextpage = maxpos;
299 cxt->nextcount = maxcount;
301 mtdoops_inc_counter(cxt);
/* kmsg_dump callback: the kernel invokes this to persist log messages
 * on oops/panic.  The visible path writes synchronously (panic == 1). */
304 static void mtdoops_do_dump(
struct kmsg_dumper *dumper,
/* Recover our context from the embedded kmsg_dumper member. */
308 struct mtdoops_context,
dump);
319 mtdoops_write(cxt, 1);
/*
 * MTD notifier: a new MTD device appeared.  If it is the device selected
 * by the mtddev parameter, validate its geometry, allocate the used-page
 * bitmap, register the kmsg dumper and locate the next write position.
 */
325 static void mtdoops_notify_add(
struct mtd_info *mtd)
327 struct mtdoops_context *cxt = &oops_cxt;
/* Overflow-safe page count of the whole device (64-bit divide). */
328 u64 mtdoops_pages = div_u64(mtd->
size, record_size);
/* Matched by name: remember the index so later events compare equal. */
332 cxt->mtd_index = mtd->
index;
/* Not the device we were asked to use -- ignore it. */
334 if (mtd->
index != cxt->mtd_index || cxt->mtd_index < 0)
338 printk(
KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
343 printk(
KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
348 printk(
KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
/* One bit per record page; vfree'd on failure, removal and exit. */
356 if (!cxt->oops_page_used) {
362 cxt->dump.dump = mtdoops_do_dump;
363 err = kmsg_dump_register(&cxt->dump);
365 printk(
KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
366 vfree(cxt->oops_page_used);
367 cxt->oops_page_used =
NULL;
/* NOTE(review): (int)mtd->size truncates the 64-bit size before the
 * divide; mtdoops_pages (computed with div_u64 above) looks like the
 * intended value -- confirm and use it for devices >= 2 GiB. */
372 cxt->oops_pages = (
int)mtd->
size / record_size;
373 find_next_position(cxt);
/* MTD notifier: our device is going away -- unregister the kmsg dumper. */
377 static void mtdoops_notify_remove(
struct mtd_info *mtd)
379 struct mtdoops_context *cxt = &oops_cxt;
/* Ignore removals of devices we are not attached to. */
381 if (mtd->
index != cxt->mtd_index || cxt->mtd_index < 0)
384 if (kmsg_dump_unregister(&cxt->dump) < 0)
/* MTD notifier hooks: called for every MTD device add/remove event. */
394 .add = mtdoops_notify_add,
395 .remove = mtdoops_notify_remove,
/*
 * Module init: validate the mtddev and record_size parameters, allocate
 * the single record buffer and set up the deferred work items.
 */
398 static int __init mtdoops_init(
void)
400 struct mtdoops_context *cxt = &oops_cxt;
/* The target device is mandatory -- refuse to load without it. */
404 if (
strlen(mtddev) == 0) {
405 printk(
KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
/* record_size must be a multiple of 4 KiB... */
408 if ((record_size & 4095) != 0) {
409 printk(
KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
/* ...and at least 4 KiB. */
412 if (record_size < 4096) {
413 printk(
KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
/* mtd_index presumably comes from parsing mtddev as a number in the
 * elided lines -- confirm; -1 would mean "match by name". */
421 cxt->mtd_index = mtd_index;
/* Scratch buffer for one record, kept in erased (0xff) state. */
423 cxt->oops_buf =
vmalloc(record_size);
424 if (!cxt->oops_buf) {
425 printk(
KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
428 memset(cxt->oops_buf, 0xff, record_size);
/* Deferred erase and write both run from the shared workqueue. */
430 INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
431 INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
/* Module exit: release the record buffer and the used-page bitmap.
 * vfree(NULL) is a no-op, so a partially failed init is safe here. */
437 static void __exit mtdoops_exit(
void)
439 struct mtdoops_context *cxt = &oops_cxt;
442 vfree(cxt->oops_buf);
443 vfree(cxt->oops_page_used);