zram_drv.c (Linux kernel 3.7.1)
1 /*
2  * Compressed RAM block device
3  *
4  * Copyright (C) 2008, 2009, 2010 Nitin Gupta
5  *
6  * This code is released using a dual license strategy: BSD/GPL
7  * You can choose the licence that better fits your requirements.
8  *
9  * Released under the terms of 3-clause BSD License
10  * Released under the terms of GNU General Public License Version 2.0
11  *
12  * Project home: http://compcache.googlecode.com
13  */
14 
15 #define KMSG_COMPONENT "zram"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17 
18 #ifdef CONFIG_ZRAM_DEBUG
19 #define DEBUG
20 #endif
21 
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/bio.h>
25 #include <linux/bitops.h>
26 #include <linux/blkdev.h>
27 #include <linux/buffer_head.h>
28 #include <linux/device.h>
29 #include <linux/genhd.h>
30 #include <linux/highmem.h>
31 #include <linux/slab.h>
32 #include <linux/lzo.h>
33 #include <linux/string.h>
34 #include <linux/vmalloc.h>
35 
36 #include "zram_drv.h"
37 
38 /* Globals */
39 static int zram_major;
40 struct zram *zram_devices;
41 
42 /* Module params (documentation at end) */
43 static unsigned int num_devices;
44 
45 static void zram_stat_inc(u32 *v)
46 {
47  *v = *v + 1;
48 }
49 
50 static void zram_stat_dec(u32 *v)
51 {
52  *v = *v - 1;
53 }
54 
55 static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
56 {
57  spin_lock(&zram->stat64_lock);
58  *v = *v + inc;
59  spin_unlock(&zram->stat64_lock);
60 }
61 
62 static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
63 {
64  spin_lock(&zram->stat64_lock);
65  *v = *v - dec;
66  spin_unlock(&zram->stat64_lock);
67 }
68 
69 static void zram_stat64_inc(struct zram *zram, u64 *v)
70 {
71  zram_stat64_add(zram, v, 1);
72 }
73 
74 static int zram_test_flag(struct zram *zram, u32 index,
75  enum zram_pageflags flag)
76 {
77  return zram->table[index].flags & BIT(flag);
78 }
79 
80 static void zram_set_flag(struct zram *zram, u32 index,
81  enum zram_pageflags flag)
82 {
83  zram->table[index].flags |= BIT(flag);
84 }
85 
86 static void zram_clear_flag(struct zram *zram, u32 index,
87  enum zram_pageflags flag)
88 {
89  zram->table[index].flags &= ~BIT(flag);
90 }
91 
92 static int page_zero_filled(void *ptr)
93 {
94  unsigned int pos;
95  unsigned long *page;
96 
97  page = (unsigned long *)ptr;
98 
99  for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
100  if (page[pos])
101  return 0;
102  }
103 
104  return 1;
105 }
106 
107 static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
108 {
109  if (!zram->disksize) {
110  pr_info(
111  "disk size not provided. You can use disksize_kb module "
112  "param to specify size.\nUsing default: (%u%% of RAM).\n",
113  default_disksize_perc_ram
114  );
115  zram->disksize = default_disksize_perc_ram *
116  (totalram_bytes / 100);
117  }
118 
119  if (zram->disksize > 2 * (totalram_bytes)) {
120  pr_info(
121  "There is little point creating a zram of greater than "
122  "twice the size of memory since we expect a 2:1 compression "
123  "ratio. Note that zram uses about 0.1%% of the size of "
124  "the disk when not in use so a huge zram is "
125  "wasteful.\n"
126  "\tMemory Size: %zu kB\n"
127  "\tSize you selected: %llu kB\n"
128  "Continuing anyway ...\n",
129  totalram_bytes >> 10, zram->disksize
130  );
131  }
132 
133  zram->disksize &= PAGE_MASK;
134 }
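/*
 * Worked example (a sketch, assuming default_disksize_perc_ram is 25 as
 * defined in zram_drv.h): on a machine with 1 GiB of RAM the default
 * disksize becomes 25 * (1 GiB / 100), roughly 256 MiB, which the
 * PAGE_MASK step above then rounds down to a whole number of pages.
 */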
135 
136 static void zram_free_page(struct zram *zram, size_t index)
137 {
138  unsigned long handle = zram->table[index].handle;
139  u16 size = zram->table[index].size;
140 
141  if (unlikely(!handle)) {
142  /*
143  * No memory is allocated for zero filled pages.
144  * Simply clear zero page flag.
145  */
146  if (zram_test_flag(zram, index, ZRAM_ZERO)) {
147  zram_clear_flag(zram, index, ZRAM_ZERO);
148  zram_stat_dec(&zram->stats.pages_zero);
149  }
150  return;
151  }
152 
153  if (unlikely(size > max_zpage_size))
154  zram_stat_dec(&zram->stats.bad_compress);
155 
156  zs_free(zram->mem_pool, handle);
157 
158  if (size <= PAGE_SIZE / 2)
159  zram_stat_dec(&zram->stats.good_compress);
160 
161  zram_stat64_sub(zram, &zram->stats.compr_size,
162  zram->table[index].size);
163  zram_stat_dec(&zram->stats.pages_stored);
164 
165  zram->table[index].handle = 0;
166  zram->table[index].size = 0;
167 }
168 
169 static void handle_zero_page(struct bio_vec *bvec)
170 {
171  struct page *page = bvec->bv_page;
172  void *user_mem;
173 
174  user_mem = kmap_atomic(page);
175  memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
176  kunmap_atomic(user_mem);
177 
178  flush_dcache_page(page);
179 }
180 
181 static inline int is_partial_io(struct bio_vec *bvec)
182 {
183  return bvec->bv_len != PAGE_SIZE;
184 }
185 
186 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
187  u32 index, int offset, struct bio *bio)
188 {
189  int ret;
190  size_t clen;
191  struct page *page;
192  unsigned char *user_mem, *cmem, *uncmem = NULL;
193 
194  page = bvec->bv_page;
195 
196  if (zram_test_flag(zram, index, ZRAM_ZERO)) {
197  handle_zero_page(bvec);
198  return 0;
199  }
200 
201  /* Requested page is not present in compressed area */
202  if (unlikely(!zram->table[index].handle)) {
203  pr_debug("Read before write: sector=%lu, size=%u",
204  (ulong)(bio->bi_sector), bio->bi_size);
205  handle_zero_page(bvec);
206  return 0;
207  }
208 
209  if (is_partial_io(bvec)) {
210  /* Use a temporary buffer to decompress the page */
211  uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
212  if (!uncmem) {
213  pr_info("Error allocating temp memory!\n");
214  return -ENOMEM;
215  }
216  }
217 
218  user_mem = kmap_atomic(page);
219  if (!is_partial_io(bvec))
220  uncmem = user_mem;
221  clen = PAGE_SIZE;
222 
223  cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
224  ZS_MM_RO);
225 
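 /*
  * A stored size equal to PAGE_SIZE marks a page that was kept
  * uncompressed (see the max_zpage_size fallback in zram_bvec_write()),
  * so it is copied back directly instead of being decompressed.
  */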
226  if (zram->table[index].size == PAGE_SIZE) {
227  memcpy(uncmem, cmem, PAGE_SIZE);
228  ret = LZO_E_OK;
229  } else {
230  ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
231  uncmem, &clen);
232  }
233 
234  if (is_partial_io(bvec)) {
235  memcpy(user_mem + bvec->bv_offset, uncmem + offset,
236  bvec->bv_len);
237  kfree(uncmem);
238  }
239 
240  zs_unmap_object(zram->mem_pool, zram->table[index].handle);
241  kunmap_atomic(user_mem);
242 
243  /* Should NEVER happen. Return bio error if it does. */
244  if (unlikely(ret != LZO_E_OK)) {
245  pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
246  zram_stat64_inc(zram, &zram->stats.failed_reads);
247  return ret;
248  }
249 
250  flush_dcache_page(page);
251 
252  return 0;
253 }
254 
255 static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
256 {
257  int ret;
258  size_t clen = PAGE_SIZE;
259  unsigned char *cmem;
260  unsigned long handle = zram->table[index].handle;
261 
262  if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
263  memset(mem, 0, PAGE_SIZE);
264  return 0;
265  }
266 
267  cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
268  ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
269  mem, &clen);
270  zs_unmap_object(zram->mem_pool, handle);
271 
272  /* Should NEVER happen. Return bio error if it does. */
273  if (unlikely(ret != LZO_E_OK)) {
274  pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
275  zram_stat64_inc(zram, &zram->stats.failed_reads);
276  return ret;
277  }
278 
279  return 0;
280 }
281 
282 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
283  int offset)
284 {
285  int ret;
286  size_t clen;
287  unsigned long handle;
288  struct page *page;
289  unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
290 
291  page = bvec->bv_page;
292  src = zram->compress_buffer;
293 
294  if (is_partial_io(bvec)) {
295  /*
296  * This is a partial IO. We need to read the full page
297  * before writing the changes.
298  */
299  uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
300  if (!uncmem) {
301  pr_info("Error allocating temp memory!\n");
302  ret = -ENOMEM;
303  goto out;
304  }
305  ret = zram_read_before_write(zram, uncmem, index);
306  if (ret) {
307  kfree(uncmem);
308  goto out;
309  }
310  }
311 
312  /*
313  * System overwrites unused sectors. Free memory associated
314  * with this sector now.
315  */
316  if (zram->table[index].handle ||
317  zram_test_flag(zram, index, ZRAM_ZERO))
318  zram_free_page(zram, index);
319 
320  user_mem = kmap_atomic(page);
321 
322  if (is_partial_io(bvec))
323  memcpy(uncmem + offset, user_mem + bvec->bv_offset,
324  bvec->bv_len);
325  else
326  uncmem = user_mem;
327 
328  if (page_zero_filled(uncmem)) {
329  kunmap_atomic(user_mem);
330  if (is_partial_io(bvec))
331  kfree(uncmem);
332  zram_stat_inc(&zram->stats.pages_zero);
333  zram_set_flag(zram, index, ZRAM_ZERO);
334  ret = 0;
335  goto out;
336  }
337 
338  ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
339  zram->compress_workmem);
340 
341  kunmap_atomic(user_mem);
342  if (is_partial_io(bvec))
343  kfree(uncmem);
344 
345  if (unlikely(ret != LZO_E_OK)) {
346  pr_err("Compression failed! err=%d\n", ret);
347  goto out;
348  }
349 
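 /*
  * Poorly compressible page: when the compressed length exceeds
  * max_zpage_size (from zram_drv.h), fall back to storing the page
  * uncompressed by pointing src at the raw data and using PAGE_SIZE.
  * Note that user_mem was already unmapped (and uncmem freed for
  * partial writes) just above, so this fallback reads stale memory;
  * later kernels reorder that cleanup to after the copy below.
  */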
350  if (unlikely(clen > max_zpage_size)) {
351  zram_stat_inc(&zram->stats.bad_compress);
352  src = uncmem;
353  clen = PAGE_SIZE;
354  }
355 
356  handle = zs_malloc(zram->mem_pool, clen);
357  if (!handle) {
358  pr_info("Error allocating memory for compressed "
359  "page: %u, size=%zu\n", index, clen);
360  ret = -ENOMEM;
361  goto out;
362  }
363  cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
364 
365  memcpy(cmem, src, clen);
366 
367  zs_unmap_object(zram->mem_pool, handle);
368 
369  zram->table[index].handle = handle;
370  zram->table[index].size = clen;
371 
372  /* Update stats */
373  zram_stat64_add(zram, &zram->stats.compr_size, clen);
374  zram_stat_inc(&zram->stats.pages_stored);
375  if (clen <= PAGE_SIZE / 2)
376  zram_stat_inc(&zram->stats.good_compress);
377 
378  return 0;
379 
380 out:
381  if (ret)
382  zram_stat64_inc(zram, &zram->stats.failed_writes);
383  return ret;
384 }
385 
386 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
387  int offset, struct bio *bio, int rw)
388 {
389  int ret;
390 
391  if (rw == READ) {
392  down_read(&zram->lock);
393  ret = zram_bvec_read(zram, bvec, index, offset, bio);
394  up_read(&zram->lock);
395  } else {
396  down_write(&zram->lock);
397  ret = zram_bvec_write(zram, bvec, index, offset);
398  up_write(&zram->lock);
399  }
400 
401  return ret;
402 }
403 
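/*
 * Advance the (page index, byte offset) cursor past the bio_vec that was
 * just handled: move to the next zram page once the end of the current
 * page is reached, and keep the remaining byte offset within that page.
 */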
404 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
405 {
406  if (*offset + bvec->bv_len >= PAGE_SIZE)
407  (*index)++;
408  *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
409 }
410 
411 static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
412 {
413  int i, offset;
414  u32 index;
415  struct bio_vec *bvec;
416 
417  switch (rw) {
418  case READ:
419  zram_stat64_inc(zram, &zram->stats.num_reads);
420  break;
421  case WRITE:
422  zram_stat64_inc(zram, &zram->stats.num_writes);
423  break;
424  }
425 
426  index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
427  offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
428 
429  bio_for_each_segment(bvec, bio, i) {
430  int max_transfer_size = PAGE_SIZE - offset;
431 
432  if (bvec->bv_len > max_transfer_size) {
433  /*
434  * zram_bvec_rw() can only operate on a single
435  * zram page. Split the bio vector.
436  */
437  struct bio_vec bv;
438 
439  bv.bv_page = bvec->bv_page;
440  bv.bv_len = max_transfer_size;
441  bv.bv_offset = bvec->bv_offset;
442 
443  if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
444  goto out;
445 
446  bv.bv_len = bvec->bv_len - max_transfer_size;
447  bv.bv_offset += max_transfer_size;
448  if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
449  goto out;
450  } else
451  if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
452  < 0)
453  goto out;
454 
455  update_position(&index, &offset, bvec);
456  }
457 
458  set_bit(BIO_UPTODATE, &bio->bi_flags);
459  bio_endio(bio, 0);
460  return;
461 
462 out:
463  bio_io_error(bio);
464 }
465 
466 /*
467  * Check if request is within bounds and aligned on zram logical blocks.
468  */
469 static inline int valid_io_request(struct zram *zram, struct bio *bio)
470 {
471  if (unlikely(
472  (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
473  (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
474  (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
475 
476  return 0;
477  }
478 
479  /* I/O request is valid */
480  return 1;
481 }
482 
483 /*
484  * Handler function for all zram I/O requests.
485  */
486 static void zram_make_request(struct request_queue *queue, struct bio *bio)
487 {
488  struct zram *zram = queue->queuedata;
489 
490  if (unlikely(!zram->init_done) && zram_init_device(zram))
491  goto error;
492 
493  down_read(&zram->init_lock);
494  if (unlikely(!zram->init_done))
495  goto error_unlock;
496 
497  if (!valid_io_request(zram, bio)) {
498  zram_stat64_inc(zram, &zram->stats.invalid_io);
499  goto error_unlock;
500  }
501 
502  __zram_make_request(zram, bio, bio_data_dir(bio));
503  up_read(&zram->init_lock);
504 
505  return;
506 
507 error_unlock:
508  up_read(&zram->init_lock);
509 error:
510  bio_io_error(bio);
511 }
512 
513 void __zram_reset_device(struct zram *zram)
514 {
515  size_t index;
516 
517  zram->init_done = 0;
518 
519  /* Free various per-device buffers */
520  kfree(zram->compress_workmem);
521  free_pages((unsigned long)zram->compress_buffer, 1);
522 
523  zram->compress_workmem = NULL;
524  zram->compress_buffer = NULL;
525 
526  /* Free all pages that are still in this zram device */
527  for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
528  unsigned long handle = zram->table[index].handle;
529  if (!handle)
530  continue;
531 
532  zs_free(zram->mem_pool, handle);
533  }
534 
535  vfree(zram->table);
536  zram->table = NULL;
537 
538  zs_destroy_pool(zram->mem_pool);
539  zram->mem_pool = NULL;
540 
541  /* Reset stats */
542  memset(&zram->stats, 0, sizeof(zram->stats));
543 
544  zram->disksize = 0;
545 }
546 
547 void zram_reset_device(struct zram *zram)
548 {
549  down_write(&zram->init_lock);
550  __zram_reset_device(zram);
551  up_write(&zram->init_lock);
552 }
553 
554 int zram_init_device(struct zram *zram)
555 {
556  int ret;
557  size_t num_pages;
558 
559  down_write(&zram->init_lock);
560 
561  if (zram->init_done) {
562  up_write(&zram->init_lock);
563  return 0;
564  }
565 
566  zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
567 
568  zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
569  if (!zram->compress_workmem) {
570  pr_err("Error allocating compressor working memory!\n");
571  ret = -ENOMEM;
572  goto fail_no_table;
573  }
574 
575  zram->compress_buffer =
576  (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
577  if (!zram->compress_buffer) {
578  pr_err("Error allocating compressor buffer space\n");
579  ret = -ENOMEM;
580  goto fail_no_table;
581  }
582 
583  num_pages = zram->disksize >> PAGE_SHIFT;
584  zram->table = vzalloc(num_pages * sizeof(*zram->table));
585  if (!zram->table) {
586  pr_err("Error allocating zram address table\n");
587  ret = -ENOMEM;
588  goto fail_no_table;
589  }
590 
591  set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
592 
593  /* zram devices sort of resemble non-rotational disks */
594  queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
595 
596  zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
597  if (!zram->mem_pool) {
598  pr_err("Error creating memory pool\n");
599  ret = -ENOMEM;
600  goto fail;
601  }
602 
603  zram->init_done = 1;
604  up_write(&zram->init_lock);
605 
606  pr_debug("Initialization done!\n");
607  return 0;
608 
609 fail_no_table:
610  /* To prevent accessing table entries during cleanup */
611  zram->disksize = 0;
612 fail:
613  __zram_reset_device(zram);
614  up_write(&zram->init_lock);
615  pr_err("Initialization failed: err=%d\n", ret);
616  return ret;
617 }
618 
619 static void zram_slot_free_notify(struct block_device *bdev,
620  unsigned long index)
621 {
622  struct zram *zram;
623 
624  zram = bdev->bd_disk->private_data;
625  zram_free_page(zram, index);
626  zram_stat64_inc(zram, &zram->stats.notify_free);
627 }
628 
629 static const struct block_device_operations zram_devops = {
630  .swap_slot_free_notify = zram_slot_free_notify,
631  .owner = THIS_MODULE
632 };
633 
634 static int create_device(struct zram *zram, int device_id)
635 {
636  int ret = 0;
637 
638  init_rwsem(&zram->lock);
639  init_rwsem(&zram->init_lock);
640  spin_lock_init(&zram->stat64_lock);
641 
642  zram->queue = blk_alloc_queue(GFP_KERNEL);
643  if (!zram->queue) {
644  pr_err("Error allocating disk queue for device %d\n",
645  device_id);
646  ret = -ENOMEM;
647  goto out;
648  }
649 
650  blk_queue_make_request(zram->queue, zram_make_request);
651  zram->queue->queuedata = zram;
652 
653  /* gendisk structure */
654  zram->disk = alloc_disk(1);
655  if (!zram->disk) {
656  blk_cleanup_queue(zram->queue);
657  pr_warn("Error allocating disk structure for device %d\n",
658  device_id);
659  ret = -ENOMEM;
660  goto out;
661  }
662 
663  zram->disk->major = zram_major;
664  zram->disk->first_minor = device_id;
665  zram->disk->fops = &zram_devops;
666  zram->disk->queue = zram->queue;
667  zram->disk->private_data = zram;
668  snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
669 
670  /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
671  set_capacity(zram->disk, 0);
672 
673  /*
674  * To ensure that we always get PAGE_SIZE aligned
675  * and n*PAGE_SIZED sized I/O requests.
676  */
677  blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
678  blk_queue_logical_block_size(zram->disk->queue,
679  ZRAM_LOGICAL_BLOCK_SIZE);
680  blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
681  blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
682 
683  add_disk(zram->disk);
684 
685  ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
686  &zram_disk_attr_group);
687  if (ret < 0) {
688  pr_warn("Error creating sysfs group");
689  goto out;
690  }
691 
692  zram->init_done = 0;
693 
694 out:
695  return ret;
696 }
697 
698 static void destroy_device(struct zram *zram)
699 {
700  sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
701  &zram_disk_attr_group);
702 
703  if (zram->disk) {
704  del_gendisk(zram->disk);
705  put_disk(zram->disk);
706  }
707 
708  if (zram->queue)
709  blk_cleanup_queue(zram->queue);
710 }
711 
712 unsigned int zram_get_num_devices(void)
713 {
714  return num_devices;
715 }
716 
717 static int __init zram_init(void)
718 {
719  int ret, dev_id;
720 
721  if (num_devices > max_num_devices) {
722  pr_warn("Invalid value for num_devices: %u\n",
723  num_devices);
724  ret = -EINVAL;
725  goto out;
726  }
727 
728  zram_major = register_blkdev(0, "zram");
729  if (zram_major <= 0) {
730  pr_warn("Unable to get major number\n");
731  ret = -EBUSY;
732  goto out;
733  }
734 
735  if (!num_devices) {
736  pr_info("num_devices not specified. Using default: 1\n");
737  num_devices = 1;
738  }
739 
740  /* Allocate the device array and initialize each one */
741  pr_info("Creating %u devices ...\n", num_devices);
742  zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
743  if (!zram_devices) {
744  ret = -ENOMEM;
745  goto unregister;
746  }
747 
748  for (dev_id = 0; dev_id < num_devices; dev_id++) {
749  ret = create_device(&zram_devices[dev_id], dev_id);
750  if (ret)
751  goto free_devices;
752  }
753 
754  return 0;
755 
756 free_devices:
757  while (dev_id)
758  destroy_device(&zram_devices[--dev_id]);
759  kfree(zram_devices);
760 unregister:
761  unregister_blkdev(zram_major, "zram");
762 out:
763  return ret;
764 }
765 
766 static void __exit zram_exit(void)
767 {
768  int i;
769  struct zram *zram;
770 
771  for (i = 0; i < num_devices; i++) {
772  zram = &zram_devices[i];
773 
774  destroy_device(zram);
775  if (zram->init_done)
776  zram_reset_device(zram);
777  }
778 
779  unregister_blkdev(zram_major, "zram");
780 
781  kfree(zram_devices);
782  pr_debug("Cleanup done!\n");
783 }
784 
785 module_param(num_devices, uint, 0);
786 MODULE_PARM_DESC(num_devices, "Number of zram devices");
787 
788 module_init(zram_init);
789 module_exit(zram_exit);
790 
791 MODULE_LICENSE("Dual BSD/GPL");
792 MODULE_AUTHOR("Nitin Gupta <[email protected]>");
793 MODULE_DESCRIPTION("Compressed RAM Block Device");
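
/*
 * Usage sketch (the disksize attribute below is provided by
 * zram_sysfs.c, not this file): a device is typically sized and then
 * used as a swap disk or ordinary block device, e.g.
 *
 *   modprobe zram num_devices=1
 *   echo $((256 * 1024 * 1024)) > /sys/block/zram0/disksize
 *   mkswap /dev/zram0 && swapon /dev/zram0
 *
 * If no disksize is written, the first I/O triggers zram_init_device(),
 * which falls back to default_disksize_perc_ram percent of RAM (see
 * zram_set_disksize() above).
 */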