4 #include <linux/kernel.h>
5 #include <linux/module.h>
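/* Simple callback setters: each helper below just records a driver-supplied
 * function pointer (or timeout value) on the request queue. */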
52 q->unprep_rq_fn = ufn;
74 q->merge_bvec_fn = mbfn;
80 q->softirq_done_fn = fn;
86 q->rq_timeout = timeout;
92 q->rq_timed_out_fn = fn;
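/* blk_set_default_limits(): reset a queue_limits structure to conservative,
 * safe defaults before the driver overrides the fields it cares about. */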
111 lim->max_segments = BLK_MAX_SEGMENTS;
112 lim->max_integrity_segments = 0;
113 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
114 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
115 lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
116 lim->max_write_same_sectors = 0;
117 lim->max_discard_sectors = 0;
118 lim->discard_granularity = 0;
119 lim->discard_alignment = 0;
120 lim->discard_misaligned = 0;
121 lim->discard_zeroes_data = 0;
122 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
124 lim->alignment_offset = 0;
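/* blk_set_stacking_limits(): stacking drivers (DM/MD) start from permissive
 * values so that the limits of component devices can only tighten them. */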
144 lim->discard_zeroes_data = 1;
148 lim->max_write_same_sectors = UINT_MAX;
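/* blk_queue_make_request(): install the bio submission routine and reset the
 * queue to default settings. */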
179 q->nr_requests = BLKDEV_MAX_RQ;
181 q->make_request_fn = mfn;
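/* blk_queue_bounce_limit(): on 64-bit builds, pages below 4 GB are assumed to
 * be reachable through the IOMMU; bounce_pfn records the highest page frame
 * the device can address without a bounce buffer. */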
212 #if BITS_PER_LONG == 64
218 if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
224 q->limits.bounce_pfn = b_pfn;
229 q->limits.bounce_pfn = b_pfn;
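/* blk_limits_max_hw_sectors(): the hardware limit is recorded as reported,
 * while the soft limit (max_sectors) is additionally capped at
 * BLK_DEF_MAX_SECTORS. */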
255 __func__, max_hw_sectors);
258 limits->max_hw_sectors = max_hw_sectors;
259 limits->max_sectors = min_t(unsigned int, max_hw_sectors,
260 BLK_DEF_MAX_SECTORS);
284 unsigned int max_discard_sectors)
286 q->limits.max_discard_sectors = max_discard_sectors;
296 unsigned int max_write_same_sectors)
298 q->limits.max_write_same_sectors = max_write_same_sectors;
316 __func__, max_segments);
319 q->limits.max_segments = max_segments;
340 q->limits.max_segment_size = max_size;
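/* blk_queue_logical_block_size(): physical_block_size and io_min are bumped
 * so they never fall below the logical block size. */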
356 q->limits.logical_block_size = size;
358 if (q->limits.physical_block_size < size)
359 q->limits.physical_block_size = size;
361 if (q->limits.io_min < q->limits.physical_block_size)
362 q->limits.io_min = q->limits.physical_block_size;
378 q->limits.physical_block_size = size;
380 if (q->limits.physical_block_size < q->limits.logical_block_size)
381 q->limits.physical_block_size = q->limits.logical_block_size;
383 if (q->limits.io_min < q->limits.physical_block_size)
384 q->limits.io_min = q->limits.physical_block_size;
401 q->limits.alignment_offset =
402 offset & (q->limits.physical_block_size - 1);
403 q->limits.misaligned = 0;
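/* blk_limits_io_min(): the minimum I/O size is rounded up to at least the
 * logical and physical block sizes. */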
420 limits->io_min = min;
422 if (limits->io_min < limits->logical_block_size)
423 limits->io_min = limits->logical_block_size;
425 if (limits->io_min < limits->physical_block_size)
426 limits->io_min = limits->physical_block_size;
465 limits->io_opt = opt;
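/*
 * Hedged sketch (not part of blk-settings.c): roughly how a driver might use
 * the setters above while initializing its queue.  The my_* names are
 * illustrative placeholders, not kernel APIs.
 */
#include <linux/blkdev.h>

static void my_softirq_done(struct request *rq)
{
	blk_end_request_all(rq, 0);		/* complete the whole request */
}

static void my_configure_queue(struct request_queue *q)
{
	blk_queue_softirq_done(q, my_softirq_done);	/* completion runs from softirq */
	blk_queue_rq_timeout(q, 30 * HZ);		/* time commands out after 30 seconds */
	blk_queue_max_hw_sectors(q, 1024);		/* at most 512 KiB per request */
	blk_queue_max_segments(q, 128);
	blk_queue_logical_block_size(q, 512);
	blk_queue_io_min(q, 4096);			/* report 4 KiB physical I/O granularity */
}

/*
 * blk_stack_limits(): merge the limits of a bottom (component) device into
 * those of a top (stacking) device; "start" is the component's offset within
 * the stacked device.  Most limits combine by taking the more restrictive of
 * the two values.
 */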
525 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
526 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
527 t->max_write_same_sectors = min(t->max_write_same_sectors,
528 b->max_write_same_sectors);
529 t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
531 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
532 b->seg_boundary_mask);
534 t->max_segments = min_not_zero(t->max_segments, b->max_segments);
535 t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
536 b->max_integrity_segments);
538 t->max_segment_size = min_not_zero(t->max_segment_size,
539 b->max_segment_size);
541 t->misaligned |= b->misaligned;
543 alignment = queue_limit_alignment_offset(b, start);
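/* Bottom device has a different alignment: check that it is compatible with
 * the current top alignment. */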
548 if (t->alignment_offset != alignment) {
550 top = max(t->physical_block_size, t->io_min)
551 + t->alignment_offset;
552 bottom = max(b->physical_block_size, b->io_min) + alignment;
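/* Verify that top and bottom intervals line up */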
555 if (max(top, bottom) & (min(top, bottom) - 1)) {
561 t->logical_block_size = max(t->logical_block_size,
562 b->logical_block_size);
564 t->physical_block_size = max(t->physical_block_size,
565 b->physical_block_size);
567 t->io_min = max(t->io_min, b->io_min);
568 t->io_opt = lcm(t->io_opt, b->io_opt);
570 t->cluster &= b->cluster;
571 t->discard_zeroes_data &= b->discard_zeroes_data;
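/* Physical block size a multiple of the logical block size? */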
574 if (t->physical_block_size & (t->logical_block_size - 1)) {
575 t->physical_block_size = t->logical_block_size;
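/* Minimum I/O a multiple of the physical block size? */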
581 if (t->io_min & (t->physical_block_size - 1)) {
582 t->io_min = t->physical_block_size;
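/* Optimal I/O a multiple of the physical block size? */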
588 if (t->io_opt & (t->physical_block_size - 1)) {
595 t->alignment_offset = lcm(t->alignment_offset, alignment)
596 & (max(t->physical_block_size, t->io_min) - 1);
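/* Verify that the new alignment_offset falls on a logical block boundary */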
599 if (t->alignment_offset & (t->logical_block_size - 1)) {
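/* Discard alignment and granularity */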
605 if (b->discard_granularity) {
606 alignment = queue_limit_discard_alignment(b, start);
608 if (t->discard_granularity != 0 &&
609 t->discard_alignment != alignment) {
610 top = t->discard_granularity + t->discard_alignment;
611 bottom = b->discard_granularity + alignment;
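/* Verify that the top and bottom discard intervals line up */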
614 if (max(top, bottom) & (min(top, bottom) - 1))
615 t->discard_misaligned = 1;
618 t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
619 b->max_discard_sectors);
620 t->discard_granularity = max(t->discard_granularity,
621 b->discard_granularity);
622 t->discard_alignment = lcm(t->discard_alignment, alignment) &
623 (t->discard_granularity - 1);
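/* bdev_stack_limits(): adjust for the partition offset before stacking the
 * limits of bdev's queue into the target limits. */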
646 start += get_start_sect(bdev);
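/*
 * Hedged sketch (not part of blk-settings.c): a stacking driver would
 * typically reset its limits and then fold in each component device, e.g.:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	bdev_stack_limits(&lim, component_bdev, component_start_sector);
 *
 * component_bdev and component_start_sector are illustrative names.
 */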
691 q->dma_pad_mask = mask;
707 if (mask > q->dma_pad_mask)
708 q->dma_pad_mask = mask;
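/* blk_queue_dma_drain(): requires at least two usable segments; records the
 * drain buffer, its size, and the callback that decides when to append it. */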
734 dma_drain_needed_fn *dma_drain_needed,
737 if (queue_max_segments(q) < 2)
741 q->dma_drain_needed = dma_drain_needed;
742 q->dma_drain_buffer = buf;
743 q->dma_drain_size = size;
762 q->limits.seg_boundary_mask = mask;
778 q->dma_alignment = mask;
800 if (mask > q->dma_alignment)
801 q->dma_alignment = mask;
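/* blk_queue_flush(): record which of REQ_FLUSH and REQ_FUA the device
 * supports; blk_queue_flush_queueable() records whether a flush request may
 * be queued alongside other requests. */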
821 q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
827 q->flush_not_queueable = !queueable;
831 static int __init blk_settings_init(void)