22 #define ORE_DBGMSG2 ORE_DBGMSG
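/* _sp2d_alloc(): allocate the 2D stripe-pages cache (__stripe_pages_2d), one
 * row of page pointers per page offset in the stripe unit. The arrays below
 * are sized by the layout, so when the whole struct does not fit in one page
 * the allocation is done in PAGE_SIZE chunks.
 */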
69 static int _sp2d_alloc(unsigned pages_in_unit, unsigned group_width,
74 struct _alloc_all_bytes {
75 struct __alloc_stripe_pages_2d {
77 struct __1_page_stripe _1p_stripes[pages_in_unit];
79 struct __alloc_1p_arrays {
81 struct page *scribble[group_width];
82 char page_is_read[data_devs];
83 } __a1pa[pages_in_unit];
85 struct __alloc_1p_arrays *__a1pa;
86 struct __alloc_1p_arrays *__a1pa_end;
87 const unsigned sizeof__a1pa = sizeof(_aab->__a1pa[0]);
88 unsigned num_a1pa, alloc_size, i;
95 num_a1pa = (PAGE_SIZE - sizeof(_aab->__asp2d)) / sizeof__a1pa;
96 alloc_size = sizeof(_aab->__asp2d) + sizeof__a1pa * num_a1pa;
98 num_a1pa = pages_in_unit;
99 alloc_size = sizeof(*_aab);
104 ORE_DBGMSG("!! Failed to alloc sp2d size=%d\n", alloc_size);
108 sp2d = &_aab->__asp2d.sp2d;
111 __a1pa = _aab->__a1pa;
112 __a1pa_end = __a1pa + num_a1pa;
114 for (i = 0; i < pages_in_unit; ++i) {
115 if (unlikely(__a1pa >= __a1pa_end)) {
119 __a1pa = kzalloc(num_a1pa * sizeof__a1pa, GFP_KERNEL);
121 ORE_DBGMSG("!! Failed to _alloc_1p_arrays=%d\n",
125 __a1pa_end = __a1pa + num_a1pa;
132 sp2d->_1p_stripes[i].page_is_read = __a1pa->page_is_read;
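/* _sp2d_reset(): hand the pages that were only read for the read-modify-write
 * back to their owner (r4w->put_page) and clear the page tables and write
 * counts of every row, ready for the next stripe.
 */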
146 unsigned group_width = data_devs + sp2d->parity;
152 for (c = data_devs - 1; c >= 0; --c)
156 if (_1ps->page_is_read[c]) {
160 _1ps->page_is_read[c] = false;
167 memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages));
168 _1ps->write_count = 0;
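/* _sp2d_min_pg() / _sp2d_max_pg(): first and last rows of the stripe that
 * actually have pages queued for write (write_count != 0).
 */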
197 if (_1ps->write_count)
211 if (_1ps->write_count)
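/* _gen_xor_unit(): for every row with queued writes, XOR the data pages into
 * the parity page through the async_tx API, then flush the pending
 * transactions.
 */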
224 if (!_1ps->write_count)
227 init_async_submit(&_1ps->submit,
245 async_tx_issue_pending(_1ps->tx);
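/* _ore_add_stripe_page(): hang @page on its row/column in sp2d and bump that
 * row's write_count.
 */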
252 struct __1_page_stripe *_1ps;
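/* _ore_add_sg_seg(): close the device's current scatter-gather entry and, if
 * more units follow (not_last), open the next one past a cur_len hole; this
 * is how parity units and read-4-write gaps are skipped.
 */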
269 ORE_DBGMSG("dev=%d cur_len=0x%x not_last=%d cur_sg=%d "
270 "offset=0x%llx length=0x%x last_sgs_total=0x%x\n",
271 per_dev->dev, cur_len, not_last, per_dev->cur_sg,
272 _LLU(per_dev->offset), per_dev->length,
273 per_dev->last_sgs_total);
275 if (!per_dev->cur_sg) {
276 sge = per_dev->sglist;
279 if (per_dev->length) {
281 sge->offset = per_dev->offset;
282 sge->len = per_dev->length;
289 per_dev->offset += cur_len;
294 sge = &per_dev->sglist[per_dev->cur_sg - 1];
295 sge->len = per_dev->length - per_dev->last_sgs_total;
305 per_dev->last_sgs_total = per_dev->length;
307 } else if (!sge->len) {
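/* _alloc_read_4_write(): allocate a second ore_io_state used to read the
 * stripe pages that are missing before parity can be computed. Worst case is
 * a stripe populated with every other page, hence pages_in_unit + 2 sg
 * entries per device.
 */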
313 static int _alloc_read_4_write(struct ore_io_state *ios)
320 unsigned sgs_per_dev = ios->sp2d->pages_in_unit + 2;
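/* _add_to_r4w(): queue @page for reading on the device described by @si,
 * growing that device's bio and adding an sg gap when the page is not
 * contiguous with what is already queued.
 */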
335 struct ore_per_dev_state *per_dev;
337 unsigned first_dev = si->dev - (si->dev %
339 unsigned comp = si->dev - first_dev;
343 int ret = _alloc_read_4_write(ios);
352 per_dev = &read_ios->per_dev[comp];
353 if (!per_dev->length) {
355 ios->sp2d->pages_in_unit);
357 ORE_DBGMSG("Failed to allocate BIO size=%u\n",
358 ios->sp2d->pages_in_unit);
362 per_dev->dev = si->dev;
363 } else if (si->obj_offset != (per_dev->offset + per_dev->length)) {
364 u64 gap = si->obj_offset - (per_dev->offset + per_dev->length);
368 q = osd_request_queue(ore_comp_dev(read_ios->oc, per_dev->dev));
371 if (unlikely(added_len != pg_len)) {
372 ORE_DBGMSG("Failed to bio_add_pc_page bi_vcnt=%d\n",
373 per_dev->bio->bi_vcnt);
377 per_dev->length += pg_len;
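/* _add_to_r4w_first_page(): queue a read of the beginning of an unaligned
 * first page (the bytes that precede ios->offset).
 */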
382 static int _add_to_r4w_first_page(struct ore_io_state *ios, struct page *page)
392 ORE_DBGMSG("offset=0x%llx len=0x%x index=0x%lx dev=%x\n",
395 return _add_to_r4w(ios, &si, page, pg_len);
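/* _add_to_r4w_last_page(): queue a read of the tail of an incomplete last
 * page and advance *offset to the next page boundary.
 */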
403 unsigned pg_len, p, c;
408 c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
410 page = ios->sp2d->_1p_stripes[p].pages[c];
415 ORE_DBGMSG("p=%d, c=%d next-offset=0x%llx len=0x%x dev=%x par_dev=%d\n",
420 return _add_to_r4w(ios, &si, page, pg_len);
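/* _mark_read4write_pages_uptodate(): walk every bio of the read-4-write IO
 * and mark its pages up to date, clearing any error flags, once the read has
 * completed.
 */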
423 static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
429 for (d = 0; d < ios->numdevs; d++) {
430 struct bio *bio = ios->per_dev[d].bio;
435 __bio_for_each_segment(bv, bio, i, 0) {
436 struct page *page = bv->bv_page;
438 SetPageUptodate(page);
440 ClearPageError(page);
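/* _read_4_write_first_stripe(): when the write starts past the stripe start,
 * take the first stripe's pages that are not about to be written from the
 * page cache (r4w->get_page) and queue the non-uptodate ones for reading, so
 * parity is computed over a complete stripe.
 */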
464 static int _read_4_write_first_stripe(struct ore_io_state *ios)
468 u64 offset = ios->si.first_stripe_start;
471 if (offset == ios->offset)
472 goto read_last_stripe;
474 min_p = _sp2d_min_pg(sp2d);
475 max_p = _sp2d_max_pg(sp2d);
477 ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
478 offset, ios->offset, min_p, max_p);
484 for (p = min_p; p <= max_p; p++) {
486 struct page **pp = &_1ps->pages[c];
490 if (ios->offset % PAGE_SIZE)
492 _add_to_r4w_first_page(ios, *pp);
494 goto read_last_stripe;
497 *pp = ios->r4w->get_page(ios->private, offset,
503 _add_to_r4w(ios, &read_si, *pp, PAGE_SIZE);
506 _1ps->page_is_read[c] = true;
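/* _read_4_write_last_stripe(): same for the tail of the IO: fill the rest of
 * the last stripe with cache pages and queue the non-uptodate ones for
 * reading.
 */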
517 static int _read_4_write_last_stripe(struct ore_io_state *ios)
523 unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
527 if (offset % PAGE_SIZE)
528 _add_to_r4w_last_page(ios, &offset);
531 last_stripe_end = div_u64(offset + bytes_in_stripe - 1, bytes_in_stripe)
533 if (offset == last_stripe_end)
538 c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
539 ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
543 min_p = _sp2d_min_pg(sp2d);
544 max_p = _sp2d_max_pg(sp2d);
547 ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
548 offset, last_stripe_end, min_p, max_p);
550 while (offset < last_stripe_end) {
553 if ((min_p <= p) && (p <= max_p)) {
558 page = ios->r4w->get_page(ios->private, offset,
563 _1ps->pages[c] = page;
565 _1ps->page_is_read[c] = true;
567 _add_to_r4w(ios, &read_si, page, PAGE_SIZE);
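/* _read_4_write_execute(): synchronously execute the read-4-write IO built
 * above, then mark the read pages up to date.
 */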
585 static int _read_4_write_execute(struct ore_io_state *ios)
601 for (i = 0; i < ios_read->numdevs; i += ios_read->layout->mirrors_p1) {
613 _mark_read4write_pages_uptodate(ios_read, ret);
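/* _ore_add_parity_unit(): on reads just add an sg hole over the parity unit;
 * on writes allocate the parity pages, read in any missing stripe pages,
 * queue the parity unit for writing and XOR-generate its contents.
 */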
622 struct ore_per_dev_state *per_dev,
627 ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n",
636 unsigned array_start = 0;
640 si->cur_pg = _sp2d_min_pg(sp2d);
641 num_pages = _sp2d_max_pg(sp2d) + 1 - si->cur_pg;
646 if (!per_dev->length) {
651 _read_4_write_first_stripe(ios);
654 _read_4_write_last_stripe(ios);
655 _read_4_write_execute(ios);
669 per_dev, num_pages * PAGE_SIZE);
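/* _ore_post_alloc_raid_stuff(): for parity writes, size and allocate the sp2d
 * cache from the layout before any stripe is processed.
 */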
686 if (_sp2d_alloc(pages_in_unit, layout->group_width,
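/* _ore_free_raid_stuff(): free the sp2d cache when the io_state is released. */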
709 _sp2d_free(ios->sp2d);