diff --git a/src/nvme/rq.c b/src/nvme/rq.c
index 261abca8..8ddf5ef1 100644
--- a/src/nvme/rq.c
+++ b/src/nvme/rq.c
@@ -46,20 +46,20 @@ static inline int __map_first(leint64_t *prp1, leint64_t *prplist, uint64_t iova
 	int max_prps = 1 << (pageshift - 3);
 
 	/* number of prps required to map the buffer */
-	int prpcount = (int)(len >> pageshift);
+	int prpcount = 1;
 
 	*prp1 = cpu_to_le64(iova);
 
-	/*
-	 * If prpcount is at least one and the iova is not aligned to the page
-	 * size, we need one more prp than what we calculated above.
-	 * Additionally, we align the iova down to a page size boundary,
-	 * simplifying the following loop.
-	 */
-	if (prpcount && !ALIGNED(iova, pagesize)) {
+	/* account for what is covered with the first prp */
+	len -= min_t(size_t, len, pagesize - (iova & (pagesize - 1)));
+
+	/* any residual just adds more prps */
+	if (len)
+		prpcount += ALIGN_UP(len, pagesize) >> pageshift;
+
+	if (prpcount > 1 && !ALIGNED(iova, pagesize))
+		/* align down to simplify loop below */
 		iova = ALIGN_DOWN(iova, pagesize);
-		prpcount++;
-	}
 
 	if (prpcount > max_prps) {
 		errno = EINVAL;
diff --git a/src/nvme/rq_test.c b/src/nvme/rq_test.c
index c417d7e1..a72d5255 100644
--- a/src/nvme/rq_test.c
+++ b/src/nvme/rq_test.c
@@ -33,7 +33,7 @@ int main(void)
 	leint64_t *prplist;
 	struct iovec iov[8];
 
-	plan_tests(62);
+	plan_tests(89);
 
 	assert(pgmap((void **)&prplist, __VFN_PAGESIZE) > 0);
 
@@ -54,6 +54,13 @@ int main(void)
 	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
 	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);
 
+	/* test 4k + 8 aligned */
+	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
+	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x1008);
+
+	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
+	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
+
 	/* test 8k aligned */
 	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
 	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x2000);
@@ -61,6 +68,15 @@ int main(void)
 	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
 	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
 
+	/* test 8k + 16 aligned */
+	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
+	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x2010);
+
+	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
+	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
+	ok1(le64_to_cpu(prplist[0]) == 0x1001000);
+	ok1(le64_to_cpu(prplist[1]) == 0x1002000);
+
 	/* test 12k aligned */
 	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
 	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x3000);
@@ -70,6 +86,15 @@ int main(void)
 	ok1(le64_to_cpu(prplist[0]) == 0x1001000);
 	ok1(le64_to_cpu(prplist[1]) == 0x1002000);
 
+	/* test 12k + 24 aligned */
+	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
+	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000000, 0x3018);
+
+	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000000);
+	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
+	ok1(le64_to_cpu(prplist[0]) == 0x1001000);
+	ok1(le64_to_cpu(prplist[1]) == 0x1002000);
+
 	/* test 512b unaligned */
 	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
 	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x200);
@@ -77,6 +102,13 @@ int main(void)
 	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
 	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x0);
 
+	/* test 512 unaligned (nasty) */
+	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
+	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1001000 - 4, 0x200);
+
+	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1001000 - 4);
+	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
+
 	/* test 4k unaligned */
 	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
 	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x1000);
@@ -84,6 +116,22 @@ int main(void)
 	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
 	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
 
+	/* test 4k + 8 unaligned */
+	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
+	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x1008);
+
+	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
+	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x1001000);
+
+	/* test 4k + 8 unaligned (nasty) */
+	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
+	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1001000 - 4, 0x1008);
+
+	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1001000 - 4);
+	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
+	ok1(le64_to_cpu(prplist[0]) == 0x1001000);
+	ok1(le64_to_cpu(prplist[1]) == 0x1002000);
+
 	/* test 4k - 4 unaligned */
 	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
 	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x1000 - 4);
@@ -100,6 +148,15 @@ int main(void)
 	ok1(le64_to_cpu(prplist[0]) == 0x1001000);
 	ok1(le64_to_cpu(prplist[1]) == 0x1002000);
 
+	/* test 8k + 16 unaligned */
+	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
+	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x2010);
+
+	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
+	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
+	ok1(le64_to_cpu(prplist[0]) == 0x1001000);
+	ok1(le64_to_cpu(prplist[1]) == 0x1002000);
+
 	/* test 8k - 4 unaligned */
 	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
 	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x2000 - 4);
@@ -117,6 +174,16 @@ int main(void)
 	ok1(le64_to_cpu(prplist[1]) == 0x1002000);
 	ok1(le64_to_cpu(prplist[2]) == 0x1003000);
 
+	/* test 12k + 24 unaligned */
+	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
+	nvme_rq_map_prp(&ctrl, &rq, &cmd, 0x1000004, 0x3018);
+
+	ok1(le64_to_cpu(cmd.dptr.prp1) == 0x1000004);
+	ok1(le64_to_cpu(cmd.dptr.prp2) == 0x8000000);
+	ok1(le64_to_cpu(prplist[0]) == 0x1001000);
+	ok1(le64_to_cpu(prplist[1]) == 0x1002000);
+	ok1(le64_to_cpu(prplist[2]) == 0x1003000);
+
 	/* test 512b aligned 1-iovec */
 	memset((void *)prplist, 0x0, __VFN_PAGESIZE);
 	iov[0] = (struct iovec) {.iov_base = (void *)0x1000000, .iov_len = 0x200};
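For reference (not part of the patch), below is a minimal standalone sketch of the revised prp counting, assuming a 4 KiB controller page (pageshift 12, as the test addresses imply). The PAGESHIFT/PAGESIZE/ALIGN_UP definitions and the prpcount() helper are local stand-ins for illustration, not the rq.c implementation.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGESHIFT 12
#define PAGESIZE  ((uint64_t)1 << PAGESHIFT)

/* local stand-in for the ALIGN_UP helper used in rq.c */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* mirrors the prp counting introduced in __map_first() */
static int prpcount(uint64_t iova, size_t len)
{
	int count = 1;
	size_t first = PAGESIZE - (iova & (PAGESIZE - 1));

	/* account for what is covered with the first prp */
	len -= (len < first) ? len : first;

	/* any residual just adds more prps */
	if (len)
		count += (int)(ALIGN_UP(len, PAGESIZE) >> PAGESHIFT);

	return count;
}

int main(void)
{
	/* 4k + 8 aligned: residual of 8 bytes -> prp2 points at the second page */
	assert(prpcount(0x1000000, 0x1008) == 2);

	/* 4k + 8 unaligned (nasty): only 4 bytes fit under the first prp,
	 * so three pages are touched and prp2 must point at a prp list */
	assert(prpcount(0x1001000 - 4, 0x1008) == 3);

	/* 4k - 4 unaligned: everything fits in the page covered by prp1 */
	assert(prpcount(0x1000004, 0x1000 - 4) == 1);

	printf("prpcount sketch: all checks passed\n");

	return 0;
}

Under the old (int)(len >> pageshift) calculation, the nasty 0x1008-byte transfer starting 4 bytes below a page boundary works out to two prps rather than the three it actually touches; the added "(nasty)" test cases pin down exactly those boundary conditions.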