Commit 5a25f4ae authored by Kirill Smelkov

X minor pagefault time turned out to be huge

parent 494c2ccf
/* This program benchmarks pagefault time.
*
* Unfortunately as of 2017-Mar-20 for data in pagecache the situation is as
* follows (i7-6600U, Linux 4.9.13):
*
* 1. minor pagefault: ~ 1200ns
* (this program)
*
* 2. read syscall + whole page copy: ~ 215ns
* (https://github.com/golang/go/issues/19563#issuecomment-287423654;
* a pread sketch of this path follows this comment)
*
* 3. it is not possible to mmap(MAP_POPULATE | MAP_NONBLOCK) (i.e. prefault
* only those PTEs that are already in the pagecache).
* ( http://www.spinics.net/lists/linux-man/msg11420.html,
* https://git.kernel.org/linus/54cb8821de07f2ffcd28c380ce9b93d5784b40d7 )
*
* 4. (Q) I'm not sure a mechanism exists in the kernel to automatically
* subscribe a VMA so that, when a page becomes pagecached, the associated
* PTE is adjusted and programs do not have to pay minor-pagefault time on
* access.
*
* Unless 3 and 4 are solved, mmap unfortunately seems to be a slower choice
* than just pread.
*/
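/* Point 2 above refers to reading the data with plain pread(2) instead of
* mmapping it. A minimal sketch of that path (not part of this program; it
* reuses fd, NITER, PAGE_SIZE, Tstart/Tend, i and microtime() from main()
* below):
*
*	char buf[PAGE_SIZE];
*	ssize_t n;
*
*	Tstart = microtime();
*	for (i = 0; i < NITER; i++) {
*		n = pread(fd, buf, PAGE_SIZE, (off_t)i * PAGE_SIZE);
*		if (n != PAGE_SIZE) {
*			perror("pread");
*			abort();
*		}
*	}
*	Tend = microtime();
*	printf("T(pread):\t%.1f\t%6.1f ns / page\n", Tend - Tstart, (Tend - Tstart) * 1E9 / NITER);
*
* Each iteration pays one syscall plus a whole-page copy into buf (the
* ~215ns/page figure above), vs ~1200ns for the minor pagefault taken on
* first access to each page of the mmapped file.
*/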
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/stat.h>
@@ -47,6 +71,7 @@ int main() {
abort();
}
#if 1
// make sure RAM is actually allocated
Tstart = microtime();
err = fallocate(fd, /*mode*/0, 0, size);
@@ -56,9 +81,12 @@ int main() {
abort();
}
printf("T(fallocate):\t%.1f\t%6.1f ns / page\n", Tend - Tstart, (Tend - Tstart) * 1E9 / NITER);
#endif
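// Time the mmap call itself. Without MAP_POPULATE mmap only sets up the VMA;
// no PTEs are filled in yet, so the per-page cost is paid later, one minor
// fault at a time, in the access loop below.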
Tstart = microtime();
- addr = mmap(NULL, size, PROT_READ, MAP_SHARED | MAP_POPULATE, fd, 0);
+ addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+ //addr = mmap(NULL, size, PROT_READ, MAP_SHARED | MAP_POPULATE, fd, 0);
+ //addr = mmap(NULL, size, PROT_READ, MAP_SHARED | MAP_POPULATE | MAP_NONBLOCK, fd, 0);
if (addr == MAP_FAILED) {
perror("mmap");
abort();
@@ -67,6 +95,7 @@ int main() {
printf("T(mmap):\t%.1f\t%6.1f ns / page\n", Tend - Tstart, (Tend - Tstart) * 1E9 / NITER);
Tstart = microtime();
//for (int j=0; j < 100; j++)
for (i=0; i<NITER; i++) {
sum += addr[i*PAGE_SIZE];
}
......