Kirill Smelkov / wendelin.core / Commits / 0d10ef28

Commit 0d10ef28 authored Jul 10, 2019 by Kirill Smelkov
.
parent fbd5b279
Changes 1
Showing 1 changed file with 35 additions and 5 deletions  (+35, -5)
bigfile/virtmem.c    +35  -5
@@ -41,6 +41,7 @@ static size_t page_size(const Page *page);
 static void page_drop_memory(Page *page);
 static void *vma_page_addr(VMA *vma, Page *page);
 static pgoff_t vma_addr_fpgoffset(VMA *vma, uintptr_t addr);
+static void vma_mmap_page(VMA *vma, Page *page);
 static int vma_page_ismapped(VMA *vma, Page *page);
 static void vma_page_ensure_unmapped(VMA *vma, Page *page);
 static void vma_page_ensure_notmappedrw(VMA *vma, Page *page);
@@ -270,11 +271,8 @@ int fileh_mmap(VMA *vma, BigFileH *fileh, pgoff_t pgoffset, pgoff_t pglen)
 		if (!(pgoffset <= page->f_pgoffset && page->f_pgoffset < pgoffset + pglen))
 			continue;	/* page is out of requested mmap coverage */

-		// XXX err
-		// XXX notify watcher that we mmap RAM page in its range?
-		page_mmap(page, vma_page_addr(vma, page), PROT_READ | PROT_WRITE);
-		bitmap_set_bit(vma->page_ismappedv, page->f_pgoffset - vma->f_pgoffset);
-		page_incref(page);
+		// XXX notify watcher that we mmaped RAM page in its range?
+		vma_mmap_page(vma, page);
 	}
 }
@@ -818,6 +816,8 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
 	// XXX overlay: assert !vma->page_ismappedv[blk] XXX not ok? (retrying after virt unlock/lock)
 	// XXX mmap page to all vma with .mmap_overlay=1 of this fileh.
+	vma_mmap_page(vma, page);
+#if 0
 	if (!bitmap_test_bit(vma->page_ismappedv, page->f_pgoffset - vma->f_pgoffset)) {
 		// XXX err
 		page_mmap(page, vma_page_addr(vma, page), prot);
@@ -828,8 +828,10 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
 		/* just changing protection bits should not fail, if parameters ok */
 		xmprotect(vma_page_addr(vma, page), page_size(page), prot);
 	}
+#endif

 	// XXX also call page->markdirty() ?
+	// XXX move ^^^ before vma_mmap_page
 	if (newstate == PAGE_DIRTY && newstate != page->state) {
 		/* it is not allowed to modify pages while writeout is in progress */
 		BUG_ON(fileh->writeout_inprogress);
@@ -993,6 +995,34 @@ static pgoff_t vma_addr_fpgoffset(VMA *vma, uintptr_t addr)
 }

+/* vma_mmap_page mmaps page into vma.
+ *
+ * the page must belong to covered file.
+ * mmap protection is PROT_READ if page is PAGE_LOADED or PROT_READ|PROT_WRITE
+ * if page is PAGE_DIRTY.
+ *
+ * must be called under virtmem lock.
+ */
+static void vma_mmap_page(VMA *vma, Page *page)
+{
+	pgoff_t pgoff_invma;
+	int prot = (page->state == PAGE_DIRTY ? PROT_READ|PROT_WRITE : PROT_READ);
+
+	ASSERT(page->state == PAGE_LOADED || page->state == PAGE_DIRTY);
+	ASSERT(vma->f_pgoffset <= page->f_pgoffset && page->f_pgoffset < vma_addr_fpgoffset(vma, vma->addr_stop));
+
+	pgoff_invma = page->f_pgoffset - vma->f_pgoffset;
+	if (!bitmap_test_bit(vma->page_ismappedv, pgoff_invma)) {
+		// XXX err
+		page_mmap(page, vma_page_addr(vma, page), prot);
+		bitmap_set_bit(vma->page_ismappedv, pgoff_invma);
+		page_incref(page);
+	}
+	else {
+		/* just changing protection bits should not fail, if parameters ok */
+		xmprotect(vma_page_addr(vma, page), page_size(page), prot);
+	}
+}
+
 /* is `page` mapped to `vma` */
 static int vma_page_ismapped(VMA *vma, Page *page)
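For orientation only, not part of the commit: the new helper centralizes the map-or-reprotect logic that fileh_mmap and vma_on_pagefault previously open-coded. Below is a minimal sketch of the calling pattern implied by the doc comment above; the lock helper names virt_lock()/virt_unlock() and the example function are assumptions for illustration, not shown in this diff.

/* Sketch, not from the commit: intended use of vma_mmap_page.
 * virt_lock()/virt_unlock() are assumed names for the virtmem lock helpers. */
static void example_remap_page(VMA *vma, Page *page)
{
	virt_lock();	/* vma_mmap_page must run under virtmem lock */

	/* only pages that already have RAM content may be mmapped */
	if (page->state == PAGE_LOADED || page->state == PAGE_DIRTY)
		vma_mmap_page(vma, page);	/* maps PROT_READ for LOADED, PROT_READ|PROT_WRITE for DIRTY;
						   if already mapped, only adjusts protection via xmprotect */

	virt_unlock();
}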