Commit d5c05f78 authored by Kirill Smelkov

.

parent 164ae5a1
@@ -1159,7 +1159,7 @@ int mmapfile_storeblk(BigFile *file, blk_t blk, const void *buf) {
     return 0;
 }
 
-void *mmapfile_mmap_setup_read(BigFile *file, blk_t blk, size_t blklen, VMA *vma) {
+void *mmapfile_mmap_setup_read(VMA *vma, BigFile *file, blk_t blk, size_t blklen) {
     BigFileMMap *f = upcast(BigFileMMap*, file);
     void *addr;
@@ -1170,7 +1170,7 @@ void *mmapfile_mmap_setup_read(BigFile *file, blk_t blk, size_t blklen, VMA *vma
     return addr;
 }
 
-int mmapfile_remmap_blk_read(BigFile *file, blk_t blk, VMA *vma) {
+int mmapfile_remmap_blk_read(VMA *vma, BigFile *file, blk_t blk) {
     BigFileMMap *f = upcast(BigFileMMap*, file);
     TODO (f->blksize != vma->fileh->ramh->ram->pagesize);
     ASSERT(vma->f_pgoffset <= blk && blk < vma_addr_fpgoffset(vma, vma->addr_stop));
...
@@ -265,7 +265,7 @@ int fileh_mmap(VMA *vma, BigFileH *fileh, pgoff_t pgoffset, pgoff_t pglen)
     if (fileh->mmap_overlay) {
         /* wcfs: mmap(base, READ) */
         TODO (file->blksize != fileh->ramh->ram->pagesize);
-        addr = fops->mmap_setup_read(file, pgoffset, pglen, vma);
+        addr = fops->mmap_setup_read(vma, file, pgoffset, pglen);
     } else {
         /* !wcfs: allocate address space somewhere */
         addr = mem_valloc(NULL, len);
@@ -1125,7 +1125,7 @@ static void vma_page_ensure_unmapped(VMA *vma, Page *page)
         int err;
         TODO (file->blksize != page_size(page));
-        err = file->file_ops->remmap_blk_read(file, /* blk = */page->f_pgoffset, vma);
+        err = file->file_ops->remmap_blk_read(vma, file, /* blk = */page->f_pgoffset);
         BUG_ON(err);    /* must not fail */
     }
     else {
...
@@ -43,7 +43,7 @@ extern "C" {
 /* BigFile base class
  *
  * BigFile is a file of fixed size blocks. It knows how to load/store blocks
- * to/from memory. Nothing else.
+ * to/from memory. Nothing else. XXX also mmap?
  *
  * Concrete file implementations subclass BigFile and define their file_ops.
  */
@@ -76,8 +76,8 @@ struct bigfile_ops {
     int   (*storeblk)  (BigFile *file, blk_t blk, const void *buf);
 
-    // - mmap_setup_read(file[blk +blklen), vma)  setup initial read-only mmap to serve vma
-    // - remmap_blk_read(file[blk], vma)          remmap blk into vma again, after e.g.
+    // - mmap_setup_read(vma, file[blk +blklen))  setup initial read-only mmap to serve vma
+    // - remmap_blk_read(vma, file[blk])          remmap blk into vma again, after e.g.
     //                                            RW dirty page was discarded
     // - munmap(vma)                              before VMA is unmapped
@@ -87,14 +87,14 @@ struct bigfile_ops {
      *
      * After setup bigfile backend manages the mapping and can change it dynamically
      * e.g. due to changes to the file from outside. However before changing a page,
-     * the backend must check if that page was already dirtied by virtmeme and if
+     * the backend must check if that page was already dirtied by virtmem and if
      * so don't change that page until virtmem calls .remmap_blk_read.
      *
      * The checking has to be done with virtmem lock held. A sketch of mapping
      * update sequence is as below:
      *
-     *     # backend detects that block is changed from outside
-     *     # fileh is vma->fileh - file handle with which the vma is associated
+     *     // backend detects that block is changed from outside
+     *     // fileh is vma->fileh - file handle with which the vma is associated
      *     virt_lock()
      *     if (!fileh_blk_isdirty(fileh, blk)) {
      *         // update mappings for all fileh's vma that cover blk
@@ -109,7 +109,7 @@ struct bigfile_ops {
      * @addr   NULL - mmap at anywhere,  !NULL - mmap exactly at addr.
      * @return !NULL - mapped there,     NULL - error.
      */
-    void* (*mmap_setup_read)  (BigFile *file, blk_t blk, size_t blklen, VMA *vma);
+    void* (*mmap_setup_read)  (VMA *vma, BigFile *file, blk_t blk, size_t blklen);
 
     // remmap_blk_read is called to remmap a block into vma again, after e.g.
@@ -118,7 +118,7 @@ struct bigfile_ops {
     // XXX called under virtmem lock?
     //
     // XXX error -> bug (must not fail)
-    int   (*remmap_blk_read)  (BigFile *file, blk_t, VMA *vma);
+    int   (*remmap_blk_read)  (VMA *vma, BigFile *file, blk_t blk);
 
     /* munmap is called when vma set up via mmap_setup_read is going to be unmapped.
      *
...
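
The update sequence sketched in the file.h comment above can be fleshed out into a small C fragment. This is a sketch only, not part of the commit: backend_on_blk_changed() and backend_remap_blk() are hypothetical names standing in for whatever a concrete backend does when it notices an external change, fileh_blk_isdirty() is used exactly as named in the header comment, and the include paths assume wendelin.core's header layout.

/* Sketch of the documented update sequence, from a backend's point of
 * view, using the new vma-first calling convention.  Assumed names are
 * marked in the comments. */
#include <wendelin/bigfile/file.h>      /* assumed include path */
#include <wendelin/bigfile/virtmem.h>   /* virt_lock/virt_unlock, VMA, BigFileH */

static void backend_on_blk_changed(VMA *vma, blk_t blk)
{
    /* fileh is vma->fileh - file handle with which the vma is associated */
    BigFileH *fileh = vma->fileh;

    virt_lock();
    if (!fileh_blk_isdirty(fileh, blk)) {
        /* page was not dirtied by virtmem -> it is safe to refresh the
         * read-only mapping that .mmap_setup_read(vma, ...) established */
        backend_remap_blk(vma, blk);    /* hypothetical helper */
    }
    /* else: leave the RW dirty page untouched until virtmem itself asks
     * for it back via .remmap_blk_read(vma, file, blk) */
    virt_unlock();
}

Putting vma first in both operations also keeps the mapping object in the leading position, consistent with fileh_mmap(VMA *vma, ...) shown in the second hunk.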