Commit 49fd66ad authored by Kirill Smelkov's avatar Kirill Smelkov

bigfile: Fix typos

parent d9707446
...@@ -50,7 +50,7 @@ static PyObject *pybuf_str; ...@@ -50,7 +50,7 @@ static PyObject *pybuf_str;
/* whether to pass old buffer instead of memoryview to .loadblk() / .storeblk() /* whether to pass old buffer instead of memoryview to .loadblk() / .storeblk()
* *
* on python2 < 2.7.10 memoreview object is not accepted in a lot of * on python2 < 2.7.10 memoryview object is not accepted in a lot of
* places, see e.g. http://bugs.python.org/issue22113 for struct.pack_into() * places, see e.g. http://bugs.python.org/issue22113 for struct.pack_into()
* *
* also on python 2.7.10, even latest numpy does not accept memoryview as * also on python 2.7.10, even latest numpy does not accept memoryview as
...@@ -171,7 +171,7 @@ void XPyObject_PrintReferrers(PyObject *obj, FILE *fp); ...@@ -171,7 +171,7 @@ void XPyObject_PrintReferrers(PyObject *obj, FILE *fp);
static int XPyFrame_IsCalleeOf(PyFrameObject *f, PyFrameObject *top); static int XPyFrame_IsCalleeOf(PyFrameObject *f, PyFrameObject *top);
/* buffer utilities: unpin buffer from its memory - make it zero-length /* buffer utilities: unpin buffer from its memory - make it zero-length
* pointing to NULL but staying a vailid python object */ * pointing to NULL but staying a valid python object */
#if PY_MAJOR_VERSION < 3 #if PY_MAJOR_VERSION < 3
void XPyBufferObject_Unpin(PyBufferObject *bufo); void XPyBufferObject_Unpin(PyBufferObject *bufo);
#endif #endif
...@@ -348,7 +348,7 @@ static /*const*/ PyMethodDef pyvma_methods[] = { ...@@ -348,7 +348,7 @@ static /*const*/ PyMethodDef pyvma_methods[] = {
{NULL} {NULL}
}; };
// XXX vvv better switch on various possibilities and find approptiate type // XXX vvv better switch on various possibilities and find appropriate type
// (e.g. on X32 uintptr_t will be 4 while long will be 8) // (e.g. on X32 uintptr_t will be 4 while long will be 8)
const int _ = const int _ =
BUILD_ASSERT_OR_ZERO(sizeof(uintptr_t) == sizeof(unsigned long)); BUILD_ASSERT_OR_ZERO(sizeof(uintptr_t) == sizeof(unsigned long));
...@@ -458,7 +458,7 @@ PyFunc(pyfileh_isdirty, "isdirty() - are there any changes to fileh memory at al ...@@ -458,7 +458,7 @@ PyFunc(pyfileh_isdirty, "isdirty() - are there any changes to fileh memory at al
if (!PyArg_ParseTuple(args, "")) if (!PyArg_ParseTuple(args, ""))
return NULL; return NULL;
/* NOTE not strictly neccessary to virt_lock() for checking ->dirty_pages not empty */ /* NOTE not strictly necessary to virt_lock() for checking ->dirty_pages not empty */
return PyBool_FromLong(!list_empty(&pyfileh->dirty_pages)); return PyBool_FromLong(!list_empty(&pyfileh->dirty_pages));
} }
...@@ -570,14 +570,14 @@ static int pybigfile_loadblk(BigFile *file, blk_t blk, void *buf) ...@@ -570,14 +570,14 @@ static int pybigfile_loadblk(BigFile *file, blk_t blk, void *buf)
* as the result - _we_ are the thread which holds the GIL and can call * as the result - _we_ are the thread which holds the GIL and can call
* python capi. */ * python capi. */
// XXX assert PyGILState_GetThisThreadState() != NULL // XXX assert PyGILState_GetThisThreadState() != NULL
// (i.e. pyton already knows this thread?) // (i.e. python already knows this thread?)
gstate = PyGILState_Ensure(); gstate = PyGILState_Ensure();
/* TODO write why we setup completly new thread state which looks like /* TODO write why we setup completely new thread state which looks like
* switching threads for python but stays at the same OS thread * switching threads for python but stays at the same OS thread
* *
* a) do not change current thread state in any way; * a) do not change current thread state in any way;
* b) to completly clear ts after loadblk (ex. for pybuf->refcnf to go to exactly 1) * b) to completely clear ts after loadblk (ex. for pybuf->refcnf to go to exactly 1)
*/ */
/* in python thread state - save what we'll possibly override /* in python thread state - save what we'll possibly override
...@@ -690,13 +690,13 @@ out: ...@@ -690,13 +690,13 @@ out:
* come here with gc.collecting=1 * come here with gc.collecting=1
* *
* NOTE also: while collecting garbage even more garbage can be * NOTE also: while collecting garbage even more garbage can be
* created due to arbitrary code run from undel __del__ of released * created due to arbitrary code run from under __del__ of released
* objects and weakref callbacks. This way after here GC collect * objects and weakref callbacks. This way after here GC collect
* even a single allocation could trigger GC, and thus arbitrary * even a single allocation could trigger GC, and thus arbitrary
* python code run, again */ * python code run, again */
PyGC_Collect(); PyGC_Collect();
/* garbage collection could result in running arbitraty code /* garbage collection could result in running arbitrary code
* because of finalizers. Print problems (if any) and make sure * because of finalizers. Print problems (if any) and make sure
* once again exception state is clear */ * once again exception state is clear */
if (PyErr_Occurred()) if (PyErr_Occurred())
......
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# Wendelin.bigfile | BigFile file backend # Wendelin.bigfile | BigFile file backend
# Copyright (C) 2014-2015 Nexedi SA and Contributors. # Copyright (C) 2014-2019 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com> # Kirill Smelkov <kirr@nexedi.com>
# #
# This program is free software: you can Use, Study, Modify and Redistribute # This program is free software: you can Use, Study, Modify and Redistribute
...@@ -25,7 +25,7 @@ from io import FileIO, SEEK_SET ...@@ -25,7 +25,7 @@ from io import FileIO, SEEK_SET
# XXX naming # XXX naming
class BigFile_File(BigFile): class BigFile_File(BigFile):
# .f - io.FileIo to file # .f - io.FileIO to file
def __new__(cls, path_or_fd, blksize): def __new__(cls, path_or_fd, blksize):
# XXX pass flags/mode as args to ctor ? # XXX pass flags/mode as args to ctor ?
...@@ -41,7 +41,7 @@ class BigFile_File(BigFile): ...@@ -41,7 +41,7 @@ class BigFile_File(BigFile):
f.seek(blk * blksize, SEEK_SET) f.seek(blk * blksize, SEEK_SET)
# XXX unfortunately buffer(buf, pos) creates readonly buffer, so we # XXX unfortunately buffer(buf, pos) creates readonly buffer, so we
# have to use memoryviews # have to use memoryview
# XXX not needed after BIGFILE_USE_OLD_BUFFER support is dropped # XXX not needed after BIGFILE_USE_OLD_BUFFER support is dropped
bufmem = memoryview(buf) bufmem = memoryview(buf)
......
/* Wendelin.bigfile | Low-level pagefault handler /* Wendelin.bigfile | Low-level pagefault handler
* Copyright (C) 2014-2015 Nexedi SA and Contributors. * Copyright (C) 2014-2019 Nexedi SA and Contributors.
* Kirill Smelkov <kirr@nexedi.com> * Kirill Smelkov <kirr@nexedi.com>
* *
* This program is free software: you can Use, Study, Modify and Redistribute * This program is free software: you can Use, Study, Modify and Redistribute
...@@ -89,7 +89,7 @@ static void on_pagefault(int sig, siginfo_t *si, void *_uc) ...@@ -89,7 +89,7 @@ static void on_pagefault(int sig, siginfo_t *si, void *_uc)
* block is allocated dynamically at runtime, we can overlap with such * block is allocated dynamically at runtime, we can overlap with such
* allocation only if SIGSEGV happens in that original TLS allocation, * allocation only if SIGSEGV happens in that original TLS allocation,
* which should not happen, and thus it is already a bug somewhere in * which should not happen, and thus it is already a bug somewhere in
* thread datatructures. */ * thread data structures. */
static __thread int in_on_pagefault; static __thread int in_on_pagefault;
BUG_ON(in_on_pagefault); BUG_ON(in_on_pagefault);
++in_on_pagefault; ++in_on_pagefault;
......
/* Wendelin.bigfile | Virtual memory /* Wendelin.bigfile | Virtual memory
* Copyright (C) 2014-2015 Nexedi SA and Contributors. * Copyright (C) 2014-2019 Nexedi SA and Contributors.
* Kirill Smelkov <kirr@nexedi.com> * Kirill Smelkov <kirr@nexedi.com>
* *
* This program is free software: you can Use, Study, Modify and Redistribute * This program is free software: you can Use, Study, Modify and Redistribute
...@@ -57,7 +57,7 @@ static int __ram_reclaim(RAM *ram); ...@@ -57,7 +57,7 @@ static int __ram_reclaim(RAM *ram);
/* global lock which protects manipulating virtmem data structures /* global lock which protects manipulating virtmem data structures
* *
* NOTE not scalable, but this is temporary solution - as we are going to move * NOTE not scalable, but this is temporary solution - as we are going to move
* memory managment back into the kernel, where it is done properly. */ * memory management back into the kernel, where it is done properly. */
static pthread_mutex_t virtmem_lock = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP; static pthread_mutex_t virtmem_lock = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
static const VirtGilHooks *virtmem_gilhooks; static const VirtGilHooks *virtmem_gilhooks;
...@@ -711,7 +711,7 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write) ...@@ -711,7 +711,7 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
/* (5b) page is currently being loaded by another thread - wait for load to complete /* (5b) page is currently being loaded by another thread - wait for load to complete
* *
* NOTE a page is protected from being concurently loaded by two threads at * NOTE a page is protected from being concurrently loaded by two threads at
* the same time via: * the same time via:
* *
* - virtmem lock - we get/put pages from fileh->pagemap only under it * - virtmem lock - we get/put pages from fileh->pagemap only under it
...@@ -726,7 +726,7 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write) ...@@ -726,7 +726,7 @@ VMFaultResult vma_on_pagefault(VMA *vma, uintptr_t addr, int write)
void *gilstate; void *gilstate;
virt_unlock(); virt_unlock();
gilstate = virt_gil_ensure_unlocked(); gilstate = virt_gil_ensure_unlocked();
usleep(10000); // XXX with 1000 uslepp still busywaits usleep(10000); // XXX with 1000 usleep still busywaits
virt_gil_retake_if_waslocked(gilstate); virt_gil_retake_if_waslocked(gilstate);
virt_lock(); virt_lock();
return VM_RETRY; return VM_RETRY;
...@@ -861,7 +861,7 @@ void page_decref(Page *page) ...@@ -861,7 +861,7 @@ void page_decref(Page *page)
void *page_mmap(Page *page, void *addr, int prot) void *page_mmap(Page *page, void *addr, int prot)
{ {
RAMH *ramh = page->ramh; RAMH *ramh = page->ramh;
// XXX better call ramh_mmap_page() without tinkering wih ramh_ops? // XXX better call ramh_mmap_page() without tinkering with ramh_ops?
return ramh->ramh_ops->mmap_page(ramh, page->ramh_pgoffset, addr, prot); return ramh->ramh_ops->mmap_page(ramh, page->ramh_pgoffset, addr, prot);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment