Commit 34ffb472 authored by Bradley C. Kuszmaul, committed by Yoni Fogel

[t:4934] Get rid of more #4934 stuff from main. Refs #4934.

git-svn-id: file:///svn/toku/tokudb@44663 c7de825b-a66e-492c-adef-691d508d4ae1
parent c1afcd08
@@ -87,7 +87,6 @@ if (CMAKE_C_COMPILER_ID MATCHES Intel)
11001
11006
11003 # do not complain if some file was compiled without -ipo
-144 # silly icc 13 bug.
)
string(REGEX REPLACE ";" "," intel_warning_string "${intel_warnings}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -diag-disable ${intel_warning_string}")
@@ -36,7 +36,6 @@ void toku_mempool_copy_construct(struct mempool *mp, const void * const data_sou
toku_mempool_construct(mp, data_size);
memcpy(mp->base, data_source, data_size);
mp->free_offset = data_size; // address of first available memory for new data
-toku_memory_dontneed_after_but_i_touched(mp->base, mp->size, 0, data_size);
}
else {
toku_mempool_zero(mp);
@@ -116,7 +115,6 @@ void *toku_mempool_malloc(struct mempool *mp, size_t size, int alignment) {
} else {
vp = (char *)mp->base + offset;
mp->free_offset = offset + size;
-toku_memory_dontneed_after_but_i_touched(mp->base, mp->size, offset, size);
}
assert(mp->free_offset <= mp->size);
assert(((long)vp & (alignment-1)) == 0);
@@ -147,5 +145,4 @@ void toku_mempool_clone(struct mempool* orig_mp, struct mempool* new_mp) {
new_mp->size = orig_mp->free_offset; // only make the cloned mempool store what is needed
new_mp->base = toku_xmalloc(new_mp->size);
memcpy(new_mp->base, orig_mp->base, new_mp->size);
-toku_memory_dontneed_after_but_i_touched(new_mp->base, new_mp->size, 0, new_mp->size);
}
@@ -6,14 +6,10 @@
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
-/*
-Datatype mempool.
-Overview: a memory pool is a contiguous region of memory that supports single
-allocations from the pool. These allocated regions are never recycled.
+/* a memory pool is a contiguous region of memory that supports single
+allocations from the pool. these allocated regions are never recycled.
when the memory pool no longer has free space, the allocated chunks
-must be relocated by the application to a new memory pool.
-*/
+must be relocated by the application to a new memory pool. */
#include <sys/types.h>
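As context for the comment above, here is a minimal usage sketch built only from calls visible elsewhere in this commit (toku_mempool_init, toku_mempool_malloc, toku_mempool_inrange); the sizes and alignments are illustrative and error handling is elided:

#include "mempool.h"
#include <assert.h>
#include <stdlib.h>

int main(void) {
    // Back the pool with caller-owned memory; the pool only bumps
    // free_offset forward and never frees or recycles chunks.
    size_t siz = 1024 * 1024;
    void *backing = NULL;
    int r = posix_memalign(&backing, 4096, siz);
    assert(r == 0);

    struct mempool mp;
    toku_mempool_init(&mp, backing, siz);

    void *a = toku_mempool_malloc(&mp, 100, 8);  // 100 bytes, 8-byte aligned
    void *b = toku_mempool_malloc(&mp, 200, 8);
    assert(a != NULL && b != NULL);
    assert(toku_mempool_inrange(&mp, a, 100));

    // On exhaustion, the application must copy live chunks into a new,
    // larger pool, as the comment above describes.
    free(backing);
    return 0;
}

Because chunks are never recycled, tearing down a pool is a single free of the backing region.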
@@ -90,13 +86,6 @@ static inline int toku_mempool_inrange(struct mempool *mp, void *vp, size_t size
size_t toku_mempool_footprint(struct mempool *mp);
void toku_mempool_clone(struct mempool* orig_mp, struct mempool* new_mp);
-// Effect: Create a new mempool in which all the allocated objects are at the same offsets in the new pool as they were in the old pool.
-// Rationale: The fractal tree code clones a pool, copies the OMT (which contains pointers), and then adjusts all the pointers to be
-// old_value - original_base + new_base.
-// Question: Is this arithmetic, in general, defined in the C standard? You cannot, in general, subtract two
-// unrelated pointers. It is correct, however, to calculate (old_value-original_base)+new_base, since the
-// subexpression computes the offset within the original pool (that's an integer) and adds it to the
-// new base. So it's probably OK.
#if defined(__cplusplus) || defined(__cilkplusplus)
};
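The question in the removed comment can be answered with a short sketch. The adjust helper below is hypothetical (not part of this header); it computes exactly the (old_value-original_base)+new_base expression the comment defends:

#include <stddef.h>

// Hypothetical helper: old_value and original_base both point into the same
// (old) pool, so their difference is a well-defined integer offset, which is
// then added to the base of the cloned pool.
static void *adjust(void *old_value, void *original_base, void *new_base) {
    ptrdiff_t offset = (char *)old_value - (char *)original_base;
    return (char *)new_base + offset;
}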
/* Test to see if the mempool uses madvise to mitigate #4934. */
#include "mempool.h"
#include "memory.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
// Fake just enough of jemalloc's mallctl() for the memory layer's startup probe.
int mallctl(const char *op, void *r, size_t *rsize, void *s, size_t ssize);
int mallctl(const char *op, void *r, size_t *rsize, void *s, size_t ssize) {
if (strcmp(op, "version")==0) {
assert(*rsize == sizeof(char *));
char **rc=r;
*rc = "libc faking as jemalloc";
assert(s==NULL);
assert(ssize==0);
return 0;
} else if (strcmp(op, "opt.lg_chunk")==0) {
assert(*rsize==sizeof(size_t));
size_t *lg_chunk_p=r;
*lg_chunk_p = 22; // report a chunk size of 2^22 = 4MB
assert(s==NULL);
assert(ssize==0);
return 0;
} else {
assert(0);
}
return 0;
}
struct known_sizes {
void *p;
size_t size;
} known_sizes[100];
int n_known_sizes=0;
// Fake malloc_usable_size() that returns the sizes recorded in known_sizes.
size_t malloc_usable_size(const void *p);
size_t malloc_usable_size(const void *p) {
for (int i=0; i<n_known_sizes; i++) {
if (p==known_sizes[i].p) {
return known_sizes[i].size;
}
}
printf("p=%p\n", p);
abort();
}
void *mem;
int counter=0;
// Fake madvise() that checks the exact sequence of MADV_DONTNEED calls the mempool is expected to make.
int madvise (void *addr, size_t length, int advice) {
char *a=addr;
char *m=mem;
if (counter==0) {
assert(m+4096==a);
assert(length==16*1024*1024-4096);
assert(advice==MADV_DONTNEED);
} else if (counter==1) {
assert(m+2*1024*1024+4096==a);
assert(length==16*1024*1024-2*1024*1024-4096);
assert(advice==MADV_DONTNEED);
} else if (counter==2) {
assert(m+4*1024*1024+4096==a);
assert(length==16*1024*1024-4*1024*1024-4096);
assert(advice==MADV_DONTNEED);
} else {
printf("madvise(%p, 0x%zx, %d)\n", addr, length, advice);
abort();
}
counter++;
return 0;
}
int main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__unused__))) {
// toku_memory_startup(); is called by the linker.
struct mempool m;
size_t siz = 16*1024*1024;
{
int r = posix_memalign(&mem, 2*1024*1024, siz);
assert(r==0);
known_sizes[n_known_sizes++] = (struct known_sizes){mem, siz};
}
toku_mempool_init(&m, mem, siz);
void *a = toku_mempool_malloc(&m, 1, 1);
assert(a==mem);
void *b = toku_mempool_malloc(&m, 2*1024*1024 - 4096, 4096);
assert(b==(char*)mem+4096);
void *c = toku_mempool_malloc(&m, 1, 1);
assert(c==(char*)mem+2*1024*1024);
void *d = toku_mempool_malloc(&m, 2*1024*1024, 1);
assert(d==(char*)mem+2*1024*1024+1);
toku_free(mem);
return 0;
}
@@ -122,16 +122,6 @@ void toku_memory_get_status(LOCAL_MEMORY_STATUS s);
size_t toku_memory_footprint(void * p, size_t touched);
-void toku_memory_dontneed_after_but_i_touched(void *malloced_object, size_t malloced_size, size_t just_touched_start, size_t just_touched_length);
-// Effect: Tell the memory system that we just touched just_touched_length bytes starting at malloced_object[just_touched_start],
-// and that we don't care about anything after that. The memory system may call madvise(MADV_DONTNEED) on anything after that point.
-// The memory system should avoid calling madvise() unless it will do some good.
-// Implementation note: Whenever we cross a 2MB boundary (that is, just_touched_start-1 is in a different 2MB page from just_touched_start+just_touched_length-1),
-// we should call madvise(). Call madvise all the way to the end of malloc_usable_size_fun if that's known.
-// Rationale: In RHEL 6, huge pages get malloced for large objects. When we have touched only a few pages at the beginning, we want to madvise the rest as DONTNEED
-// so that the kernel will deaggregate the space.
-//
#if defined(__cplusplus) || defined(__cilkplusplus)
}
#endif
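For reference, a hedged sketch of the 2MB-boundary rule the removed note describes. This is one reading of that note (assuming a page-aligned allocation), not the removed implementation, whose exact call pattern is pinned down by the deleted test above:

#include <sys/mman.h>
#include <stddef.h>

enum { TWO_MB = 2 * 1024 * 1024, PAGE = 4096 };

// Illustrative only: if the fresh touch crossed into a new 2MB page (or is
// the very first touch), advise the kernel that everything from the first
// whole 4KB page past the touched bytes to the end of the allocation is not
// needed, so it can deaggregate transparent huge pages.
static void dontneed_after_sketch(void *obj, size_t malloced_size,
                                  size_t touched_start, size_t touched_len) {
    size_t end = touched_start + touched_len;
    if (touched_start > 0 &&
        (touched_start - 1) / TWO_MB == (end - 1) / TWO_MB)
        return;  // no 2MB boundary crossed: skip the useless syscall
    size_t tail = ((end + PAGE - 1) / PAGE) * PAGE;
    if (tail < malloced_size)
        madvise((char *)obj + tail, malloced_size - tail, MADV_DONTNEED);
}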