Commit 1086d8ce authored by Rusty Russell's avatar Rusty Russell

ptr_valid: test whether a ptr is valid.

Very slow, but sometimes you need to know without crashing.
parent 655bae65
../../licenses/BSD-MIT
\ No newline at end of file
#include "config.h"
#include <stdio.h>
#include <string.h>
/**
* ptr_valid - test whether a pointer is safe to dereference.
*
* This little helper tells you if an address is mapped; it doesn't tell you
* if it's read-only (or execute only).
*
* License: BSD-MIT
*
* Ccanlint:
* // Our child actually crashes, but that's OK!
* tests_pass_valgrind test/run.c:--child-silent-after-fork=yes
*/
int main(int argc, char *argv[])
{
	/* ccanlint invokes _info with exactly one keyword argument. */
	if (argc != 2)
		return 1;

	if (!strcmp(argv[1], "depends")) {
		/* List the ccan modules this module depends on. */
		printf("ccan/noerr\n");
		return 0;
	}

	/* Unknown query. */
	return 1;
}
// Licensed under BSD-MIT: See LICENSE.
#include "ptr_valid.h"
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <ccan/noerr/noerr.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#if HAVE_PROC_SELF_MAPS
/* Slurp the whole of @filename into a nul-terminated malloc'd buffer.
 * Returns NULL (with errno set by open/read/malloc) on failure. */
static char *grab(const char *filename)
{
	int fd;
	ssize_t ret;
	size_t max = 16384, s = 0;
	char *buffer;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		return NULL;

	buffer = malloc(max + 1);
	if (!buffer)
		goto close;

	while ((ret = read(fd, buffer + s, max - s)) > 0) {
		s += ret;
		if (s == max) {
			/* BUG FIX: don't overwrite buffer with realloc's
			 * result directly — on failure the old allocation
			 * was leaked (the old code jumped to close:
			 * without freeing it). */
			char *bigger = realloc(buffer, max * 2 + 1);
			if (!bigger)
				goto free;
			buffer = bigger;
			max *= 2;
		}
	}
	if (ret < 0)
		goto free;

	close(fd);
	buffer[s] = '\0';
	return buffer;

free:
	free(buffer);
close:
	/* close_noerr preserves errno from the failing call above. */
	close_noerr(fd);
	return NULL;
}
/* Return a pointer just past the next newline, or NULL if there is none. */
static char *skip_line(char *p)
{
	char *newline = strchr(p, '\n');

	return newline ? newline + 1 : NULL;
}
/* Append a mapping [start, end) to the array, growing it if full.
 * Returns the (possibly moved) array, or NULL on allocation failure.
 * On failure the old array is freed: the caller treats NULL as total
 * failure and never frees the previous pointer itself. */
static struct ptr_valid_map *add_map(struct ptr_valid_map *map,
				     unsigned int *num,
				     unsigned int *max,
				     unsigned long start, unsigned long end,
				     bool is_write)
{
	if (*num == *max) {
		/* BUG FIX: the old code assigned realloc's result straight
		 * to map, leaking the original array when realloc failed. */
		struct ptr_valid_map *bigger;

		bigger = realloc(map, sizeof(*map) * (*max * 2));
		if (!bigger) {
			free(map);
			return NULL;
		}
		map = bigger;
		*max *= 2;
	}
	map[*num].start = (void *)start;
	map[*num].end = (void *)end;
	map[*num].is_write = is_write;
	(*num)++;
	return map;
}
/* Parse /proc/self/maps into an array of the readable mappings.
 * On success returns a malloc'd array with its length in *num;
 * on any error (or malformed line) returns NULL with *num == 0. */
static struct ptr_valid_map *get_proc_maps(unsigned int *num)
{
	char *buf, *p;
	struct ptr_valid_map *map;
	unsigned int max = 16;

	buf = grab("/proc/self/maps");
	if (!buf) {
		*num = 0;
		return NULL;
	}

	map = malloc(sizeof(*map) * max);
	if (!map)
		goto free_buf;

	*num = 0;
	/* One mapping per line; stop at end of buffer or a missing newline. */
	for (p = buf; p && *p; p = skip_line(p)) {
		unsigned long start, end;
		char *endp;

		/* Expect '<start-in-hex>-<end-in-hex> rw... */
		start = strtoul(p, &endp, 16);
		if (*endp != '-')
			goto malformed;
		end = strtoul(endp+1, &endp, 16);
		if (*endp != ' ')
			goto malformed;

		/* Permission field: 'r' or '-' then 'w' or '-'. */
		endp++;
		if (endp[0] != 'r' && endp[0] != '-')
			goto malformed;
		if (endp[1] != 'w' && endp[1] != '-')
			goto malformed;

		/* We only add readable mappings. */
		if (endp[0] == 'r') {
			map = add_map(map, num, &max, start, end,
				      endp[1] == 'w');
			if (!map)
				goto free_buf;
		}
	}

	free(buf);
	return map;

malformed:
	free(map);
free_buf:
	free(buf);
	*num = 0;
	return NULL;
}
#else
/* No /proc/self/maps on this platform: report zero mappings, which
 * forces ptr_valid_batch() to fall back to the fork-based check. */
static struct ptr_valid_map *get_proc_maps(unsigned int *num)
{
	*num = 0;
	return NULL;
}
#endif
/* Answer the validity query from the cached /proc/self/maps data:
 * true iff [p, p+size) lies entirely within readable mappings
 * (writable ones, if @is_write).  Recurses when the range spans
 * more than one mapping. */
static bool check_with_maps(struct ptr_valid_batch *batch,
			    const char *p, size_t size, bool is_write)
{
	unsigned int i;

	for (i = 0; i < batch->num_maps; i++) {
		/* Ranges are half-open: [start, end). */
		if (p >= batch->maps[i].start && p < batch->maps[i].end) {
			/* Overlap into other maps? Recurse with remainder. */
			if (p + size > batch->maps[i].end) {
				size_t len = p + size - batch->maps[i].end;
				if (!check_with_maps(batch, batch->maps[i].end,
						     len, is_write))
					return false;
			}
			return !is_write || batch->maps[i].is_write;
		}
	}
	/* p isn't inside any known mapping. */
	return false;
}
/* Close both pipe ends to the checker child and reap it. */
static void finish_child(struct ptr_valid_batch *batch)
{
	close(batch->from_child);
	close(batch->to_child);
	waitpid(batch->child_pid, NULL, 0);
	batch->child_pid = 0;
}
/* Is a checker child currently running for this batch? */
static bool child_alive(struct ptr_valid_batch *batch)
{
	return batch->child_pid ? true : false;
}
/* Checker child's main loop: read (pointer, size, is_write) queries
 * from @infd, touch every byte of the range (rewriting each byte with
 * its own value when is_write), and ack success on @outfd.  If an
 * access faults, the child dies and the parent sees EOF — that *is*
 * the "invalid" answer.  Never returns; exits instead. */
static void run_child(int infd, int outfd)
{
	/* volatile so the reads/writes below can't be optimized away. */
	volatile char *p;

	/* This is how we expect to exit: EOF on the query pipe. */
	while (read(infd, &p, sizeof(p)) == sizeof(p)) {
		size_t i, size;
		bool is_write;
		char ret = 0;

		/* This is weird: a short read mid-message means the
		 * parent broke protocol; bail with a distinct code. */
		if (read(infd, &size, sizeof(size)) != sizeof(size))
			exit(1);
		if (read(infd, &is_write, sizeof(is_write)) != sizeof(is_write))
			exit(2);

		/* Touch each byte; a fault here kills us (expected). */
		for (i = 0; i < size; i++) {
			ret = p[i];
			if (is_write)
				p[i] = ret;
		}

		/* If we're still here, the answer is "yes". */
		if (write(outfd, &ret, 1) != 1)
			exit(3);
	}
	exit(0);
}
/* Fork a checker child connected by one pipe in each direction.
 * On success fills in batch->child_pid/to_child/from_child and returns
 * true; on failure returns false with all fds cleaned up and errno
 * preserved from the failing call. */
static bool create_child(struct ptr_valid_batch *batch)
{
	/* outpipe: parent -> child queries; inpipe: child -> parent acks. */
	int outpipe[2], inpipe[2];

	if (pipe(outpipe) != 0)
		return false;
	if (pipe(inpipe) != 0)
		goto close_outpipe;

	/* Avoid duplicated buffered output after fork. */
	fflush(stdout);
	batch->child_pid = fork();
	if (batch->child_pid == 0) {
		/* Child: keep only the ends it uses, then serve queries
		 * forever (run_child never returns). */
		close(outpipe[1]);
		close(inpipe[0]);
		run_child(outpipe[0], inpipe[1]);
	}

	if (batch->child_pid == -1)
		goto cleanup_pid;

	/* Parent: keep the opposite ends. */
	close(outpipe[0]);
	close(inpipe[1]);
	batch->to_child = outpipe[1];
	batch->from_child = inpipe[0];
	return true;

cleanup_pid:
	batch->child_pid = 0;
	/* close_noerr so the fork() errno survives the cleanup. */
	close_noerr(inpipe[0]);
	close_noerr(inpipe[1]);
close_outpipe:
	close_noerr(outpipe[0]);
	close_noerr(outpipe[1]);
	return false;
}
/* Ask the checker child whether [p, p+size) is dereferenceable
 * (writably, if @is_write).  Returns false with errno = EFAULT when
 * the child dies attempting the access, or when we cannot talk to it. */
static bool check_with_child(struct ptr_valid_batch *batch,
			     const void *p, size_t size, bool is_write)
{
	char ret;

	/* Lazily (re)spawn the child: it dies on every "no" answer. */
	if (!child_alive(batch)) {
		if (!create_child(batch))
			return false;
	}

	/* BUG FIX: the old code ignored these write() results.  If a
	 * write fails the child is gone and the protocol is broken, so
	 * reap it and report failure instead of silently carrying on. */
	if (write(batch->to_child, &p, sizeof(p)) != sizeof(p)
	    || write(batch->to_child, &size, sizeof(size)) != sizeof(size)
	    || write(batch->to_child, &is_write, sizeof(is_write))
	       != sizeof(is_write)) {
		finish_child(batch);
		errno = EFAULT;
		return false;
	}

	/* EOF here means the child crashed on the access: invalid. */
	if (read(batch->from_child, &ret, sizeof(ret)) != sizeof(ret)) {
		finish_child(batch);
		errno = EFAULT;
		return false;
	}
	return true;
}
/* msync seems most well-defined test, but page could be mapped with
 * no permissions, and can't distinguish readonly from writable. */
bool ptr_valid_batch(struct ptr_valid_batch *batch,
		     const void *p, size_t alignment, size_t size, bool write)
{
	char *start, *end;
	bool ret;

	/* Misaligned pointers are invalid by definition.
	 * assumes alignment is a power of 2 — TODO confirm for callers. */
	if ((intptr_t)p & (alignment - 1))
		return false;

	start = (void *)((intptr_t)p & ~(getpagesize() - 1));
	end = (void *)(((intptr_t)p + size - 1) & ~(getpagesize() - 1));

	/* We cache single page hits. */
	if (start == end) {
		if (batch->last && batch->last == start)
			return batch->last_ok;
	}

	if (batch->num_maps)
		ret = check_with_maps(batch, p, size, write);
	else
		ret = check_with_child(batch, p, size, write);

	/* BUG FIX: the cache doesn't record the write flag, so only
	 * cache answers valid for *both* kinds of query: a passed write
	 * check implies readable, and a failed read check implies
	 * unwritable.  (Previously a cached read-OK result on a
	 * read-only page wrongly satisfied a later write query.) */
	if (start == end && ((write && ret) || (!write && !ret))) {
		batch->last = start;
		batch->last_ok = ret;
	}
	return ret;
}
/* Walk the string byte by byte until we hit the nul terminator (valid)
 * or a byte that can't be read (invalid). */
bool ptr_valid_batch_string(struct ptr_valid_batch *batch, const char *p)
{
	for (; ptr_valid_batch(batch, p, 1, 1, false); p++) {
		if (*p == '\0')
			return true;
	}
	return false;
}
/* One-shot validity check: set up a throwaway batch, query, tear down. */
bool ptr_valid(const void *p, size_t alignment, size_t size, bool write)
{
	struct ptr_valid_batch batch;
	bool ok = false;

	if (ptr_valid_batch_start(&batch)) {
		ok = ptr_valid_batch(&batch, p, alignment, size, write);
		ptr_valid_batch_end(&batch);
	}
	return ok;
}
/* One-shot string check: set up a throwaway batch, query, tear down. */
bool ptr_valid_string(const char *p)
{
	struct ptr_valid_batch batch;
	bool ok = false;

	if (ptr_valid_batch_start(&batch)) {
		ok = ptr_valid_batch_string(&batch, p);
		ptr_valid_batch_end(&batch);
	}
	return ok;
}
/* Initialize a batch: snapshot /proc/self/maps if available (maps ==
 * NULL and num_maps == 0 otherwise, selecting the fork-based check).
 * Always succeeds; the return value exists for future failure modes. */
bool ptr_valid_batch_start(struct ptr_valid_batch *batch)
{
	batch->child_pid = 0;
	batch->maps = get_proc_maps(&batch->num_maps);
	batch->last = NULL;
	/* Robustness: don't leave the cached answer indeterminate. */
	batch->last_ok = false;
	return true;
}
/* Tear down a batch: free the maps snapshot and reap any checker child. */
void ptr_valid_batch_end(struct ptr_valid_batch *batch)
{
	free(batch->maps);
	if (child_alive(batch))
		finish_child(batch);
}
// Licensed under BSD-MIT: See LICENSE.
#ifndef CCAN_PTR_VALID_H
#define CCAN_PTR_VALID_H
#include "config.h"
#include <stdbool.h>
#include <stdlib.h>
/**
* ptr_valid_read - can I safely read from a pointer?
* @p: the proposed pointer.
*
* This function verifies that the pointer @p is safe to dereference for
* reading. It is very slow, particularly if the answer is "no".
*
* Sets errno to EFAULT on failure.
*
* See Also:
* ptr_valid_batch_read()
*/
#define ptr_valid_read(p) \
ptr_valid_r((p), PTR_VALID_ALIGNOF(*(p)), sizeof(*(p)))
/**
* ptr_valid_write - can I safely write to a pointer?
* @p: the proposed pointer.
*
* This function verifies that the pointer @p is safe to dereference
* for writing (and reading). It is very slow, particularly if the
* answer is "no".
*
* Sets errno to EFAULT on failure.
*
* See Also:
* ptr_valid_batch_write()
*/
#define ptr_valid_write(p) \
ptr_valid_w((p), PTR_VALID_ALIGNOF(*(p)), sizeof(*(p)))
/**
* ptr_valid_string - can I safely read a string?
* @p: the proposed string.
*
* This function verifies that the pointer @p is safe to dereference
* up to a nul character. It is very slow, particularly if the answer
* is "no".
*
* Sets errno to EFAULT on failure.
*
* See Also:
* ptr_valid_batch_string()
*/
bool ptr_valid_string(const char *p);
/**
* ptr_valid - generic pointer check function
* @p: the proposed pointer.
* @align: the alignment requirements of the pointer.
* @size: the size of the region @p should point to
* @write: true if @p should be writable as well as readable.
*
* This function verifies that the pointer @p is safe to dereference.
* It is very slow, particularly if the answer is "no".
*
* Sets errno to EFAULT on failure.
*
* See Also:
* ptr_valid_batch()
*/
bool ptr_valid(const void *p, size_t align, size_t size, bool write);
/**
 * struct ptr_valid_batch - state for a batch of pointer-validity checks
 *
 * Treat as private.
 */
struct ptr_valid_batch {
	unsigned int num_maps;		/* entries in maps[]; 0 => use child */
	struct ptr_valid_map *maps;	/* parsed /proc/self/maps, or NULL */
	int child_pid;			/* checker child pid, 0 if none */
	int to_child, from_child;	/* pipe fds to/from the child */
	void *last;			/* page of the last single-page check */
	bool last_ok;			/* cached answer for that page */
};
/**
* ptr_valid_batch_start - prepare for a batch of ptr_valid checks.
* @batch: an uninitialized ptr_valid_batch structure.
*
* This initializes @batch; this same @batch pointer can be reused
* until the memory map changes (eg. via mmap(), munmap() or even
* malloc() and free()).
*
* This is useful to check many pointers, because otherwise it can be
* extremely slow.
*
* Example:
* struct linked {
* struct linked *next;
* const char *str;
* };
*
 * static bool check_linked_carefully(struct linked *head)
 * {
 *	struct ptr_valid_batch batch;
 *	struct linked *old = head;
 *	bool advance_old = false;
 *
 *	// If this fails, we can't check.  Assume OK.
 *	if (!ptr_valid_batch_start(&batch))
 *		return true;
 *
 *	while (head) {
 *		if (!ptr_valid_batch_read(&batch, head))
 *			goto fail;
 *		if (!ptr_valid_batch_string(&batch, head->str))
 *			goto fail;
 *		head = head->next;
 *		// Loop detection: move old at half the speed of head;
 *		// in a cycle, head must eventually catch old.
 *		if (advance_old)
 *			old = old->next;
 *		advance_old = !advance_old;
 *		if (head == old) {
 *			errno = ELOOP;
 *			goto fail;
 *		}
 *	}
 *	ptr_valid_batch_end(&batch);
 *	return true;
 *
 * fail:
 *	ptr_valid_batch_end(&batch);
 *	return false;
 * }
*
* See Also:
* ptr_valid_batch_end()
*/
bool ptr_valid_batch_start(struct ptr_valid_batch *batch);
/**
* ptr_valid_batch_read - can I safely read from a pointer?
* @batch: the batch initialized by ptr_valid_batch_start().
* @p: the proposed pointer.
*
* Batched version of ptr_valid_read().
*/
#define ptr_valid_batch_read(batch, p) \
ptr_valid_batch_r((batch), \
(p), PTR_VALID_ALIGNOF(*(p)), sizeof(*(p)))
/**
* ptr_valid_batch_write - can I safely write to a pointer?
* @batch: the batch initialized by ptr_valid_batch_start().
* @p: the proposed pointer.
*
* Batched version of ptr_valid_write().
*/
#define ptr_valid_batch_write(batch, p) \
ptr_valid_batch_w((batch), \
(p), PTR_VALID_ALIGNOF(*(p)), sizeof(*(p)))
/**
* ptr_valid_batch_string - can I safely read a string?
* @batch: the batch initialized by ptr_valid_batch_start().
* @p: the proposed string.
*
* Batched version of ptr_valid_string().
*/
bool ptr_valid_batch_string(struct ptr_valid_batch *batch, const char *p);
/**
* ptr_valid_batch - generic batched pointer check function
* @batch: the batch initialized by ptr_valid_batch_start().
* @p: the proposed pointer.
* @align: the alignment requirements of the pointer.
* @size: the size of the region @p should point to
* @write: true if @p should be writable as well as readable.
*
* Batched version of ptr_valid().
*/
bool ptr_valid_batch(struct ptr_valid_batch *batch,
const void *p, size_t alignment, size_t size, bool write);
/**
* ptr_valid_batch_end - end a batch of ptr_valid checks.
* @batch: a ptr_valid_batch structure.
*
* This is used after all checks are complete.
*
* See Also:
* ptr_valid_batch_start()
*/
void ptr_valid_batch_end(struct ptr_valid_batch *batch);
/* These wrappers get constness correct: read checks accept const
 * pointers, write checks require non-const ones. */
static inline bool ptr_valid_r(const void *p, size_t align, size_t size)
{
	return ptr_valid(p, align, size, false);
}
/* Write-check wrapper: non-const pointer, write == true. */
static inline bool ptr_valid_w(void *p, size_t align, size_t size)
{
	return ptr_valid(p, align, size, true);
}
/* Batched read-check wrapper: const pointer, write == false. */
static inline bool ptr_valid_batch_r(struct ptr_valid_batch *batch,
				     const void *p, size_t align, size_t size)
{
	return ptr_valid_batch(batch, p, align, size, false);
}
/* Batched write-check wrapper: non-const pointer, write == true. */
static inline bool ptr_valid_batch_w(struct ptr_valid_batch *batch,
				     void *p, size_t align, size_t size)
{
	return ptr_valid_batch(batch, p, align, size, true);
}
/* One readable mapping parsed from /proc/self/maps. */
struct ptr_valid_map {
	const char *start, *end;	/* half-open range [start, end) */
	bool is_write;			/* mapping has the 'w' permission */
};
#if HAVE_ALIGNOF
#define PTR_VALID_ALIGNOF(var) __alignof__(var)
#else
/* Can't check this... */
#define PTR_VALID_ALIGNOF(var) 1
#endif
#endif /* CCAN_PTR_VALID_H */
#include <ccan/ptr_valid/ptr_valid.h>
/* Include the C files directly. */
#include <ccan/ptr_valid/ptr_valid.c>
#include <ccan/tap/tap.h>
#include <sys/mman.h>
/* TAP test: string validity across mapped, unmapped and truncated pages. */
int main(void)
{
	char *page;
	/* NOTE(review): malloc and mmap results are unchecked — a failure
	 * here would crash before TAP reports anything; acceptable in a
	 * test, but worth confirming that's intended. */
	struct ptr_valid_batch *batch = malloc(sizeof *batch);

	/* This is how many tests you plan to run */
	plan_tests(14);

	/* Fresh read/write anonymous page holding a short string. */
	page = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE,
		    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	strcpy(page, "hello");
	ok1(ptr_valid_read(page));
	ok1(ptr_valid_write(page));
	ok1(ptr_valid_string(page));
	ok1(ptr_valid_batch_start(batch));
	ok1(ptr_valid_batch_string(batch, page));
	ptr_valid_batch_end(batch);

	/* Check invalid case. */
	munmap(page, getpagesize());
	ok1(!ptr_valid_string(page));
	ok1(ptr_valid_batch_start(batch));
	ok1(!ptr_valid_batch_string(batch, page));
	ptr_valid_batch_end(batch);

	/* Check for overrun: unterminated string runs off the mapping. */
	page = mmap(NULL, getpagesize()*2, PROT_READ|PROT_WRITE,
		    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	munmap(page + getpagesize(), getpagesize());
	memset(page, 'a', getpagesize());
	ok1(!ptr_valid_string(page));
	ok1(ptr_valid_batch_start(batch));
	ok1(!ptr_valid_batch_string(batch, page));
	ptr_valid_batch_end(batch);

	/* Terminate within the mapping: now the string is valid. */
	page[getpagesize()-1] = '\0';
	ok1(ptr_valid_string(page));
	ok1(ptr_valid_batch_start(batch));
	ok1(ptr_valid_batch_string(batch, page));
	ptr_valid_batch_end(batch);
	munmap(page, getpagesize());
	free(batch);

	/* This exits depending on whether all tests passed */
	return exit_status();
}
#include <ccan/ptr_valid/ptr_valid.h>
/* Include the C files directly. */
#include <ccan/ptr_valid/ptr_valid.c>
#include <ccan/tap/tap.h>
#include <sys/mman.h>
/* Check @num bytes starting at @p, expecting every read and write
 * probe to return @expect.  Returns true if all probes agree. */
static bool check_batch(char *p, unsigned int num, bool expect)
{
	struct ptr_valid_batch batch;
	unsigned int i;
	bool ok = true;

	if (!ptr_valid_batch_start(&batch))
		return false;
	for (i = 0; ok && i < num; i++) {
		if (ptr_valid_batch(&batch, p + i, 1, 1, false) != expect)
			ok = false;
		else if (ptr_valid_batch(&batch, p + i, 1, 1, true) != expect)
			ok = false;
	}
	/* BUG FIX: the old code returned straight out of the loop on a
	 * mismatch, skipping ptr_valid_batch_end() and so leaking the
	 * maps array and any checker child process. */
	ptr_valid_batch_end(&batch);
	return ok;
}
/* TAP test: validity, writability, alignment and overrun behaviour
 * across read/write, unmapped and read-only anonymous pages. */
int main(void)
{
	char *page;

	/* This is how many tests you plan to run */
	plan_tests(30);

	/* Read/write anonymous page: everything should pass. */
	page = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE,
		    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	ok1(ptr_valid_read(page));
	ok1(ptr_valid_write(page));
	ok1(ptr_valid(page, 1, getpagesize(), false));
	ok1(ptr_valid(page, 1, getpagesize(), true));
	/* Test alignment constraints. */
	ok1(ptr_valid(page, getpagesize(), getpagesize(), false));
	ok1(ptr_valid(page, getpagesize(), getpagesize(), true));
	ok1(!ptr_valid(page+1, getpagesize(), 1, false));
	ok1(!ptr_valid(page+1, getpagesize(), 1, true));
	/* Test batch. */
	ok1(check_batch(page, getpagesize(), true));

	/* Unmap, all should fail. */
	munmap(page, getpagesize());
	ok1(!ptr_valid_read(page));
	ok1(!ptr_valid_write(page));
	ok1(!ptr_valid(page, 1, getpagesize(), false));
	ok1(!ptr_valid(page, 1, getpagesize(), true));
	/* Test alignment constraints. */
	ok1(!ptr_valid(page, getpagesize(), getpagesize(), false));
	ok1(!ptr_valid(page, getpagesize(), getpagesize(), true));
	ok1(!ptr_valid(page+1, getpagesize(), 1, false));
	/* NOTE(review): asymmetric with the line above — presumably this
	 * was meant to be page+1 too; page is unmapped so it fails either
	 * way.  Confirm intent before changing. */
	ok1(!ptr_valid(page, getpagesize(), 1, true));
	/* Test batch (slow, since each fails, so reduce count). */
	ok1(check_batch(page, 4, false));

	/* Check read-only */
	page = mmap(NULL, getpagesize(), PROT_READ,
		    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	ok1(ptr_valid_read(page));
	ok1(!ptr_valid_write(page));
	ok1(ptr_valid(page, 1, getpagesize(), false));
	ok1(!ptr_valid(page, 1, getpagesize(), true));
	/* Test alignment constraints. */
	ok1(ptr_valid(page, getpagesize(), getpagesize(), false));
	ok1(!ptr_valid(page, getpagesize(), getpagesize(), true));
	ok1(!ptr_valid(page+1, getpagesize(), 1, false));
	ok1(!ptr_valid(page+1, getpagesize(), 1, true));
	munmap(page, getpagesize());

	/* Check for overrun: second of two pages is unmapped. */
	page = mmap(NULL, getpagesize()*2, PROT_READ|PROT_WRITE,
		    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	munmap(page + getpagesize(), getpagesize());
	ok1(ptr_valid(page, 1, getpagesize(), false));
	ok1(ptr_valid(page, 1, getpagesize(), true));
	ok1(!ptr_valid(page, 1, getpagesize()+1, false));
	ok1(!ptr_valid(page, 1, getpagesize()+1, true));

	/* This exits depending on whether all tests passed */
	return exit_status();
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment