Commit f232856f authored by Alan Cox, committed by Linus Torvalds

[PATCH] Device Mapper, with updates

This is the device mapper with Joe's updates applied; it has been in the -ac tree for a while.
parent 6d23b118
@@ -122,3 +122,15 @@ CONFIG_MD_MULTIPATH
If unsure, say N.
CONFIG_BLK_DEV_DM
Device-mapper is a low-level volume manager. It works by allowing
people to specify mappings for ranges of logical sectors. Various
mapping types are available; in addition, people may write their own
modules containing custom mappings if they wish.
Higher level volume managers such as LVM2 use this driver.
If you want to compile this as a module, say M here and read
<file:Documentation/modules.txt>. The module will be called dm-mod.o.
If unsure, say N.
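As a rough illustration (the device name is hypothetical), a one-target
table that maps the first 2048 sectors of a logical device linearly onto
/dev/hdb, starting at its sector 0, could be written as:

  0 2048 linear /dev/hdb 0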
@@ -14,5 +14,6 @@ dep_tristate ' RAID-4/RAID-5 mode' CONFIG_MD_RAID5 $CONFIG_BLK_DEV_MD
dep_tristate ' Multipath I/O support' CONFIG_MD_MULTIPATH $CONFIG_BLK_DEV_MD
dep_tristate ' Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM $CONFIG_MD
dep_tristate ' Device mapper support' CONFIG_BLK_DEV_DM $CONFIG_MD
endmenu
@@ -2,8 +2,10 @@
# Makefile for the kernel software RAID and LVM drivers.
#
export-objs := md.o xor.o dm-table.o dm-target.o
lvm-mod-objs := lvm.o lvm-snap.o lvm-fs.o
dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
dm-ioctl.o
# Note: link order is important. All raid personalities
# and xor.o must come before md.o, as they each initialise
@@ -17,6 +19,7 @@ obj-$(CONFIG_MD_RAID5) += raid5.o xor.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_BLK_DEV_MD) += md.o
obj-$(CONFIG_BLK_DEV_LVM) += lvm-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
include $(TOPDIR)/Rules.make
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
*
* This file is released under the GPL.
*/
#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
/*
* Linear: maps a linear range of a device.
*/
struct linear_c {
struct dm_dev *dev;
sector_t start;
};
/*
* Construct a linear mapping: <dev_path> <offset>
*/
static int linear_ctr(struct dm_target *ti, int argc, char **argv)
{
struct linear_c *lc;
if (argc != 2) {
ti->error = "dm-linear: Not enough arguments";
return -EINVAL;
}
lc = kmalloc(sizeof(*lc), GFP_KERNEL);
if (lc == NULL) {
ti->error = "dm-linear: Cannot allocate linear context";
return -ENOMEM;
}
if (sscanf(argv[1], SECTOR_FORMAT, &lc->start) != 1) {
ti->error = "dm-linear: Invalid device sector";
goto bad;
}
if (dm_get_device(ti, argv[0], ti->begin, ti->len,
dm_table_get_mode(ti->table), &lc->dev)) {
ti->error = "dm-linear: Device lookup failed";
goto bad;
}
ti->private = lc;
return 0;
bad:
kfree(lc);
return -EINVAL;
}
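/*
 * Example (hypothetical device name): a constructor invoked with
 * argv = { "/dev/hdb1", "100" } maps this target onto /dev/hdb1,
 * starting at sector 100 of that device.
 */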
static void linear_dtr(struct dm_target *ti)
{
struct linear_c *lc = (struct linear_c *) ti->private;
dm_put_device(ti, lc->dev);
kfree(lc);
}
static int linear_map(struct dm_target *ti, struct bio *bio)
{
struct linear_c *lc = (struct linear_c *) ti->private;
bio->bi_bdev = lc->dev->bdev;
bio->bi_sector = lc->start + (bio->bi_sector - ti->begin);
return 1;
}
static int linear_status(struct dm_target *ti, status_type_t type,
char *result, int maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
switch (type) {
case STATUSTYPE_INFO:
result[0] = '\0';
break;
case STATUSTYPE_TABLE:
snprintf(result, maxlen, "%s " SECTOR_FORMAT,
kdevname(to_kdev_t(lc->dev->bdev->bd_dev)), lc->start);
break;
}
return 0;
}
static struct target_type linear_target = {
.name = "linear",
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
.map = linear_map,
.status = linear_status,
};
int __init dm_linear_init(void)
{
int r = dm_register_target(&linear_target);
if (r < 0)
DMERR("linear: register failed %d", r);
return r;
}
void dm_linear_exit(void)
{
int r = dm_unregister_target(&linear_target);
if (r < 0)
DMERR("linear: unregister failed %d", r);
}
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
*
* This file is released under the GPL.
*/
#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
struct stripe {
struct dm_dev *dev;
sector_t physical_start;
};
struct stripe_c {
uint32_t stripes;
/* The size of this target / num. stripes */
uint32_t stripe_width;
/* stripe chunk size */
uint32_t chunk_shift;
sector_t chunk_mask;
struct stripe stripe[0];
};
static inline struct stripe_c *alloc_context(int stripes)
{
size_t len;
if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
stripes))
return NULL;
len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
return kmalloc(len, GFP_KERNEL);
}
/*
* Parse a single <dev> <sector> pair
*/
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
int stripe, char **argv)
{
sector_t start;
if (sscanf(argv[1], SECTOR_FORMAT, &start) != 1)
return -EINVAL;
if (dm_get_device(ti, argv[0], start, sc->stripe_width,
dm_table_get_mode(ti->table),
&sc->stripe[stripe].dev))
return -ENXIO;
sc->stripe[stripe].physical_start = start;
return 0;
}
/*
* FIXME: Nasty function, only present because we can't link
* against __moddi3 and __divdi3.
*
* Returns whether a == b * (*n); on return *n holds a / b, rounded down.
*/
static int multiple(sector_t a, sector_t b, sector_t *n)
{
sector_t acc, prev, i;
*n = 0;
while (a >= b) {
for (acc = b, prev = 0, i = 1;
acc <= a;
prev = acc, acc <<= 1, i <<= 1)
;
a -= prev;
*n += i >> 1;
}
return a == 0;
}
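/*
 * Worked example: multiple(100, 20, &n) repeatedly subtracts the
 * largest power-of-two multiple of 20 that still fits (80, then 20),
 * leaving 0, so it sets n = 5 and returns 1; multiple(100, 30, &n)
 * sets n = 3 and returns 0 because a remainder of 10 is left.
 */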
/*
* Construct a striped mapping.
* <number of stripes> <chunk size (2^n)> [<dev_path> <offset>]+
*/
static int stripe_ctr(struct dm_target *ti, int argc, char **argv)
{
struct stripe_c *sc;
sector_t width;
uint32_t stripes;
uint32_t chunk_size;
char *end;
int r, i;
if (argc < 2) {
ti->error = "dm-stripe: Not enough arguments";
return -EINVAL;
}
stripes = simple_strtoul(argv[0], &end, 10);
if (*end) {
ti->error = "dm-stripe: Invalid stripe count";
return -EINVAL;
}
chunk_size = simple_strtoul(argv[1], &end, 10);
if (*end) {
ti->error = "dm-stripe: Invalid chunk_size";
return -EINVAL;
}
if (!multiple(ti->len, stripes, &width)) {
ti->error = "dm-stripe: Target length not divisable by "
"number of stripes";
return -EINVAL;
}
sc = alloc_context(stripes);
if (!sc) {
ti->error = "dm-stripe: Memory allocation for striped context "
"failed";
return -ENOMEM;
}
sc->stripes = stripes;
sc->stripe_width = width;
ti->split_io = chunk_size;
/*
* chunk_size is a power of two
*/
if (!chunk_size || (chunk_size & (chunk_size - 1))) {
ti->error = "dm-stripe: Invalid chunk size";
kfree(sc);
return -EINVAL;
}
sc->chunk_mask = ((sector_t) chunk_size) - 1;
for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
chunk_size >>= 1;
sc->chunk_shift--;
/*
* Get the stripe destinations.
*/
for (i = 0; i < stripes; i++) {
if (argc < 2) {
ti->error = "dm-stripe: Not enough destinations "
"specified";
kfree(sc);
return -EINVAL;
}
argv += 2;
r = get_stripe(ti, sc, i, argv);
if (r < 0) {
ti->error = "dm-stripe: Couldn't parse stripe "
"destination";
while (i--)
dm_put_device(ti, sc->stripe[i].dev);
kfree(sc);
return r;
}
}
ti->private = sc;
return 0;
}
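/*
 * Example (hypothetical devices): argv = { "2", "64", "/dev/hdb1", "0",
 * "/dev/hdc1", "0" } builds a two-way stripe with a 64-sector chunk
 * size, alternating between /dev/hdb1 and /dev/hdc1, each used from
 * sector 0 onwards.
 */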
static void stripe_dtr(struct dm_target *ti)
{
unsigned int i;
struct stripe_c *sc = (struct stripe_c *) ti->private;
for (i = 0; i < sc->stripes; i++)
dm_put_device(ti, sc->stripe[i].dev);
kfree(sc);
}
static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
sector_t offset = bio->bi_sector - ti->begin;
uint32_t chunk = (uint32_t) (offset >> sc->chunk_shift);
uint32_t stripe = chunk % sc->stripes; /* 32bit modulus */
chunk = chunk / sc->stripes;
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
bio->bi_sector = sc->stripe[stripe].physical_start +
(chunk << sc->chunk_shift) + (offset & sc->chunk_mask);
return 1;
}
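/*
 * Worked example for the mapping above: with 2 stripes and a 64-sector
 * chunk size (chunk_shift = 6, chunk_mask = 63), an offset of 200
 * sectors falls in chunk 3 (200 >> 6), which lands on stripe 1 (3 % 2)
 * as that stripe's chunk 1 (3 / 2), so the bio is redirected to sector
 * physical_start + (1 << 6) + (200 & 63) = physical_start + 72.
 */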
static int stripe_status(struct dm_target *ti,
status_type_t type, char *result, int maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
int offset;
int i;
switch (type) {
case STATUSTYPE_INFO:
result[0] = '\0';
break;
case STATUSTYPE_TABLE:
offset = snprintf(result, maxlen, "%d " SECTOR_FORMAT,
sc->stripes, sc->chunk_mask + 1);
for (i = 0; i < sc->stripes; i++) {
offset +=
snprintf(result + offset, maxlen - offset,
" %s " SECTOR_FORMAT,
kdevname(to_kdev_t(sc->stripe[i].dev->bdev->bd_dev)),
sc->stripe[i].physical_start);
}
break;
}
return 0;
}
static struct target_type stripe_target = {
.name = "striped",
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
.map = stripe_map,
.status = stripe_status,
};
int __init dm_stripe_init(void)
{
int r;
r = dm_register_target(&stripe_target);
if (r < 0)
DMWARN("striped target registration failed");
return r;
}
void dm_stripe_exit(void)
{
if (dm_unregister_target(&stripe_target))
DMWARN("striped target unregistration failed");
return;
}
/*
* Copyright (C) 2001 Sistina Software (UK) Limited
*
* This file is released under the GPL.
*/
#include "dm.h"
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/bio.h>
#include <linux/slab.h>
struct tt_internal {
struct target_type tt;
struct list_head list;
long use;
};
static LIST_HEAD(_targets);
static rwlock_t _lock = RW_LOCK_UNLOCKED;
#define DM_MOD_NAME_SIZE 32
static inline struct tt_internal *__find_target_type(const char *name)
{
struct list_head *tih;
struct tt_internal *ti;
list_for_each(tih, &_targets) {
ti = list_entry(tih, struct tt_internal, list);
if (!strcmp(name, ti->tt.name))
return ti;
}
return NULL;
}
static struct tt_internal *get_target_type(const char *name)
{
struct tt_internal *ti;
read_lock(&_lock);
ti = __find_target_type(name);
if (ti) {
if (ti->use == 0 && ti->tt.module)
__MOD_INC_USE_COUNT(ti->tt.module);
ti->use++;
}
read_unlock(&_lock);
return ti;
}
static void load_module(const char *name)
{
char module_name[DM_MOD_NAME_SIZE] = "dm-";
/* Length check for strcat() below */
if (strlen(name) > (DM_MOD_NAME_SIZE - 4))
return;
strcat(module_name, name);
request_module(module_name);
return;
}
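/*
 * For example, load_module("snapshot") asks for a module named
 * "dm-snapshot"; whether such a module actually exists is up to the
 * particular installation.
 */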
struct target_type *dm_get_target_type(const char *name)
{
struct tt_internal *ti = get_target_type(name);
if (!ti) {
load_module(name);
ti = get_target_type(name);
}
return ti ? &ti->tt : NULL;
}
void dm_put_target_type(struct target_type *t)
{
struct tt_internal *ti = (struct tt_internal *) t;
read_lock(&_lock);
if (--ti->use == 0 && ti->tt.module)
__MOD_DEC_USE_COUNT(ti->tt.module);
if (ti->use < 0)
BUG();
read_unlock(&_lock);
return;
}
static struct tt_internal *alloc_target(struct target_type *t)
{
struct tt_internal *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
if (ti) {
memset(ti, 0, sizeof(*ti));
ti->tt = *t;
}
return ti;
}
int dm_register_target(struct target_type *t)
{
int rv = 0;
struct tt_internal *ti = alloc_target(t);
if (!ti)
return -ENOMEM;
write_lock(&_lock);
if (__find_target_type(t->name))
rv = -EEXIST;
else
list_add(&ti->list, &_targets);
write_unlock(&_lock);
return rv;
}
int dm_unregister_target(struct target_type *t)
{
struct tt_internal *ti;
write_lock(&_lock);
if (!(ti = __find_target_type(t->name))) {
write_unlock(&_lock);
return -EINVAL;
}
if (ti->use) {
write_unlock(&_lock);
return -ETXTBSY;
}
list_del(&ti->list);
kfree(ti);
write_unlock(&_lock);
return 0;
}
/*
* io-err: always fails an io, useful for bringing
* up LVs that have holes in them.
*/
static int io_err_ctr(struct dm_target *ti, int argc, char **args)
{
return 0;
}
static void io_err_dtr(struct dm_target *ti)
{
/* empty */
return;
}
static int io_err_map(struct dm_target *ti, struct bio *bio)
{
bio_io_error(bio, 0);
return 0;
}
static struct target_type error_target = {
.name = "error",
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
};
int dm_target_init(void)
{
return dm_register_target(&error_target);
}
void dm_target_exit(void)
{
if (dm_unregister_target(&error_target))
DMWARN("error target unregistration failed");
}
EXPORT_SYMBOL(dm_register_target);
EXPORT_SYMBOL(dm_unregister_target);
/*
* Internal header file for device mapper
*
* Copyright (C) 2001, 2002 Sistina Software
*
* This file is released under the LGPL.
*/
#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H
#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/blkdev.h>
#define DM_NAME "device-mapper"
#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x)
#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x)
#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x)
/*
* FIXME: I think this should be with the definition of sector_t
* in types.h.
*/
#ifdef CONFIG_LBD
#define SECTOR_FORMAT "%Lu"
#else
#define SECTOR_FORMAT "%lu"
#endif
extern struct block_device_operations dm_blk_dops;
/*
* List of devices that a metadevice uses and should open/close.
*/
struct dm_dev {
struct list_head list;
atomic_t count;
int mode;
struct block_device *bdev;
};
struct dm_table;
struct mapped_device;
/*-----------------------------------------------------------------
* Functions for manipulating a struct mapped_device.
* Drop the reference with dm_put when you finish with the object.
*---------------------------------------------------------------*/
int dm_create(int minor, struct dm_table *table, struct mapped_device **md);
/*
* Reference counting for md.
*/
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);
/*
* A device can still be used while suspended, but I/O is deferred.
*/
int dm_suspend(struct mapped_device *md);
int dm_resume(struct mapped_device *md);
/*
* The device must be suspended before calling this method.
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *t);
/*
* Drop a reference on the table when you've finished with the
* result.
*/
struct dm_table *dm_get_table(struct mapped_device *md);
/*
* Info functions.
*/
kdev_t dm_kdev(struct mapped_device *md);
int dm_suspended(struct mapped_device *md);
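/*
 * Rough usage sketch (illustrative only; error handling omitted).
 * A device takes a table at creation time, must be suspended before
 * that table is swapped, and the reference obtained at creation is
 * dropped with dm_put():
 *
 *	struct mapped_device *md;
 *	dm_create(minor, table, &md);
 *	...
 *	dm_suspend(md);
 *	dm_swap_table(md, new_table);
 *	dm_resume(md);
 *	...
 *	dm_put(md);
 */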
/*-----------------------------------------------------------------
* Functions for manipulating a table. Tables are also reference
* counted.
*---------------------------------------------------------------*/
int dm_table_create(struct dm_table **result, int mode);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);
int dm_table_add_target(struct dm_table *t, const char *type,
sector_t start, sector_t len, char *params);
int dm_table_complete(struct dm_table *t);
void dm_table_event(struct dm_table *t);
sector_t dm_table_get_size(struct dm_table *t);
struct dm_target *dm_table_get_target(struct dm_table *t, int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
unsigned int dm_table_get_num_targets(struct dm_table *t);
struct list_head *dm_table_get_devices(struct dm_table *t);
int dm_table_get_mode(struct dm_table *t);
void dm_table_add_wait_queue(struct dm_table *t, wait_queue_t *wq);
/*-----------------------------------------------------------------
* A registry of target types.
*---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *t);
/*-----------------------------------------------------------------
* Useful inlines.
*---------------------------------------------------------------*/
static inline int array_too_big(unsigned long fixed, unsigned long obj,
unsigned long num)
{
return (num > (ULONG_MAX - fixed) / obj);
}
/*
* ceiling(n / size) * size
*/
static inline unsigned long dm_round_up(unsigned long n, unsigned long size)
{
unsigned long r = n % size;
return n + (r ? (size - r) : 0);
}
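/* For example, dm_round_up(11, 4) == 12 and dm_round_up(12, 4) == 12. */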
/*
* The device-mapper can be driven through one of two interfaces,
* ioctl or filesystem, depending on which patch you have applied.
*/
int dm_interface_init(void);
void dm_interface_exit(void);
/*
* Targets for linear and striped mappings
*/
int dm_linear_init(void);
void dm_linear_exit(void);
int dm_stripe_init(void);
void dm_stripe_exit(void);
#endif
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
*
* This file is released under the LGPL.
*/
#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H
#define DM_DIR "mapper" /* Slashes not supported */
#define DM_MAX_TYPE_NAME 16
#define DM_NAME_LEN 128
#define DM_UUID_LEN 129
#ifdef __KERNEL__
struct dm_target;
struct dm_table;
struct dm_dev;
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
/*
* In the constructor the target parameter will already have the
* table, type, begin and len fields filled in.
*/
typedef int (*dm_ctr_fn) (struct dm_target *target, int argc, char **argv);
/*
* The destructor doesn't need to free the dm_target, just
* anything hidden in ti->private.
*/
typedef void (*dm_dtr_fn) (struct dm_target *ti);
/*
* The map function must return:
* < 0: error
* = 0: The target will handle the io by resubmitting it later
* > 0: simple remap complete
*/
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
char *result, int maxlen);
void dm_error(const char *message);
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
* FIXME: too many arguments.
*/
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
sector_t len, int mode, struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
/*
* Information about a target type
*/
struct target_type {
const char *name;
struct module *module;
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
dm_status_fn status;
};
struct io_restrictions {
unsigned short max_sectors;
unsigned short max_phys_segments;
unsigned short max_hw_segments;
unsigned short hardsect_size;
unsigned int max_segment_size;
unsigned long seg_boundary_mask;
};
struct dm_target {
struct dm_table *table;
struct target_type *type;
/* target limits */
sector_t begin;
sector_t len;
/* FIXME: turn this into a mask, and merge with io_restrictions */
sector_t split_io;
/*
* These are automatically filled in by
* dm_table_get_device.
*/
struct io_restrictions limits;
/* target specific data */
void *private;
/* Used to provide an error string from the ctr */
char *error;
};
int dm_register_target(struct target_type *t);
int dm_unregister_target(struct target_type *t);
#endif /* __KERNEL__ */
#endif /* _LINUX_DEVICE_MAPPER_H */
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
*
* This file is released under the LGPL.
*/
#ifndef _LINUX_DM_IOCTL_H
#define _LINUX_DM_IOCTL_H
#include <linux/device-mapper.h>
#include <linux/types.h>
/*
* Implements a traditional ioctl interface to the device mapper.
*/
/*
* All ioctl arguments consist of a single chunk of memory, with
* this structure at the start. If a uuid is specified any
* lookup (eg. for a DM_INFO) will be done on that, *not* the
* name.
*/
struct dm_ioctl {
/*
* The version number is made up of three parts:
* major - no backward or forward compatibility,
* minor - only backwards compatible,
* patch - both backwards and forwards compatible.
*
* All clients of the ioctl interface should fill in the
* version number of the interface that they were
* compiled with.
*
* All recognised ioctl commands (ie. those that don't
* return -ENOTTY) fill out this field, even if the
* command failed.
*/
uint32_t version[3]; /* in/out */
uint32_t data_size; /* total size of data passed in
* including this struct */
uint32_t data_start; /* offset to start of data
* relative to start of this struct */
uint32_t target_count; /* in/out */
uint32_t open_count; /* out */
uint32_t flags; /* in/out */
__kernel_dev_t dev; /* in/out */
char name[DM_NAME_LEN]; /* device name */
char uuid[DM_UUID_LEN]; /* unique identifier for
* the block device */
};
/*
* Used to specify tables. These structures appear after the
* dm_ioctl.
*/
struct dm_target_spec {
int32_t status; /* used when reading from kernel only */
uint64_t sector_start;
uint32_t length;
/*
* Offset in bytes (from the start of this struct) to
* next target_spec.
*/
uint32_t next;
char target_type[DM_MAX_TYPE_NAME];
/*
* Parameter string starts immediately after this object.
* Be careful to add padding after string to ensure correct
* alignment of subsequent dm_target_spec.
*/
};
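/*
 * Illustrative buffer layout, as a userspace caller might build it
 * (all field values below are hypothetical):
 *
 *	offset 0:		struct dm_ioctl
 *				  version    = { 1, 0, 6 }
 *				  data_start = sizeof(struct dm_ioctl)
 *				  data_size  = total size of the buffer
 *	offset data_start:	struct dm_target_spec
 *				  sector_start = 0, length = 1024
 *				  target_type  = "linear"
 *				  next = offset of the following spec,
 *					 relative to this spec
 *	directly after it:	the parameter string, e.g. "/dev/hdb1 0",
 *				plus a NUL and padding so the next
 *				dm_target_spec is correctly aligned
 */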
/*
* Used to retrieve the target dependencies.
*/
struct dm_target_deps {
uint32_t count;
__kernel_dev_t dev[0]; /* out */
};
/*
* If you change this make sure you make the corresponding change
* to dm-ioctl.c:lookup_ioctl()
*/
enum {
/* Top level cmds */
DM_VERSION_CMD = 0,
DM_REMOVE_ALL_CMD,
/* device level cmds */
DM_DEV_CREATE_CMD,
DM_DEV_REMOVE_CMD,
DM_DEV_RELOAD_CMD,
DM_DEV_RENAME_CMD,
DM_DEV_SUSPEND_CMD,
DM_DEV_DEPS_CMD,
DM_DEV_STATUS_CMD,
/* target level cmds */
DM_TARGET_STATUS_CMD,
DM_TARGET_WAIT_CMD
};
#define DM_IOCTL 0xfd
#define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl)
#define DM_REMOVE_ALL _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl)
#define DM_DEV_CREATE _IOWR(DM_IOCTL, DM_DEV_CREATE_CMD, struct dm_ioctl)
#define DM_DEV_REMOVE _IOWR(DM_IOCTL, DM_DEV_REMOVE_CMD, struct dm_ioctl)
#define DM_DEV_RELOAD _IOWR(DM_IOCTL, DM_DEV_RELOAD_CMD, struct dm_ioctl)
#define DM_DEV_SUSPEND _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl)
#define DM_DEV_RENAME _IOWR(DM_IOCTL, DM_DEV_RENAME_CMD, struct dm_ioctl)
#define DM_DEV_DEPS _IOWR(DM_IOCTL, DM_DEV_DEPS_CMD, struct dm_ioctl)
#define DM_DEV_STATUS _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl)
#define DM_TARGET_STATUS _IOWR(DM_IOCTL, DM_TARGET_STATUS_CMD, struct dm_ioctl)
#define DM_TARGET_WAIT _IOWR(DM_IOCTL, DM_TARGET_WAIT_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 1
#define DM_VERSION_MINOR 0
#define DM_VERSION_PATCHLEVEL 6
#define DM_VERSION_EXTRA "-ioctl (2002-10-15)"
/* Status bits */
#define DM_READONLY_FLAG 0x00000001
#define DM_SUSPEND_FLAG 0x00000002
#define DM_EXISTS_FLAG 0x00000004
#define DM_PERSISTENT_DEV_FLAG 0x00000008
/*
* Flag passed into ioctl STATUS command to get table information
* rather than current status.
*/
#define DM_STATUS_TABLE_FLAG 0x00000010
#endif /* _LINUX_DM_IOCTL_H */