Commit a4a82ce3 authored by Heinz Mauelshagen, committed by Mike Snitzer

dm: correct block comments format.

Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 255e2646
......@@ -259,9 +259,11 @@ static void buffer_record_stack(struct dm_buffer *b)
}
#endif
-/*----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
  * A red/black tree acts as an index for all the buffers.
- *--------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
struct rb_node *n = c->buffer_tree.rb_node;
......@@ -562,7 +564,8 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
b->last_accessed = jiffies;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------------------
  * Submit I/O on the buffer.
  *
  * Bio interface is faster but it has some problems:
......@@ -578,7 +581,8 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
  * rejects the bio because it is too large, use dm-io layer to do the I/O.
  * The dm-io layer splits the I/O into multiple requests, avoiding the above
  * shortcomings.
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
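To make the splitting strategy described above concrete: here is a minimal userspace sketch, not the kernel code (issue() and the 256-sector cap are hypothetical stand-ins), of carving one oversized request into pieces the device will accept, which is essentially what the comment credits the dm-io layer with doing.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Stand-in for dispatching one sub-request (hypothetical helper). */
static void issue(sector_t begin, sector_t len)
{
        printf("sub-request: start %llu, %llu sectors\n", begin, len);
}

/* Split [sector, sector + n_sectors) into max_sectors-sized pieces. */
static void split_io(sector_t sector, sector_t n_sectors, sector_t max_sectors)
{
        while (n_sectors) {
                sector_t len = n_sectors < max_sectors ? n_sectors : max_sectors;

                issue(sector, len);
                sector += len;
                n_sectors -= len;
        }
}

int main(void)
{
        split_io(0, 1000, 256); /* 1000 sectors -> four sub-requests */
        return 0;
}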
/*
* dm-io completion routine. It just calls b->bio.bi_end_io, pretending
......@@ -717,9 +721,11 @@ static void submit_io(struct dm_buffer *b, enum req_op op,
use_dmio(b, op, sector, n_sectors, offset);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Writing dirty buffers
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* The endio routine for write.
......@@ -999,9 +1005,11 @@ static void __check_watermark(struct dm_bufio_client *c,
__write_dirty_buffers_async(c, 1, write_list);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Getting a buffer
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
enum new_flag nf, int *need_submit,
......@@ -2060,9 +2068,11 @@ static void work_fn(struct work_struct *w)
DM_BUFIO_WORK_TIMER_SECS * HZ);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Module setup
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* This is called only once for the whole dm_bufio module.
......
......@@ -163,10 +163,11 @@ struct dm_cache_metadata {
struct dm_bitset_cursor dirty_cursor;
};
-/*-------------------------------------------------------------------
+/*
+ *-----------------------------------------------------------------
  * superblock validator
- *-----------------------------------------------------------------*/
+ *-----------------------------------------------------------------
+ */
#define SUPERBLOCK_CSUM_XOR 9031977
static void sb_prepare_for_write(struct dm_block_validator *v,
......
......@@ -525,9 +525,11 @@ static unsigned int lock_level(struct bio *bio)
READ_WRITE_LOCK_LEVEL;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Per bio data
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
......@@ -706,9 +708,11 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
return r;
}
-/*----------------------------------------------------------------
+/*
+ * -------------------------------------------------------------
  * Remapping
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
bio_set_dev(bio, cache->origin_dev->bdev);
......@@ -834,9 +838,11 @@ static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
remap_to_cache(cache, bio, cblock);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Failure modes
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{
return cache->features.mode;
......@@ -973,13 +979,14 @@ static void update_stats(struct cache_stats *stats, enum policy_operation op)
}
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
  * Migration processing
  *
  * Migration covers moving data from the origin device to the cache, or
  * vice versa.
- *--------------------------------------------------------------*/
+ *---------------------------------------------------------------------
+ */
static void inc_io_migrations(struct cache *cache)
{
atomic_inc(&cache->nr_io_migrations);
......@@ -1431,9 +1438,11 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
return mg_lock_writes(mg);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * invalidation processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{
......@@ -1554,9 +1563,11 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return invalidate_lock(mg);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * bio processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
enum busy {
IDLE,
......@@ -1764,9 +1775,11 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
{
dm_dblock_t b, e;
-// FIXME: do we need to lock the region? Or can we just assume the
-// user wont be so foolish as to issue discard concurrently with
-// other IO?
+/*
+ * FIXME: do we need to lock the region? Or can we just assume the
+ * user wont be so foolish as to issue discard concurrently with
+ * other IO?
+ */
calc_discard_block_range(cache, bio, &b, &e);
while (b != e) {
set_discard(cache, b);
......@@ -1812,10 +1825,11 @@ static void process_deferred_bios(struct work_struct *ws)
schedule_commit(&cache->committer);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Main worker loop
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void requeue_deferred_bios(struct cache *cache)
{
struct bio *bio;
......@@ -1871,9 +1885,11 @@ static void check_migrations(struct work_struct *ws)
}
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* This function gets called on the error paths of the constructor, so we
......
......@@ -364,9 +364,10 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
bs = crypto_skcipher_blocksize(any_tfm(cc));
log = ilog2(bs);
-/* we need to calculate how far we must shift the sector count
- * to get the cipher block count, we use this shift in _gen */
+/*
+ * We need to calculate how far we must shift the sector count
+ * to get the cipher block count, we use this shift in _gen.
+ */
if (1 << log != bs) {
ti->error = "cypher blocksize is not a power of 2";
return -EINVAL;
......
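The shift that comment refers to can be made concrete with a small worked example. The sketch below is hypothetical userspace code, not dm-crypt itself: with 512-byte (2^9) sectors and a cipher block size of bs = 2^log bytes, a sector count becomes a cipher block count when shifted left by (9 - log).

#include <assert.h>

static unsigned int benbi_shift(unsigned int bs)
{
        unsigned int log = 0;

        while ((1u << log) < bs)
                log++;
        assert((1u << log) == bs);      /* blocksize must be a power of 2 */
        return 9 - log;                 /* sector shift minus block shift */
}

int main(void)
{
        /* AES: bs = 16 -> log = 4 -> shift = 5.
         * 8 sectors (4096 bytes) << 5 == 256 sixteen-byte cipher blocks. */
        assert(benbi_shift(16) == 5);
        assert((8u << benbi_shift(16)) == 256);
        return 0;
}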
......@@ -22,9 +22,11 @@
#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
#define MIN_BLOCK_SIZE 8
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Writeset
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
struct writeset_metadata {
uint32_t nr_bits;
dm_block_t root;
......@@ -148,9 +150,11 @@ static int writeset_test_and_set(struct dm_disk_bitset *info,
return 1;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * On disk metadata layout
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define SPACE_MAP_ROOT_SIZE 128
#define UUID_LEN 16
......@@ -186,9 +190,11 @@ struct superblock_disk {
__le64 metadata_snap;
} __packed;
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Superblock validation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void sb_prepare_for_write(struct dm_block_validator *v,
struct dm_block *b,
size_t sb_block_size)
......@@ -252,9 +258,11 @@ static struct dm_block_validator sb_validator = {
.check = sb_check
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Low level metadata handling
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define DM_ERA_METADATA_BLOCK_SIZE 4096
#define ERA_MAX_CONCURRENT_LOCKS 5
......@@ -658,13 +666,15 @@ static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset
synchronize_rcu();
}
-/*----------------------------------------------------------------
+/*
+ *------------------------------------------------------------------------
  * Writesets get 'digested' into the main era array.
  *
  * We're using a coroutine here so the worker thread can do the digestion,
  * thus avoiding synchronisation of the metadata. Digesting a whole
  * writeset in one go would cause too much latency.
- *--------------------------------------------------------------*/
+ *------------------------------------------------------------------------
+ */
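The 'coroutine' the comment describes amounts to keeping the digest position in a struct and bounding the work done per call. A minimal userspace sketch of that pattern (BITS_PER_STEP and the names are illustrative, not dm-era's):

#include <stdbool.h>
#include <stdio.h>

struct digest {
        unsigned int nr_bits, current_bit;
};

#define BITS_PER_STEP 64

/* Returns true while more calls are needed. */
static bool digest_step(struct digest *d)
{
        unsigned int end = d->current_bit + BITS_PER_STEP;

        if (end > d->nr_bits)
                end = d->nr_bits;
        for (; d->current_bit < end; d->current_bit++)
                ;       /* ... fold this bit into the era array ... */
        return d->current_bit < d->nr_bits;
}

int main(void)
{
        struct digest d = { .nr_bits = 200, .current_bit = 0 };
        int steps = 0;

        while (digest_step(&d))
                steps++;
        printf("finished in %d resumable steps\n", steps + 1);
        return 0;
}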
struct digest {
uint32_t era;
unsigned int nr_bits, current_bit;
......@@ -784,10 +794,12 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d)
return 0;
}
-/*----------------------------------------------------------------
- * High level metadata interface. Target methods should use these, and not
- * the lower level ones.
- *--------------------------------------------------------------*/
+/*
+ *-----------------------------------------------------------------
+ * High level metadata interface. Target methods should use these,
+ * and not the lower level ones.
+ *-----------------------------------------------------------------
+ */
static struct era_metadata *metadata_open(struct block_device *bdev,
sector_t block_size,
bool may_format)
......@@ -1189,9 +1201,11 @@ struct rpc {
struct completion complete;
};
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Remapping.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static bool block_size_is_power_of_two(struct era *era)
{
return era->sectors_per_block_shift >= 0;
......@@ -1214,9 +1228,11 @@ static void remap_to_origin(struct era *era, struct bio *bio)
bio_set_dev(bio, era->origin_dev->bdev);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Worker thread
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void wake_worker(struct era *era)
{
if (!atomic_read(&era->suspended))
......@@ -1403,9 +1419,11 @@ static void stop_worker(struct era *era)
drain_workqueue(era->wq);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void era_destroy(struct era *era)
{
if (era->md)
......
......@@ -81,13 +81,15 @@ void dm_io_client_destroy(struct dm_io_client *client)
}
EXPORT_SYMBOL(dm_io_client_destroy);
-/*-----------------------------------------------------------------
+/*
+ *-------------------------------------------------------------------
  * We need to keep track of which region a bio is doing io for.
  * To avoid a memory allocation to store just 5 or 6 bits, we
  * ensure the 'struct io' pointer is aligned so enough low bits are
  * always zero and then combine it with the region number directly in
  * bi_private.
- *---------------------------------------------------------------*/
+ *-------------------------------------------------------------------
+ */
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
unsigned int region)
{
......@@ -108,10 +110,12 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
*region = val & (DM_IO_MAX_REGIONS - 1);
}
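The alignment trick described above is ordinary pointer tagging. Here is a self-contained userspace sketch of the same idea; MAX_REGIONS and the helper names are stand-ins, not the kernel's.

#define _POSIX_C_SOURCE 200112L

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_REGIONS 64  /* stand-in for DM_IO_MAX_REGIONS; a power of 2 */

struct io { int placeholder; };

static void *pack_io_region(struct io *io, unsigned int region)
{
        uintptr_t val = (uintptr_t)io;

        assert(!(val & (MAX_REGIONS - 1)));     /* alignment leaves low bits zero */
        assert(region < MAX_REGIONS);
        return (void *)(val | region);
}

static struct io *unpack_io_region(void *bi_private, unsigned int *region)
{
        uintptr_t val = (uintptr_t)bi_private;

        *region = val & (MAX_REGIONS - 1);
        return (struct io *)(val & ~((uintptr_t)MAX_REGIONS - 1));
}

int main(void)
{
        void *mem;
        unsigned int region;

        if (posix_memalign(&mem, MAX_REGIONS, sizeof(struct io)))
                return 1;
        assert(unpack_io_region(pack_io_region(mem, 5), &region) == mem);
        assert(region == 5);
        free(mem);
        return 0;
}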
-/*-----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * We need an io object to keep track of the number of bios that
  * have been dispatched for a particular io.
- *---------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void complete_io(struct io *io)
{
unsigned long error_bits = io->error_bits;
......@@ -155,10 +159,12 @@ static void endio(struct bio *bio)
dec_count(io, region, error);
}
-/*-----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * These little objects provide an abstraction for getting a new
  * destination page for io.
- *---------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
struct dpages {
void (*get_page)(struct dpages *dp,
struct page **p, unsigned long *len, unsigned int *offset);
......@@ -291,9 +297,11 @@ static void km_dp_init(struct dpages *dp, void *data)
dp->context_ptr = data;
}
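The 'little objects' above are a two-callback vtable plus a cursor, so one I/O loop can pull destination memory from any provider. A compact userspace sketch of the pattern (the flat-buffer provider and all names are illustrative):

#include <stddef.h>
#include <stdio.h>

struct dpages {
        void (*get_chunk)(struct dpages *dp, void **p, size_t *len);
        void (*next_chunk)(struct dpages *dp);
        void *context_ptr;      /* provider-specific state */
        size_t offset;
};

#define CHUNK 8

/* One provider: walk a flat buffer in fixed-size pieces. */
static void flat_get(struct dpages *dp, void **p, size_t *len)
{
        *p = (char *)dp->context_ptr + dp->offset;
        *len = CHUNK;
}

static void flat_next(struct dpages *dp)
{
        dp->offset += CHUNK;
}

int main(void)
{
        char buf[24];
        struct dpages dp = { flat_get, flat_next, buf, 0 };

        for (int i = 0; i < 3; i++) {   /* shape of a do_region-style loop */
                void *p;
                size_t len;

                dp.get_chunk(&dp, &p, &len);
                printf("chunk %d at offset %td, %zu bytes\n",
                       i, (char *)p - buf, len);
                dp.next_chunk(&dp);
        }
        return 0;
}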
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * IO routines that accept a list of pages.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void do_region(const blk_opf_t opf, unsigned int region,
struct dm_io_region *where, struct dpages *dp,
struct io *io)
......
......@@ -35,10 +35,12 @@ struct dm_file {
volatile unsigned int global_event_nr;
};
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * The ioctl interface needs to be able to look up devices by
  * name or uuid.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct hash_cell {
struct rb_node name_node;
struct rb_node uuid_node;
......@@ -79,9 +81,11 @@ static void dm_hash_exit(void)
dm_hash_remove_all(false, false, false);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Code for looking up a device by name
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct hash_cell *__get_name_cell(const char *str)
{
struct rb_node *n = name_rb_tree.rb_node;
......@@ -196,9 +200,11 @@ static struct hash_cell *__get_dev_cell(uint64_t dev)
return hc;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Inserting, removing and renaming a device.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct hash_cell *alloc_cell(const char *name, const char *uuid,
struct mapped_device *md)
{
......@@ -501,9 +507,11 @@ void dm_deferred_remove(void)
dm_hash_remove_all(true, false, true);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Implementation of the ioctl commands
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* All the ioctl commands get dispatched to functions with this
* prototype.
......@@ -1764,10 +1772,11 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
#define IOCTL_FLAGS_NO_PARAMS 1
#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
-/*-----------------------------------------------------------------
- * Implementation of open/close/ioctl on the special char
- * device.
- *---------------------------------------------------------------*/
+/*
+ *---------------------------------------------------------------
+ * Implementation of open/close/ioctl on the special char device.
+ *---------------------------------------------------------------
+ */
static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{
static const struct {
......
......@@ -51,10 +51,12 @@ static unsigned int dm_get_kcopyd_subjob_size(void)
return sub_job_size_kb << 1;
}
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
  * pages for kcopyd io.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct dm_kcopyd_client {
struct page_list *pages;
unsigned int nr_reserved_pages;
......@@ -334,11 +336,13 @@ static void client_free_pages(struct dm_kcopyd_client *kc)
kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
  * for this reason we use a mempool to prevent the client from
  * ever having to do io (which could cause a deadlock).
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
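The deadlock argument above is the classic reason for preallocated pools: if submitting a copy could allocate memory, the allocation could wait on writeback that only completes via kcopyd itself. A single-threaded userspace sketch of a preallocated job freelist (sizes and names hypothetical; the kernel uses mempools for this):

#include <stddef.h>

struct job {
        struct job *next;
        /* copy parameters would live here */
};

#define NR_JOBS 128

static struct job jobs[NR_JOBS];
static struct job *free_list;

static void pool_init(void)
{
        for (size_t i = 0; i + 1 < NR_JOBS; i++)
                jobs[i].next = &jobs[i + 1];
        jobs[NR_JOBS - 1].next = NULL;
        free_list = jobs;
}

/* NULL means "retry later"; this never sleeps and never allocates. */
static struct job *job_alloc(void)
{
        struct job *j = free_list;

        if (j)
                free_list = j->next;
        return j;
}

static void job_free(struct job *j)
{
        j->next = free_list;
        free_list = j;
}

int main(void)
{
        pool_init();
        struct job *j = job_alloc();

        job_free(j);    /* completion returns the job to the pool */
        return 0;
}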
struct kcopyd_job {
struct dm_kcopyd_client *kc;
struct list_head list;
......@@ -901,9 +905,11 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
}
#endif /* 0 */
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Client setup
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r;
......
......@@ -183,10 +183,12 @@ void dm_dirty_log_destroy(struct dm_dirty_log *log)
}
EXPORT_SYMBOL(dm_dirty_log_destroy);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Persistent and core logs share a lot of their implementation.
  * FIXME: need a reload method to be called from a resume
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* Magic for persistent mirrors: "MiRr"
*/
......@@ -275,9 +277,11 @@ static inline void log_clear_bit(struct log_c *l,
l->touched_dirtied = 1;
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Header IO
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
{
disk->magic = cpu_to_le32(core->magic);
......@@ -353,11 +357,13 @@ static int _check_region_size(struct dm_target *ti, uint32_t region_size)
return 1;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * core log constructor/destructor
  *
  * argv contains region_size followed optionally by [no]sync
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define BYTE_SHIFT 3
static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv,
......@@ -532,11 +538,13 @@ static void core_dtr(struct dm_dirty_log *log)
destroy_log_context(lc);
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
  * disk log constructor/destructor
  *
  * argv contains log_device region_size followed optionally by [no]sync
- *--------------------------------------------------------------*/
+ *---------------------------------------------------------------------
+ */
static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv)
{
......
......@@ -118,10 +118,11 @@ static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);
-/*-----------------------------------------------
+/*
+ *-----------------------------------------------
  * Multipath state flags.
- *-----------------------------------------------*/
+ *-----------------------------------------------
+ */
#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
......@@ -144,10 +145,11 @@ static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
return r;
}
-/*-----------------------------------------------
+/*
+ *-----------------------------------------------
  * Allocation routines
- *-----------------------------------------------*/
+ *-----------------------------------------------
+ */
static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
......@@ -303,10 +305,11 @@ static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mp
dm_bio_record(bio_details, bio);
}
-/*-----------------------------------------------
+/*
+ *-----------------------------------------------
  * Path selection
- *-----------------------------------------------*/
+ *-----------------------------------------------
+ */
static int __pg_init_all_paths(struct multipath *m)
{
struct pgpath *pgpath;
......@@ -826,7 +829,8 @@ static void trigger_event(struct work_struct *work)
dm_table_event(m->ti->table);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Constructor/argument parsing:
  * <#multipath feature args> [<arg>]*
  * <#hw_handler args> [hw_handler [<arg>]*]
......@@ -835,7 +839,8 @@ static void trigger_event(struct work_struct *work)
  * [<selector> <#selector args> [<arg>]*
  * <#paths> <#per-path selector args>
  * [<path> [<arg>]* ]+ ]+
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
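Read as a dmsetup table line, that grammar could produce something like the following hypothetical example (device numbers and sizes invented): zero feature args, zero hardware-handler args, one priority group starting at group 1, a round-robin selector with zero selector args, two paths, and one per-path argument (a repeat count of 1):

0 2097152 multipath 0 0 1 1 round-robin 0 2 1 8:16 1 8:32 1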
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
struct dm_target *ti)
{
......@@ -2169,9 +2174,11 @@ static int multipath_busy(struct dm_target *ti)
return busy;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Module setup
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct target_type multipath_target = {
.name = "multipath",
.version = {1, 14, 0},
......
......@@ -21,9 +21,11 @@
#define RR_MIN_IO 1
#define RR_VERSION "1.2.0"
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Path-handling code, paths are held in lists
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct path_info {
struct list_head list;
struct dm_path *path;
......@@ -40,10 +42,11 @@ static void free_paths(struct list_head *paths)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Round-robin selector
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct selector {
struct list_head valid_paths;
struct list_head invalid_paths;
......
......@@ -3184,12 +3184,12 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Out-of-place space has to be available to allow for a reshape unless raid1! */
if (reshape_sectors || rs_is_raid1(rs)) {
/*
- * We can only prepare for a reshape here, because the
- * raid set needs to run to provide the repective reshape
- * check functions via its MD personality instance.
- *
- * So do the reshape check after md_run() succeeded.
- */
+ * We can only prepare for a reshape here, because the
+ * raid set needs to run to provide the repective reshape
+ * check functions via its MD personality instance.
+ *
+ * So do the reshape check after md_run() succeeded.
+ */
r = rs_prepare_reshape(rs);
if (r)
goto bad;
......
......@@ -33,9 +33,11 @@
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Mirror set structures.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
enum dm_raid1_error {
DM_RAID1_WRITE_ERROR,
DM_RAID1_FLUSH_ERROR,
......@@ -286,13 +288,15 @@ static int mirror_flush(struct dm_target *ti)
return 0;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Recovery.
  *
  * When a mirror is first activated we may find that some regions
  * are in the no-sync state. We have to recover these by
  * recopying from the default mirror to all the others.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void recovery_complete(int read_err, unsigned long write_err,
void *context)
{
......@@ -409,9 +413,11 @@ static void do_recovery(struct mirror_set *ms)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Reads
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
struct mirror *m = get_default_mirror(ms);
......@@ -499,9 +505,11 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
spin_unlock_irq(&ms->lock);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Reads
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void read_callback(unsigned long error, void *context)
{
struct bio *bio = context;
......@@ -580,7 +588,8 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
  * Writes.
  *
  * We do different things with the write io depending on the
......@@ -589,9 +598,8 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
  * SYNC: increment pending, use kcopyd to write to *all* mirrors
  * RECOVERING: delay the io until recovery completes
  * NOSYNC: increment pending, just write to the default mirror
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------------
+ */
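The three cases in that list are a dispatch on region state. A runnable userspace sketch of the shape of that dispatch (an int stands in for the bio, and the actions just print; the real code consults the region hash and queues onto bio lists):

#include <stdio.h>

enum region_state { RS_CLEAN, RS_RECOVERING, RS_NOSYNC };

static void write_all_mirrors(int bio)    { printf("bio %d -> all mirrors\n", bio); }
static void hold_for_recovery(int bio)    { printf("bio %d -> delayed\n", bio); }
static void write_default_mirror(int bio) { printf("bio %d -> default mirror\n", bio); }

static void dispatch_write(enum region_state state, int bio)
{
        switch (state) {
        case RS_CLEAN:          /* region in sync: mirror everywhere */
                write_all_mirrors(bio);
                break;
        case RS_RECOVERING:     /* delay until recovery completes */
                hold_for_recovery(bio);
                break;
        case RS_NOSYNC:         /* out of sync: default mirror only */
                write_default_mirror(bio);
                break;
        }
}

int main(void)
{
        dispatch_write(RS_CLEAN, 1);
        dispatch_write(RS_RECOVERING, 2);
        dispatch_write(RS_NOSYNC, 3);
        return 0;
}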
static void write_callback(unsigned long error, void *context)
{
unsigned int i;
......@@ -843,9 +851,11 @@ static void trigger_event(struct work_struct *work)
dm_table_event(ms->ti->table);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * kmirrord
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void do_mirror(struct work_struct *work)
{
struct mirror_set *ms = container_of(work, struct mirror_set,
......@@ -869,9 +879,11 @@ static void do_mirror(struct work_struct *work)
do_failures(ms, &failures);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Target functions
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
uint32_t region_size,
struct dm_target *ti,
......
......@@ -19,7 +19,8 @@
#define DM_MSG_PREFIX "region hash"
-/*-----------------------------------------------------------------
+/*
+ *------------------------------------------------------------------
  * Region hash
  *
  * The mirror splits itself up into discrete regions. Each
......@@ -54,7 +55,8 @@
  * lists in the region_hash, with the 'state', 'list' and
  * 'delayed_bios' fields of the regions. This is used from irq
  * context, so all other uses will have to suspend local irqs.
- *---------------------------------------------------------------*/
+ *------------------------------------------------------------------
+ */
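The region bookkeeping rests on a cheap sector-to-region mapping; with a power-of-two region size it is a single shift, as this small sketch shows (the 128-sector region size is an assumed example):

#include <assert.h>

typedef unsigned long long sector_t;

static unsigned long sector_to_region(sector_t sector, unsigned int region_shift)
{
        return (unsigned long)(sector >> region_shift);
}

int main(void)
{
        /* 2^7 = 128-sector (64 KiB) regions: sector 300 lies in region 2 */
        assert(sector_to_region(300, 7) == 2);
        return 0;
}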
struct dm_region_hash {
uint32_t region_size;
unsigned int region_shift;
......
......@@ -22,10 +22,12 @@
#define DM_PREFETCH_CHUNKS 12
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Persistent snapshots, by persistent we mean that the snapshot
  * will survive a reboot.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* We need to store a record of which parts of the origin have
......
......@@ -17,9 +17,11 @@
#define DM_MSG_PREFIX "transient snapshot"
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Implementation of the store for non-persistent snapshots.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct transient_c {
sector_t next_free;
};
......
......@@ -2420,10 +2420,11 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Origin methods
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
* supplied bio was ignored. The caller may submit it immediately.
......
......@@ -15,7 +15,8 @@
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
-/*--------------------------------------------------------------------------
+/*
+ *--------------------------------------------------------------------------
  * As far as the metadata goes, there is:
  *
  * - A superblock in block zero, taking up fewer than 512 bytes for
......@@ -71,7 +72,8 @@
  *
  * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
  * from the block manager.
- *--------------------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
#define DM_MSG_PREFIX "thin metadata"
......@@ -240,10 +242,11 @@ struct dm_thin_device {
uint32_t snapshotted_time;
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * superblock validator
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define SUPERBLOCK_CSUM_XOR 160774
static void sb_prepare_for_write(struct dm_block_validator *v,
......@@ -297,10 +300,11 @@ static struct dm_block_validator sb_validator = {
.check = sb_check
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Methods for the btree value types
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
return (b << 24) | t;
......
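Given the 24-bit time field visible in pack_block_time() above, the inverse is a shift and a mask. A hedged userspace sketch of that round trip, assuming only the split shown in the quoted line:

#include <assert.h>
#include <stdint.h>

static uint64_t pack_block_time(uint64_t b, uint32_t t)
{
        return (b << 24) | t;   /* block in the high bits, time in the low 24 */
}

static void unpack_block_time(uint64_t v, uint64_t *b, uint32_t *t)
{
        *b = v >> 24;
        *t = v & ((1u << 24) - 1);
}

int main(void)
{
        uint64_t b;
        uint32_t t;

        unpack_block_time(pack_block_time(1234, 56), &b, &t);
        assert(b == 1234 && t == 56);
        return 0;
}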
......@@ -2800,9 +2800,11 @@ static void requeue_bios(struct pool *pool)
rcu_read_unlock();
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Binding of control targets to a pool object
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static bool is_factor(sector_t block_size, uint32_t n)
{
return !sector_div(block_size, n);
......@@ -2866,9 +2868,11 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
pool->ti = NULL;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Pool creation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/* Initialize pool features. */
static void pool_features_init(struct pool_features *pf)
{
......@@ -3092,9 +3096,11 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
return pool;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Pool target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void pool_dtr(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
......@@ -4096,9 +4102,11 @@ static struct target_type pool_target = {
.io_hints = pool_io_hints,
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Thin target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void thin_get(struct thin_c *tc)
{
refcount_inc(&tc->refcount);
......
......@@ -1873,9 +1873,11 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
return 1;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * An IDR is used to keep track of allocated minor numbers.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void free_minor(int minor)
{
spin_lock(&_minor_lock);
......@@ -2960,9 +2962,11 @@ void dm_internal_resume_fast(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Event notification.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned int cookie, bool need_resize_uevent)
{
......
......@@ -48,9 +48,11 @@ struct dm_md_mempools;
struct dm_target_io;
struct dm_io;
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Internal table functions.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
......@@ -119,9 +121,11 @@ static inline int dm_zone_map_bio(struct dm_target_io *tio)
}
#endif
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * A registry of target types.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
......
......@@ -368,9 +368,11 @@ static void dm_block_manager_write_callback(struct dm_buffer *buf)
}
}
-/*----------------------------------------------------------------
+/*
+ * -------------------------------------------------------------
  * Public interface
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
struct dm_block_manager {
struct dm_bufio_client *bufio;
bool read_only:1;
......
......@@ -14,9 +14,11 @@
#define DM_MSG_PREFIX "btree"
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
  * Array manipulation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void memcpy_disk(void *dest, const void *src, size_t len)
__dm_written_to_disk(src)
{
......
......@@ -434,10 +434,12 @@ const char *dm_shift_arg(struct dm_arg_set *as);
*/
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
  * Functions for creating and manipulating mapped devices.
  * Drop the reference with dm_put when you finish with the object.
- *---------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
/*
* DM_ANY_MINOR chooses the next available minor number.
......@@ -518,9 +520,11 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Functions for manipulating device-mapper tables.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* First create an empty table.
......@@ -594,9 +598,11 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
*/
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
  * Macros.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
#define DM_NAME "device-mapper"
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
......
......@@ -97,7 +97,7 @@ struct dm_dirty_log_type {
* Do not confuse this function with 'in_sync()', one
* tells you if an area is synchronised, the other
* assigns recovery work.
-*/
+ */
int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
/*
......
......@@ -13,9 +13,11 @@
#include <linux/dm-dirty-log.h>
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
  * Region hash
- *----------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
struct dm_region_hash;
struct dm_region;
......