Commit 59fdd433 authored by Yu Kuai's avatar Yu Kuai Committed by Song Liu

md/md-bitmap: make in memory structure internal

Now that struct bitmap_page and struct bitmap are not used externally anymore,
move them from md-bitmap.h to md-bitmap.c (except that dm-raid still
uses the macro 'COUNTER_MAX').

Also fix some checkpatch warnings.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20240826074452.1490072-43-yukuai1@huaweicloud.com
Signed-off-by: Song Liu <song@kernel.org>
parent dab2ce55
This diff is collapsed.
...@@ -7,81 +7,7 @@ ...@@ -7,81 +7,7 @@
#ifndef BITMAP_H #ifndef BITMAP_H
#define BITMAP_H 1 #define BITMAP_H 1
#define BITMAP_MAJOR_LO 3 #define BITMAP_MAGIC 0x6d746962
/* version 4 insists the bitmap is in little-endian order
* with version 3, it is host-endian which is non-portable
* Version 5 is currently set only for clustered devices
*/
#define BITMAP_MAJOR_HI 4
#define BITMAP_MAJOR_CLUSTERED 5
#define BITMAP_MAJOR_HOSTENDIAN 3
/*
* in-memory bitmap:
*
* Use 16 bit block counters to track pending writes to each "chunk".
* The 2 high order bits are special-purpose, the first is a flag indicating
* whether a resync is needed. The second is a flag indicating whether a
* resync is active.
* This means that the counter is actually 14 bits:
*
* +--------+--------+------------------------------------------------+
* | resync | resync | counter |
* | needed | active | |
* | (0-1) | (0-1) | (0-16383) |
* +--------+--------+------------------------------------------------+
*
* The "resync needed" bit is set when:
* a '1' bit is read from storage at startup.
* a write request fails on some drives
* a resync is aborted on a chunk with 'resync active' set
* It is cleared (and resync-active set) when a resync starts across all drives
* of the chunk.
*
*
* The "resync active" bit is set when:
* a resync is started on all drives, and resync_needed is set.
* resync_needed will be cleared (as long as resync_active wasn't already set).
* It is cleared when a resync completes.
*
* The counter counts pending write requests, plus the on-disk bit.
* When the counter is '1' and the resync bits are clear, the on-disk
* bit can be cleared as well, thus setting the counter to 0.
 * When we set a bit, or OR in the counter (to start a write), if the field is
 * 0, we first set the disk bit and set the counter to 1.
*
* If the counter is 0, the on-disk bit is clear and the stripe is clean
* Anything that dirties the stripe pushes the counter to 2 (at least)
* and sets the on-disk bit (lazily).
 * If a periodic sweep finds the counter at 2, it is decremented to 1.
 * If the sweep finds the counter at 1, the on-disk bit is cleared and the
 * counter goes to zero.
*
* Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
* counters as a fallback when "page" memory cannot be allocated:
*
* Normal case (page memory allocated):
*
* page pointer (32-bit)
*
* [ ] ------+
* |
* +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
* c1 c2 c2048
*
* Hijacked case (page memory allocation failed):
*
* hijacked page pointer (32-bit)
*
* [ ][ ] (no page memory allocated)
* counter #1 (16-bit) counter #2 (16-bit)
*
*/
#ifdef __KERNEL__
#define PAGE_BITS (PAGE_SIZE << 3)
#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
typedef __u16 bitmap_counter_t; typedef __u16 bitmap_counter_t;
#define COUNTER_BITS 16 #define COUNTER_BITS 16
...@@ -91,26 +17,6 @@ typedef __u16 bitmap_counter_t; ...@@ -91,26 +17,6 @@ typedef __u16 bitmap_counter_t;
#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) #define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2))) #define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1) #define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
/* how many counters per page? */
#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
/* same, except a shift value for more efficient bitops */
#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
/* same, except a mask value for more efficient bitops */
#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
#define BITMAP_BLOCK_SHIFT 9
#endif
/*
* bitmap structures:
*/
#define BITMAP_MAGIC 0x6d746962
/* use these for bitmap->flags and bitmap->sb->state bit-fields */ /* use these for bitmap->flags and bitmap->sb->state bit-fields */
enum bitmap_state { enum bitmap_state {
...@@ -152,88 +58,6 @@ typedef struct bitmap_super_s { ...@@ -152,88 +58,6 @@ typedef struct bitmap_super_s {
* devices. For raid10 it is the size of the array. * devices. For raid10 it is the size of the array.
*/ */
#ifdef __KERNEL__
/*
 * The in-memory bitmap is represented by an array of bitmap_pages, one
 * per page of 16-bit block counters (see the counter layout described
 * at the top of this header).
 */
struct bitmap_page {
/*
 * map points to the actual memory page holding the 16-bit counters
 */
char *map;
/*
 * in emergencies (when map cannot be alloced), hijack the map
 * pointer and use it as two counters itself (i.e. the pointer's
 * own bytes store two 16-bit counters; no page is allocated)
 */
unsigned int hijacked:1;
/*
 * If any counter in this page is '1' or '2' - and so could be
 * cleared then that page is marked as 'pending'
 */
unsigned int pending:1;
/*
 * count of dirty bits on the page
 */
unsigned int count:30;
};
/* the main bitmap structure - one per mddev */
struct bitmap {
struct bitmap_counts {
spinlock_t lock; /* guards the counter state in this struct */
struct bitmap_page *bp; /* array of pages of 16-bit block counters */
unsigned long pages; /* total number of pages
* in the bitmap */
unsigned long missing_pages; /* number of pages
* not yet allocated */
unsigned long chunkshift; /* chunksize = 2^chunkshift
* (for bitops) */
unsigned long chunks; /* Total number of data
* chunks for the array */
} counts;
struct mddev *mddev; /* the md device that the bitmap is for */
__u64 events_cleared; /* event count when bits were last cleared
* - NOTE(review): confirm exact semantics */
int need_sync; /* presumably set when a resync is required;
* TODO confirm against md-bitmap.c */
struct bitmap_storage {
struct file *file; /* backing disk file */
struct page *sb_page; /* cached copy of the bitmap
* file superblock */
unsigned long sb_index; /* page index of the superblock within
* the file - NOTE(review): confirm */
struct page **filemap; /* list of cache pages for
* the file */
unsigned long *filemap_attr; /* attributes associated
* w/ filemap pages */
unsigned long file_pages; /* number of pages in the file*/
unsigned long bytes; /* total bytes in the bitmap */
} storage;
unsigned long flags; /* bit-field of enum bitmap_state values */
int allclean; /* nonzero when no counters need cleaning -
* presumably lets the daemon skip a sweep */
atomic_t behind_writes; /* count of in-flight behind writes */
unsigned long behind_writes_used; /* highest actual value at runtime */
/*
 * the bitmap daemon - periodically wakes up and sweeps the bitmap
 * file, cleaning up bits and flushing out pages to disk as necessary
 */
unsigned long daemon_lastrun; /* jiffies of last run */
unsigned long last_end_sync; /* when we lasted called end_sync to
* update bitmap with resync progress */
atomic_t pending_writes; /* pending writes to the bitmap file */
wait_queue_head_t write_wait; /* woken when pending_writes drains -
* TODO confirm */
wait_queue_head_t overflow_wait; /* presumably woken when a counter
* drops below COUNTER_MAX; confirm */
wait_queue_head_t behind_wait; /* woken when behind writes complete */
struct kernfs_node *sysfs_can_clear; /* sysfs handle for the
* "can_clear" attribute -
* NOTE(review): confirm name */
int cluster_slot; /* Slot offset for clustered env */
};
struct md_bitmap_stats { struct md_bitmap_stats {
u64 events_cleared; u64 events_cleared;
int behind_writes; int behind_writes;
...@@ -272,21 +96,20 @@ struct bitmap_operations { ...@@ -272,21 +96,20 @@ struct bitmap_operations {
void (*cond_end_sync)(struct mddev *mddev, sector_t sector, bool force); void (*cond_end_sync)(struct mddev *mddev, sector_t sector, bool force);
void (*close_sync)(struct mddev *mddev); void (*close_sync)(struct mddev *mddev);
void (*update_sb)(struct bitmap *bitmap); void (*update_sb)(void *data);
int (*get_stats)(struct bitmap *bitmap, struct md_bitmap_stats *stats); int (*get_stats)(void *data, struct md_bitmap_stats *stats);
void (*sync_with_cluster)(struct mddev *mddev, void (*sync_with_cluster)(struct mddev *mddev,
sector_t old_lo, sector_t old_hi, sector_t old_lo, sector_t old_hi,
sector_t new_lo, sector_t new_hi); sector_t new_lo, sector_t new_hi);
struct bitmap *(*get_from_slot)(struct mddev *mddev, int slot); void *(*get_from_slot)(struct mddev *mddev, int slot);
int (*copy_from_slot)(struct mddev *mddev, int slot, sector_t *lo, int (*copy_from_slot)(struct mddev *mddev, int slot, sector_t *lo,
sector_t *hi, bool clear_bits); sector_t *hi, bool clear_bits);
void (*set_pages)(struct bitmap *bitmap, unsigned long pages); void (*set_pages)(void *data, unsigned long pages);
void (*free)(struct bitmap *bitmap); void (*free)(void *data);
}; };
/* the bitmap API */ /* the bitmap API */
void mddev_set_bitmap_ops(struct mddev *mddev); void mddev_set_bitmap_ops(struct mddev *mddev);
#endif #endif
#endif
...@@ -1144,7 +1144,7 @@ static int update_bitmap_size(struct mddev *mddev, sector_t size) ...@@ -1144,7 +1144,7 @@ static int update_bitmap_size(struct mddev *mddev, sector_t size)
static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize) static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize)
{ {
struct bitmap *bitmap = mddev->bitmap; void *bitmap = mddev->bitmap;
struct md_bitmap_stats stats; struct md_bitmap_stats stats;
unsigned long my_pages; unsigned long my_pages;
int i, rv; int i, rv;
...@@ -1218,9 +1218,9 @@ static int cluster_check_sync_size(struct mddev *mddev) ...@@ -1218,9 +1218,9 @@ static int cluster_check_sync_size(struct mddev *mddev)
{ {
int current_slot = md_cluster_ops->slot_number(mddev); int current_slot = md_cluster_ops->slot_number(mddev);
int node_num = mddev->bitmap_info.nodes; int node_num = mddev->bitmap_info.nodes;
struct bitmap *bitmap = mddev->bitmap;
struct dlm_lock_resource *bm_lockres; struct dlm_lock_resource *bm_lockres;
struct md_bitmap_stats stats; struct md_bitmap_stats stats;
void *bitmap = mddev->bitmap;
unsigned long sync_size = 0; unsigned long sync_size = 0;
unsigned long my_sync_size; unsigned long my_sync_size;
char str[64]; char str[64];
......
...@@ -535,7 +535,7 @@ struct mddev { ...@@ -535,7 +535,7 @@ struct mddev {
struct percpu_ref writes_pending; struct percpu_ref writes_pending;
int sync_checkers; /* # of threads checking writes_pending */ int sync_checkers; /* # of threads checking writes_pending */
struct bitmap *bitmap; /* the bitmap for the device */ void *bitmap; /* the bitmap for the device */
struct bitmap_operations *bitmap_ops; struct bitmap_operations *bitmap_ops;
struct { struct {
struct file *file; /* the bitmap file */ struct file *file; /* the bitmap file */
......
...@@ -1412,7 +1412,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, ...@@ -1412,7 +1412,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
struct r1conf *conf = mddev->private; struct r1conf *conf = mddev->private;
struct r1bio *r1_bio; struct r1bio *r1_bio;
int i, disks; int i, disks;
struct bitmap *bitmap = mddev->bitmap;
unsigned long flags; unsigned long flags;
struct md_rdev *blocked_rdev; struct md_rdev *blocked_rdev;
int first_clone; int first_clone;
...@@ -1565,7 +1564,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, ...@@ -1565,7 +1564,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* at a time and thus needs a new bio that can fit the whole payload * at a time and thus needs a new bio that can fit the whole payload
* this bio in page sized chunks. * this bio in page sized chunks.
*/ */
if (write_behind && bitmap) if (write_behind && mddev->bitmap)
max_sectors = min_t(int, max_sectors, max_sectors = min_t(int, max_sectors,
BIO_MAX_VECS * (PAGE_SIZE >> 9)); BIO_MAX_VECS * (PAGE_SIZE >> 9));
if (max_sectors < bio_sectors(bio)) { if (max_sectors < bio_sectors(bio)) {
...@@ -1601,7 +1600,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, ...@@ -1601,7 +1600,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* Not if there are too many, or cannot * Not if there are too many, or cannot
* allocate memory, or a reader on WriteMostly * allocate memory, or a reader on WriteMostly
* is waiting for behind writes to flush */ * is waiting for behind writes to flush */
err = mddev->bitmap_ops->get_stats(bitmap, &stats); err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
if (!err && write_behind && !stats.behind_wait && if (!err && write_behind && !stats.behind_wait &&
stats.behind_writes < max_write_behind) stats.behind_writes < max_write_behind)
alloc_behind_master_bio(r1_bio, bio); alloc_behind_master_bio(r1_bio, bio);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment