Commit 24c4830c authored by Bart Van Assche, committed by Philipp Reisner

drbd: Fix spelling

Found these with the help of ispell -l.
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
parent 9a0d9d03
@@ -28,7 +28,7 @@
 #include "drbd_int.h"
 #include "drbd_wrappers.h"
-/* We maintain a trivial check sum in our on disk activity log.
+/* We maintain a trivial checksum in our on disk activity log.
  * With that we can ensure correct operation even when the storage
  * device might do a partial (last) sector write while losing power.
  */
...
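The comment in this hunk explains why the activity log carries a checksum at all: if power fails during the last, partially written sector, the stored checksum no longer matches on recovery. A minimal user-space sketch of such a "trivial checksum" (illustrative only; this is not drbd's actual on-disk algorithm or layout):

    #include <stddef.h>

    /* XOR-fold a buffer of longs into a single checksum word. A torn
     * (partial) write changes the payload without updating the stored
     * checksum, so the mismatch is detected on the next read. */
    static unsigned long trivial_csum(const unsigned long *buf, size_t nwords)
    {
            unsigned long csum = 0;
            size_t i;

            for (i = 0; i < nwords; i++)
                    csum ^= buf[i];
            return csum;
    }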
@@ -74,7 +74,7 @@
  * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
  * seems excessive.
  *
- * We plan to reduce the amount of in-core bitmap pages by pageing them in
+ * We plan to reduce the amount of in-core bitmap pages by paging them in
  * and out against their on-disk location as necessary, but need to make
  * sure we don't cause too much meta data IO, and must not deadlock in
  * tight memory situations. This needs some more work.
@@ -200,7 +200,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
  * we if bits have been cleared since last IO. */
 #define BM_PAGE_LAZY_WRITEOUT 28
-/* store_page_idx uses non-atomic assingment. It is only used directly after
+/* store_page_idx uses non-atomic assignment. It is only used directly after
  * allocating the page. All other bm_set_page_* and bm_clear_page_* need to
  * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
  * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
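The distinction this comment draws is between a plain store, safe only while the freshly allocated page is still private to one context, and atomic read-modify-write bitops, required once several contexts may flip different flag bits in the same word. A user-space sketch of the same rule (names modeled on the hunk, layout hypothetical; drbd itself uses the kernel's atomic set_bit/clear_bit family on page->private):

    #define BM_PAGE_LAZY_WRITEOUT 28

    struct bm_page { unsigned long private; };  /* stand-in for struct page */

    /* Only used directly after allocation, before the page is visible
     * to any other context, so a plain (non-atomic) assignment is fine. */
    static void store_page_idx(struct bm_page *p, unsigned long idx)
    {
            p->private = idx;
    }

    /* Once the page is published, concurrent contexts may set or clear
     * different bits of the same word; a plain read-modify-write could
     * lose one of those updates, an atomic RMW cannot. */
    static void bm_set_page_lazy_writeout(struct bm_page *p)
    {
            __atomic_fetch_or(&p->private, 1UL << BM_PAGE_LAZY_WRITEOUT,
                              __ATOMIC_RELAXED);
    }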
@@ -318,7 +318,7 @@ static void bm_unmap(unsigned long *p_addr)
 /* word offset from start of bitmap to word number _in_page_
  * modulo longs per page
 #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
-hm, well, Philipp thinks gcc might not optimze the % into & (... - 1)
+hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
 so do it explicitly:
  */
 #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
...
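The optimization being second-guessed here is standard strength reduction: for unsigned x and a power-of-two divisor N, x % N equals x & (N - 1), and writing the mask form makes that explicit instead of trusting the compiler. A quick self-contained check (PAGE_SIZE assumed to be 4096 for the sketch):

    #include <assert.h>
    #include <stdio.h>

    #define LONGS_PER_PAGE (4096 / sizeof(long))  /* assumes PAGE_SIZE == 4096 */
    #define MLPP(X) ((X) & (LONGS_PER_PAGE - 1))

    int main(void)
    {
            unsigned long x;

            /* The mask is only equivalent to % because the divisor is a
             * power of two and the operand is unsigned. */
            for (x = 0; x < 100000; x++)
                    assert(MLPP(x) == x % LONGS_PER_PAGE);
            puts("mask and modulo agree for a power-of-two divisor");
            return 0;
    }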
@@ -699,7 +699,7 @@ struct drbd_request {
 	 * see drbd_endio_pri(). */
 	struct bio *private_bio;
-	struct hlist_node colision;
+	struct hlist_node collision;
 	sector_t sector;
 	unsigned int size;
 	unsigned int epoch; /* barrier_nr */
@@ -765,7 +765,7 @@ struct digest_info {
 struct drbd_epoch_entry {
 	struct drbd_work w;
-	struct hlist_node colision;
+	struct hlist_node collision;
 	struct drbd_epoch *epoch; /* for writes */
 	struct drbd_conf *mdev;
 	struct page *pages;
@@ -1520,7 +1520,7 @@ extern void drbd_resume_io(struct drbd_conf *mdev);
 extern char *ppsize(char *buf, unsigned long long size);
 extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
+extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
 extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
...
@@ -2732,7 +2732,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 	/* double check digest, sometimes buffers have been modified in flight. */
 	if (dgs > 0 && dgs <= 64) {
-		/* 64 byte, 512 bit, is the larges digest size
+		/* 64 byte, 512 bit, is the largest digest size
 		 * currently supported in kernel crypto. */
 		unsigned char digest[64];
 		drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
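The pattern in this hunk is worth spelling out: the integrity digest is computed once when the write is queued and recomputed immediately before the data goes out; a mismatch means some upper layer modified the buffer while the request was in flight. A user-space sketch of the idea, with a toy byte-sum standing in for the kernel crypto digest (all names hypothetical, no integrity guarantee intended):

    #include <stdio.h>
    #include <string.h>

    /* Toy stand-in for the real digest; 64 bytes (512 bits) matches the
     * largest digest size the kernel crypto layer supported at the time. */
    static void toy_digest(const unsigned char *buf, size_t len,
                           unsigned char out[64])
    {
            unsigned char sum = 0;
            size_t i;

            memset(out, 0, 64);
            for (i = 0; i < len; i++)
                    sum += buf[i];
            out[0] = sum;
    }

    int main(void)
    {
            unsigned char buf[16] = "stable payload";
            unsigned char at_queue[64], at_send[64];

            toy_digest(buf, sizeof(buf), at_queue);
            buf[0] ^= 1;            /* buffer "modified in flight" */
            toy_digest(buf, sizeof(buf), at_send);
            if (memcmp(at_queue, at_send, 64) != 0)
                    puts("digest mismatch: buffer changed between queue and send");
            return 0;
    }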
@@ -3287,7 +3287,7 @@ static void drbd_delete_device(unsigned int minor)
 	drbd_release_ee_lists(mdev);
-	/* should be free'd on disconnect? */
+	/* should be freed on disconnect? */
 	kfree(mdev->ee_hash);
 	/*
 	mdev->ee_hash_s = 0;
...
@@ -596,7 +596,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
  * Returns 0 on success, negative return values indicate errors.
  * You should call drbd_md_sync() after calling this function.
  */
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
+enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
 {
 	sector_t prev_first_sect, prev_size; /* previous meta location */
 	sector_t la_size;
@@ -1205,7 +1205,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
 		set_bit(USE_DEGR_WFC_T, &mdev->flags);
-	dd = drbd_determin_dev_size(mdev, 0);
+	dd = drbd_determine_dev_size(mdev, 0);
 	if (dd == dev_size_error) {
 		retcode = ERR_NOMEM_BITMAP;
 		goto force_diskless_dec;
@@ -1719,7 +1719,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
 	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
-	dd = drbd_determin_dev_size(mdev, ddsf);
+	dd = drbd_determine_dev_size(mdev, ddsf);
 	drbd_md_sync(mdev);
 	put_ldev(mdev);
 	if (dd == dev_size_error) {
...
@@ -333,7 +333,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
 	if (!page)
 		goto fail;
-	INIT_HLIST_NODE(&e->colision);
+	INIT_HLIST_NODE(&e->collision);
 	e->epoch = NULL;
 	e->mdev = mdev;
 	e->pages = page;
@@ -356,7 +356,7 @@ void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int i
 	kfree(e->digest);
 	drbd_pp_free(mdev, e->pages, is_net);
 	D_ASSERT(atomic_read(&e->pending_bios) == 0);
-	D_ASSERT(hlist_unhashed(&e->colision));
+	D_ASSERT(hlist_unhashed(&e->collision));
 	mempool_free(e, drbd_ee_mempool);
 }
@@ -1413,7 +1413,7 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
 	sector_t sector = e->sector;
 	int ok;
-	D_ASSERT(hlist_unhashed(&e->colision));
+	D_ASSERT(hlist_unhashed(&e->collision));
 	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 		drbd_set_in_sync(mdev, sector, e->size);
@@ -1482,7 +1482,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 		return false;
 	}
-	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
+	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
 	 * special casing it there for the various failure cases.
 	 * still no race with drbd_fail_pending_reads */
 	ok = recv_dless_read(mdev, req, sector, data_size);
@@ -1553,11 +1553,11 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
 	if (mdev->net_conf->two_primaries) {
 		spin_lock_irq(&mdev->req_lock);
-		D_ASSERT(!hlist_unhashed(&e->colision));
-		hlist_del_init(&e->colision);
+		D_ASSERT(!hlist_unhashed(&e->collision));
+		hlist_del_init(&e->collision);
 		spin_unlock_irq(&mdev->req_lock);
 	} else {
-		D_ASSERT(hlist_unhashed(&e->colision));
+		D_ASSERT(hlist_unhashed(&e->collision));
 	}
 	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
@@ -1574,8 +1574,8 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
 		ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
 	spin_lock_irq(&mdev->req_lock);
-	D_ASSERT(!hlist_unhashed(&e->colision));
-	hlist_del_init(&e->colision);
+	D_ASSERT(!hlist_unhashed(&e->collision));
+	hlist_del_init(&e->collision);
 	spin_unlock_irq(&mdev->req_lock);
 	dec_unacked(mdev);
@@ -1750,7 +1750,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		spin_lock_irq(&mdev->req_lock);
-		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
+		hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
 		slot = tl_hash_slot(mdev, sector);
@@ -1760,7 +1760,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			int have_conflict = 0;
 			prepare_to_wait(&mdev->misc_wait, &wait,
 				TASK_INTERRUPTIBLE);
-			hlist_for_each_entry(i, n, slot, colision) {
+			hlist_for_each_entry(i, n, slot, collision) {
 				if (OVERLAPS) {
 					/* only ALERT on first iteration,
 					 * we may be woken up early... */
@@ -1799,7 +1799,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			}
 			if (signal_pending(current)) {
-				hlist_del_init(&e->colision);
+				hlist_del_init(&e->collision);
 				spin_unlock_irq(&mdev->req_lock);
@@ -1857,7 +1857,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	dev_err(DEV, "submit failed, triggering re-connect\n");
 	spin_lock_irq(&mdev->req_lock);
 	list_del(&e->w.list);
-	hlist_del_init(&e->colision);
+	hlist_del_init(&e->collision);
 	spin_unlock_irq(&mdev->req_lock);
 	if (e->flags & EE_CALL_AL_COMPLETE_IO)
 		drbd_al_complete_io(mdev, e->sector);
@@ -2988,7 +2988,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	ddsf = be16_to_cpu(p->dds_flags);
 	if (get_ldev(mdev)) {
-		dd = drbd_determin_dev_size(mdev, ddsf);
+		dd = drbd_determine_dev_size(mdev, ddsf);
 		put_ldev(mdev);
 		if (dd == dev_size_error)
 			return false;
@@ -4261,7 +4261,7 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
 	struct hlist_node *n;
 	struct drbd_request *req;
-	hlist_for_each_entry(req, n, slot, colision) {
+	hlist_for_each_entry(req, n, slot, collision) {
 		if ((unsigned long)req == (unsigned long)id) {
 			if (req->sector != sector) {
 				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
...
@@ -163,7 +163,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
 	 * they must have been failed on the spot */
 #define OVERLAPS overlaps(sector, size, i->sector, i->size)
 	slot = tl_hash_slot(mdev, sector);
-	hlist_for_each_entry(i, n, slot, colision) {
+	hlist_for_each_entry(i, n, slot, collision) {
 		if (OVERLAPS) {
 			dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
 				"other: %p %llus +%u\n",
@@ -187,7 +187,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
 #undef OVERLAPS
 #define OVERLAPS overlaps(sector, size, e->sector, e->size)
 	slot = ee_hash_slot(mdev, req->sector);
-	hlist_for_each_entry(e, n, slot, colision) {
+	hlist_for_each_entry(e, n, slot, collision) {
 		if (OVERLAPS) {
 			wake_up(&mdev->misc_wait);
 			break;
@@ -260,8 +260,8 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 		/* remove the request from the conflict detection
 		 * respective block_id verification hash */
-		if (!hlist_unhashed(&req->colision))
-			hlist_del(&req->colision);
+		if (!hlist_unhashed(&req->collision))
+			hlist_del(&req->collision);
 		else
 			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
@@ -329,7 +329,7 @@ static int _req_conflicts(struct drbd_request *req)
 	struct hlist_node *n;
 	struct hlist_head *slot;
-	D_ASSERT(hlist_unhashed(&req->colision));
+	D_ASSERT(hlist_unhashed(&req->collision));
 	if (!get_net_conf(mdev))
 		return 0;
@@ -341,7 +341,7 @@ static int _req_conflicts(struct drbd_request *req)
 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
 	slot = tl_hash_slot(mdev, sector);
-	hlist_for_each_entry(i, n, slot, colision) {
+	hlist_for_each_entry(i, n, slot, collision) {
 		if (OVERLAPS) {
 			dev_alert(DEV, "%s[%u] Concurrent local write detected! "
 				"[DISCARD L] new: %llus +%u; "
@@ -359,7 +359,7 @@ static int _req_conflicts(struct drbd_request *req)
 #undef OVERLAPS
 #define OVERLAPS overlaps(e->sector, e->size, sector, size)
 	slot = ee_hash_slot(mdev, sector);
-	hlist_for_each_entry(e, n, slot, colision) {
+	hlist_for_each_entry(e, n, slot, collision) {
 		if (OVERLAPS) {
 			dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
 				" [DISCARD L] new: %llus +%u; "
@@ -491,7 +491,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* so we can verify the handle in the answer packet
 		 * corresponding hlist_del is in _req_may_be_done() */
-		hlist_add_head(&req->colision, ar_hash_slot(mdev, req->sector));
+		hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
 		set_bit(UNPLUG_REMOTE, &mdev->flags);
@@ -507,7 +507,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		/* assert something? */
 		/* from drbd_make_request_common only */
-		hlist_add_head(&req->colision, tl_hash_slot(mdev, req->sector));
+		hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector));
 		/* corresponding hlist_del is in _req_may_be_done() */
 		/* NOTE
...
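Taken together, these hunks show what the renamed member is for: collision is the hlist_node that threads a request or epoch entry into a small per-sector hash so that overlapping IO can be detected, added with hlist_add_head and removed again in _req_may_be_done. A condensed, self-contained user-space sketch of that lifecycle (minimal hlist re-implementation; slot sizing and helper names are hypothetical):

    #include <stdio.h>
    #include <stddef.h>

    struct hlist_node { struct hlist_node *next, **pprev; };
    struct hlist_head { struct hlist_node *first; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
    {
            n->next = h->first;
            if (h->first)
                    h->first->pprev = &n->next;
            h->first = n;
            n->pprev = &h->first;
    }

    struct drbd_request {
            struct hlist_node collision;    /* the renamed member */
            unsigned long long sector;
    };

    #define TL_HASH_SIZE 8
    static struct hlist_head tl_hash[TL_HASH_SIZE];

    static struct hlist_head *tl_hash_slot(unsigned long long sector)
    {
            return tl_hash + (sector % TL_HASH_SIZE);
    }

    /* Walk the collision chain of one slot, as the 4-argument
     * hlist_for_each_entry() of that kernel era does. */
    static struct drbd_request *find_req(unsigned long long sector)
    {
            struct hlist_node *n;

            for (n = tl_hash_slot(sector)->first; n; n = n->next) {
                    struct drbd_request *req =
                            container_of(n, struct drbd_request, collision);
                    if (req->sector == sector)
                            return req;
            }
            return NULL;
    }

    int main(void)
    {
            struct drbd_request r = { .sector = 42 };

            hlist_add_head(&r.collision, tl_hash_slot(r.sector));
            printf("found: %p\n", (void *)find_req(42));
            return 0;
    }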
@@ -256,7 +256,7 @@ static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
 	struct hlist_node *n;
 	struct drbd_request *req;
-	hlist_for_each_entry(req, n, slot, colision) {
+	hlist_for_each_entry(req, n, slot, collision) {
 		if ((unsigned long)req == (unsigned long)id) {
 			D_ASSERT(req->sector == sector);
 			return req;
@@ -291,7 +291,7 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
 		req->epoch = 0;
 		req->sector = bio_src->bi_sector;
 		req->size = bio_src->bi_size;
-		INIT_HLIST_NODE(&req->colision);
+		INIT_HLIST_NODE(&req->collision);
 		INIT_LIST_HEAD(&req->tl_requests);
 		INIT_LIST_HEAD(&req->w.list);
 	}
...
@@ -126,7 +126,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
 	list_del(&e->w.list); /* has been on active_ee or sync_ee */
 	list_add_tail(&e->w.list, &mdev->done_ee);
-	/* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
+	/* No hlist_del_init(&e->collision) here, we did not send the Ack yet,
 	 * neither did we wake possibly waiting conflicting requests.
 	 * done from "drbd_process_done_ee" within the appropriate w.cb
 	 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
@@ -840,7 +840,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 		const int ratio =
 			(t == 0)     ? 0 :
 			(t < 100000) ? ((s*100)/t) : (s/(t/100));
-		dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; "
+		dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
 		     "transferred %luK total %luK\n",
 		     ratio,
 		     Bit2KB(mdev->rs_same_csum),
...
@@ -38,7 +38,7 @@
 /* Although the Linux source code makes a difference between
    generic endianness and the bitfields' endianness, there is no
-   architecture as of Linux-2.6.24-rc4 where the bitfileds' endianness
+   architecture as of Linux-2.6.24-rc4 where the bitfields' endianness
    does not match the generic endianness. */
 #if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -195,7 +195,7 @@ enum drbd_conns {
 	C_WF_REPORT_PARAMS, /* we have a socket */
 	C_CONNECTED,      /* we have introduced each other */
 	C_STARTING_SYNC_S, /* starting full sync by admin request. */
-	C_STARTING_SYNC_T, /* stariing full sync by admin request. */
+	C_STARTING_SYNC_T, /* starting full sync by admin request. */
 	C_WF_BITMAP_S,
 	C_WF_BITMAP_T,
 	C_WF_SYNC_UUID,
@@ -236,7 +236,7 @@ union drbd_state {
  * pointed out by Maxim Uvarov q<muvarov@ru.mvista.com>
  * even though we transmit as "cpu_to_be32(state)",
  * the offsets of the bitfields still need to be swapped
- * on different endianess.
+ * on different endianness.
  */
 struct {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -266,7 +266,7 @@ union drbd_state {
 		unsigned peer:2 ;   /* 3/4	 primary/secondary/unknown */
 		unsigned role:2 ;   /* 3/4	 primary/secondary/unknown */
 #else
-# error "this endianess is not supported"
+# error "this endianness is not supported"
 #endif
 	};
 	unsigned int i;
...
@@ -30,7 +30,7 @@ enum packet_types {
 	int tag_and_len ## member;
 #include "linux/drbd_nl.h"
-/* declate tag-list-sizes */
+/* declare tag-list-sizes */
 static const int tag_list_sizes[] = {
 #define NL_PACKET(name, number, fields) 2 fields ,
 #define NL_INTEGER(pn, pr, member) + 4 + 4
...