Commit 81e84650 authored by Andreas Gruenbacher, committed by Philipp Reisner

drbd: Use the standard bool, true, and false keywords

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 6184ea21
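The pattern applied throughout the diff below is mechanical: the private TRUE/FALSE macros are removed from drbd_int.h, and every use is replaced by the kernel-wide bool type and the true/false constants (provided via <linux/types.h>/<linux/stddef.h>). The following is a minimal userspace sketch of that before/after shape, assuming C99 <stdbool.h> as the stand-in for the kernel headers; the helper names old_should_detach/new_should_detach are hypothetical and do not appear in the commit.

/*
 * Minimal sketch of the conversion pattern, not drbd code.
 * Assumes C99 <stdbool.h> in place of the kernel's bool/true/false.
 */
#include <stdbool.h>
#include <stdio.h>

/* Before: private macros, as removed from drbd_int.h by this patch. */
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif

static int old_should_detach(int error, int forcedetach)
{
	if (forcedetach)
		return TRUE;		/* plain int used as a boolean */
	return error ? TRUE : FALSE;
}

/* After: the standard keywords, no private macros needed. */
static bool new_should_detach(int error, bool forcedetach)
{
	if (forcedetach)
		return true;
	return error != 0;
}

int main(void)
{
	printf("old: %d, new: %d\n",
	       old_should_detach(-5, 0), new_should_detach(-5, false));
	return 0;
}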
@@ -338,7 +338,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 		+ mdev->ldev->md.al_offset + mdev->al_tr_pos;
 	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
-		drbd_chk_io_error(mdev, 1, TRUE);
+		drbd_chk_io_error(mdev, 1, true);
 	if (++mdev->al_tr_pos >
 	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -528,7 +528,7 @@ static void atodb_endio(struct bio *bio, int error)
 	if (!error && !uptodate)
 		error = -EIO;
-	drbd_chk_io_error(mdev, error, TRUE);
+	drbd_chk_io_error(mdev, error, true);
 	if (error && wc->error == 0)
 		wc->error = error;
@@ -991,7 +991,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 	if (count && get_ldev(mdev)) {
 		drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
 		spin_lock_irqsave(&mdev->al_lock, flags);
-		drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
+		drbd_try_clear_on_disk_bm(mdev, sector, count, true);
 		spin_unlock_irqrestore(&mdev->al_lock, flags);
 		/* just wake_up unconditional now, various lc_chaged(),
@@ -1441,7 +1441,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
 	mdev->rs_failed += count;
 	if (get_ldev(mdev)) {
-		drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
+		drbd_try_clear_on_disk_bm(mdev, sector, count, false);
 		put_ldev(mdev);
 	}
@@ -844,7 +844,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
 	if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
 		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
-		drbd_chk_io_error(mdev, 1, TRUE);
+		drbd_chk_io_error(mdev, 1, true);
 		err = -EIO;
 	}
@@ -916,7 +916,7 @@ int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local)
 		dev_err(DEV, "IO ERROR writing bitmap sector %lu "
 		    "(meta-disk sector %llus)\n",
 		    enr, (unsigned long long)on_disk_sector);
-		drbd_chk_io_error(mdev, 1, TRUE);
+		drbd_chk_io_error(mdev, 1, true);
 		for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
 			drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
 	}
@@ -72,13 +72,6 @@ extern int fault_devs;
 extern char usermode_helper[];
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
 /* I don't remember why XCPU ...
  * This is used to wake the asender,
  * and to interrupt sending the sending task
@@ -2002,17 +1995,17 @@ static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
 static inline void drbd_thread_stop(struct drbd_thread *thi)
 {
-	_drbd_thread_stop(thi, FALSE, TRUE);
+	_drbd_thread_stop(thi, false, true);
 }
 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
 {
-	_drbd_thread_stop(thi, FALSE, FALSE);
+	_drbd_thread_stop(thi, false, false);
 }
 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
 {
-	_drbd_thread_stop(thi, TRUE, FALSE);
+	_drbd_thread_stop(thi, true, false);
 }
 /* counts how many answer packets packets we expect from our peer,
@@ -456,7 +456,7 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 }
 /**
- * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
+ * cl_wide_st_chg() - true if the state change is a cluster wide one
  * @mdev: DRBD device.
  * @os: old (current) state.
  * @ns: new (wanted) state.
@@ -1623,7 +1623,7 @@ int drbd_thread_start(struct drbd_thread *thi)
 	if (!try_module_get(THIS_MODULE)) {
 		dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
 		spin_unlock_irqrestore(&thi->t_lock, flags);
-		return FALSE;
+		return false;
 	}
 	init_completion(&thi->stop);
@@ -1640,7 +1640,7 @@ int drbd_thread_start(struct drbd_thread *thi)
 		dev_err(DEV, "Couldn't start thread\n");
 		module_put(THIS_MODULE);
-		return FALSE;
+		return false;
 	}
 	spin_lock_irqsave(&thi->t_lock, flags);
 	thi->task = nt;
@@ -1660,7 +1660,7 @@ int drbd_thread_start(struct drbd_thread *thi)
 		break;
 	}
-	return TRUE;
+	return true;
 }
@@ -1758,8 +1758,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
 {
 	int sent, ok;
-	ERR_IF(!h) return FALSE;
-	ERR_IF(!size) return FALSE;
+	ERR_IF(!h) return false;
+	ERR_IF(!size) return false;
 	h->magic = BE_DRBD_MAGIC;
 	h->command = cpu_to_be16(cmd);
@@ -2196,14 +2196,14 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
 	struct p_header80 *p;
 	int ret;
-	ERR_IF(!mdev->bitmap) return FALSE;
+	ERR_IF(!mdev->bitmap) return false;
 	/* maybe we should use some per thread scratch page,
 	 * and allocate that during initial device creation? */
 	p = (struct p_header80 *) __get_free_page(GFP_NOIO);
 	if (!p) {
 		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
-		return FALSE;
+		return false;
 	}
 	if (get_ldev(mdev)) {
@@ -2256,7 +2256,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
 	p.set_size = cpu_to_be32(set_size);
 	if (mdev->state.conn < C_CONNECTED)
-		return FALSE;
+		return false;
 	ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
 			(struct p_header80 *)&p, sizeof(p));
 	return ok;
@@ -2284,7 +2284,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
 	p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
 	if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
-		return FALSE;
+		return false;
 	ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
 			(struct p_header80 *)&p, sizeof(p));
 	return ok;
@@ -2390,8 +2390,8 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
 }
 /* called on sndtimeo
- * returns FALSE if we should retry,
- * TRUE if we think connection is dead
+ * returns false if we should retry,
+ * true if we think connection is dead
  */
 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
 {
@@ -2404,7 +2404,7 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
 		|| mdev->state.conn < C_CONNECTED;
 	if (drop_it)
-		return TRUE;
+		return true;
 	drop_it = !--mdev->ko_count;
 	if (!drop_it) {
@@ -3283,7 +3283,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 		goto out_no_disk;
 	mdev->vdisk = disk;
-	set_disk_ro(disk, TRUE);
+	set_disk_ro(disk, true);
 	disk->queue = q;
 	disk->major = DRBD_MAJOR;
@@ -3560,7 +3560,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
 	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
 		/* this was a try anyways ... */
 		dev_err(DEV, "meta data update failed!\n");
-		drbd_chk_io_error(mdev, 1, TRUE);
+		drbd_chk_io_error(mdev, 1, true);
 	}
 	/* Update mdev->ldev->md.la_size_sect,
@@ -385,7 +385,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
 	if (new_role == R_SECONDARY) {
-		set_disk_ro(mdev->vdisk, TRUE);
+		set_disk_ro(mdev->vdisk, true);
 		if (get_ldev(mdev)) {
 			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
 			put_ldev(mdev);
@@ -395,7 +395,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 			mdev->net_conf->want_lose = 0;
 			put_net_conf(mdev);
 		}
-		set_disk_ro(mdev->vdisk, FALSE);
+		set_disk_ro(mdev->vdisk, false);
 		if (get_ldev(mdev)) {
 			if (((mdev->state.conn < C_CONNECTED ||
 			      mdev->state.pdsk <= D_FAILED)
@@ -445,7 +445,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;
-		__drbd_chk_io_error(mdev, FALSE);
+		__drbd_chk_io_error(mdev, false);
 		_req_may_be_done_not_susp(req, m);
 		put_ldev(mdev);
 		break;
@@ -466,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
-		__drbd_chk_io_error(mdev, FALSE);
+		__drbd_chk_io_error(mdev, false);
 		put_ldev(mdev);
 		/* no point in retrying if there is no good remote data,
@@ -96,7 +96,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
 	if (list_empty(&mdev->read_ee))
 		wake_up(&mdev->ee_wait);
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
-		__drbd_chk_io_error(mdev, FALSE);
+		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 	drbd_queue_work(&mdev->data.work, &e->w);
@@ -139,7 +139,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
 		: list_empty(&mdev->active_ee);
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
-		__drbd_chk_io_error(mdev, FALSE);
+		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 	if (is_syncer_req)