Commit 8554df1c authored by Andreas Gruenbacher, committed by Philipp Reisner

drbd: Convert all constants in enum drbd_req_event to upper case

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent bb3bfe96
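
The change is purely mechanical: every enumerator of enum drbd_req_event is renamed from lower case to upper case (the rename also fixes the spelling of to_be_send, which becomes TO_BE_SENT), and every call site is updated to match. A minimal sketch of the pattern, reduced to two of the constants:

	/* before: enumerators named in lower case */
	enum drbd_req_event {
		created,
		to_be_send,
		/* ... */
	};
	_req_mod(req, to_be_send);

	/* after: upper-case enumerators, matching common kernel style */
	enum drbd_req_event {
		CREATED,
		TO_BE_SENT,
		/* ... */
	};
	_req_mod(req, TO_BE_SENT);
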
@@ -2031,21 +2031,21 @@ static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
 * or implicit barrier packets as necessary.
 * increased:
 * w_send_barrier
- * _req_mod(req, queue_for_net_write or queue_for_net_read);
+ * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
 * it is much easier and equally valid to count what we queue for the
 * worker, even before it actually was queued or send.
 * (drbd_make_request_common; recovery path on read io-error)
 * decreased:
 * got_BarrierAck (respective tl_clear, tl_clear_barrier)
- * _req_mod(req, data_received)
+ * _req_mod(req, DATA_RECEIVED)
 * [from receive_DataReply]
- * _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
+ * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
 * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
 * for some reason it is NOT decreased in got_NegAck,
 * but in the resulting cleanup code from report_params.
 * we should try to remember the reason for that...
- * _req_mod(req, send_failed or send_canceled)
- * _req_mod(req, connection_lost_while_pending)
+ * _req_mod(req, SEND_FAILED or SEND_CANCELED)
+ * _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
 * [from tl_clear_barrier]
 */
 static inline void inc_ap_pending(struct drbd_conf *mdev)
...
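
The comment above describes the bookkeeping contract for ap_pending: it goes up when a request is queued for the peer and comes back down once the peer has answered, or can no longer answer. Condensed into a hypothetical helper, purely for illustration — account_req_event() does not exist in DRBD, and the real inc_ap_pending()/dec_ap_pending() calls are spread over the call sites listed in that comment:

	static inline void account_req_event(struct drbd_conf *mdev,
					     enum drbd_req_event what)
	{
		switch (what) {
		case QUEUE_FOR_NET_WRITE:	/* queued for the peer ... */
		case QUEUE_FOR_NET_READ:
			inc_ap_pending(mdev);
			break;
		case DATA_RECEIVED:		/* ... answered ... */
		case WRITE_ACKED_BY_PEER:
		case RECV_ACKED_BY_PEER:
		case NEG_ACKED:
		case SEND_FAILED:		/* ... or will never be answered */
		case SEND_CANCELED:
		case CONNECTION_LOST_WHILE_PENDING:
			dec_ap_pending(mdev);
			break;
		default:
			break;
		}
	}
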
@@ -290,7 +290,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 /* Clean up list of requests processed during current epoch */
 list_for_each_safe(le, tle, &b->requests) {
 r = list_entry(le, struct drbd_request, tl_requests);
- _req_mod(r, barrier_acked);
+ _req_mod(r, BARRIER_ACKED);
 }
 /* There could be requests on the list waiting for completion
 of the write to the local disk. To avoid corruptions of
@@ -300,10 +300,10 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 the write acks - which would be a bug and violating write ordering.
 To not deadlock in case we lose connection while such requests are
 still pending, we need some way to find them for the
- _req_mode(connection_lost_while_pending).
+ _req_mode(CONNECTION_LOST_WHILE_PENDING).
 These have been list_move'd to the out_of_sequence_requests list in
- _req_mod(, barrier_acked) above.
+ _req_mod(, BARRIER_ACKED) above.
 */
 list_del_init(&b->requests);
@@ -336,8 +336,8 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 * @mdev: DRBD device.
 * @what: The action/event to perform with all request objects
 *
- * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
- * restart_frozen_disk_io.
+ * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
+ * RESTART_FROZEN_DISK_IO.
 */
 static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 {
@@ -362,7 +362,7 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 tmp = b->next;
 if (n_writes) {
- if (what == resend) {
+ if (what == RESEND) {
 b->n_writes = n_writes;
 if (b->w.cb == NULL) {
 b->w.cb = w_send_barrier;
@@ -423,7 +423,7 @@ void tl_clear(struct drbd_conf *mdev)
 spin_lock_irq(&mdev->req_lock);
- _tl_restart(mdev, connection_lost_while_pending);
+ _tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);
 /* we expect this list to be empty. */
 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
@@ -433,7 +433,7 @@ void tl_clear(struct drbd_conf *mdev)
 r = list_entry(le, struct drbd_request, tl_requests);
 /* It would be nice to complete outside of spinlock.
 * But this is easier for now. */
- _req_mod(r, connection_lost_while_pending);
+ _req_mod(r, CONNECTION_LOST_WHILE_PENDING);
 }
 /* ensure bit indicating barrier is required is clear */
@@ -1321,7 +1321,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 union drbd_state ns, enum chg_state_flags flags)
 {
 enum drbd_fencing_p fp;
- enum drbd_req_event what = nothing;
+ enum drbd_req_event what = NOTHING;
 union drbd_state nsm = (union drbd_state){ .i = -1 };
 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
@@ -1349,12 +1349,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 nsm.i = -1;
 if (ns.susp_nod) {
 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
- what = resend;
+ what = RESEND;
 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
- what = restart_frozen_disk_io;
- if (what != nothing)
+ what = RESTART_FROZEN_DISK_IO;
+ if (what != NOTHING)
 nsm.susp_nod = 0;
 }
@@ -1373,12 +1373,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 /* case2: The connection was established again: */
 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
 clear_bit(NEW_CUR_UUID, &mdev->flags);
- what = resend;
+ what = RESEND;
 nsm.susp_fen = 0;
 }
 }
- if (what != nothing) {
+ if (what != NOTHING) {
 spin_lock_irq(&mdev->req_lock);
 _tl_restart(mdev, what);
 nsm.i &= mdev->state.i;
...
@@ -2022,7 +2022,7 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 if (mdev->state.conn < C_CONNECTED)
 tl_clear(mdev);
 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
- tl_restart(mdev, fail_frozen_disk_io);
+ tl_restart(mdev, FAIL_FROZEN_DISK_IO);
 }
 drbd_resume_io(mdev);
...
@@ -385,7 +385,7 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 /*
 * This function is called from _asender only_
- * but see also comments in _req_mod(,barrier_acked)
+ * but see also comments in _req_mod(,BARRIER_ACKED)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
@@ -1507,7 +1507,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 ok = recv_dless_read(mdev, req, sector, data_size);
 if (ok)
- req_mod(req, data_received);
+ req_mod(req, DATA_RECEIVED);
 /* else: nothing. handled from drbd_disconnect...
 * I don't think we may complete this just yet
 * in case we are "on-disconnect: freeze" */
@@ -3279,7 +3279,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
 test_bit(NEW_CUR_UUID, &mdev->flags)) {
- /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
+ /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
 for temporal network outages! */
 spin_unlock_irq(&mdev->req_lock);
 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
@@ -4272,19 +4272,19 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
 switch (be16_to_cpu(h->command)) {
 case P_RS_WRITE_ACK:
 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer_and_sis;
+ what = WRITE_ACKED_BY_PEER_AND_SIS;
 break;
 case P_WRITE_ACK:
 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = write_acked_by_peer;
+ what = WRITE_ACKED_BY_PEER;
 break;
 case P_RECV_ACK:
 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
- what = recv_acked_by_peer;
+ what = RECV_ACKED_BY_PEER;
 break;
 case P_DISCARD_ACK:
 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
- what = conflict_discarded_by_peer;
+ what = CONFLICT_DISCARDED_BY_PEER;
 break;
 default:
 D_ASSERT(0);
@@ -4315,7 +4315,7 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
 found = validate_req_change_req_state(mdev, p->block_id, sector,
 &mdev->write_requests, __func__,
- neg_acked, missing_ok);
+ NEG_ACKED, missing_ok);
 if (!found) {
 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
 The master bio might already be completed, therefore the
@@ -4340,7 +4340,7 @@ static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
 return validate_req_change_req_state(mdev, p->block_id, sector,
 &mdev->read_requests, __func__,
- neg_acked, false);
+ NEG_ACKED, false);
 }
 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
...
@@ -225,10 +225,10 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 return;
 if (req->master_bio) {
- /* this is data_received (remote read)
+ /* this is DATA_RECEIVED (remote read)
 * or protocol C P_WRITE_ACK
 * or protocol B P_RECV_ACK
- * or protocol A "handed_over_to_network" (SendAck)
+ * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck)
 * or canceled or failed,
 * or killed from the transfer log due to connection loss.
 */
@@ -393,11 +393,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 /* does not happen...
 * initialization done in drbd_req_new
- case created:
+ case CREATED:
 break;
 */
- case to_be_send: /* via network */
+ case TO_BE_SENT: /* via network */
 /* reached via drbd_make_request_common
 * and from w_read_retry_remote */
 D_ASSERT(!(req->rq_state & RQ_NET_MASK));
@@ -405,13 +405,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 inc_ap_pending(mdev);
 break;
- case to_be_submitted: /* locally */
+ case TO_BE_SUBMITTED: /* locally */
 /* reached via drbd_make_request_common */
 D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
 req->rq_state |= RQ_LOCAL_PENDING;
 break;
- case completed_ok:
+ case COMPLETED_OK:
 if (bio_data_dir(req->master_bio) == WRITE)
 mdev->writ_cnt += req->i.size >> 9;
 else
@@ -424,7 +424,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 put_ldev(mdev);
 break;
- case write_completed_with_error:
+ case WRITE_COMPLETED_WITH_ERROR:
 req->rq_state |= RQ_LOCAL_COMPLETED;
 req->rq_state &= ~RQ_LOCAL_PENDING;
@@ -433,7 +433,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 put_ldev(mdev);
 break;
- case read_ahead_completed_with_error:
+ case READ_AHEAD_COMPLETED_WITH_ERROR:
 /* it is legal to fail READA */
 req->rq_state |= RQ_LOCAL_COMPLETED;
 req->rq_state &= ~RQ_LOCAL_PENDING;
@@ -441,7 +441,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 put_ldev(mdev);
 break;
- case read_completed_with_error:
+ case READ_COMPLETED_WITH_ERROR:
 drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
 req->rq_state |= RQ_LOCAL_COMPLETED;
@@ -459,12 +459,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 break;
 }
- /* _req_mod(req,to_be_send); oops, recursion... */
+ /* _req_mod(req,TO_BE_SENT); oops, recursion... */
 req->rq_state |= RQ_NET_PENDING;
 inc_ap_pending(mdev);
- /* fall through: _req_mod(req,queue_for_net_read); */
- case queue_for_net_read:
+ /* fall through: _req_mod(req,QUEUE_FOR_NET_READ); */
+ case QUEUE_FOR_NET_READ:
 /* READ or READA, and
 * no local disk,
 * or target area marked as invalid,
@@ -486,7 +486,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 drbd_queue_work(&mdev->data.work, &req->w);
 break;
- case queue_for_net_write:
+ case QUEUE_FOR_NET_WRITE:
 /* assert something? */
 /* from drbd_make_request_common only */
@@ -533,17 +533,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 break;
- case queue_for_send_oos:
+ case QUEUE_FOR_SEND_OOS:
 req->rq_state |= RQ_NET_QUEUED;
 req->w.cb = w_send_oos;
 drbd_queue_work(&mdev->data.work, &req->w);
 break;
- case oos_handed_to_network:
+ case OOS_HANDED_TO_NETWORK:
 /* actually the same */
- case send_canceled:
+ case SEND_CANCELED:
 /* treat it the same */
- case send_failed:
+ case SEND_FAILED:
 /* real cleanup will be done from tl_clear. just update flags
 * so it is no longer marked as on the worker queue */
 req->rq_state &= ~RQ_NET_QUEUED;
@@ -552,7 +552,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 _req_may_be_done_not_susp(req, m);
 break;
- case handed_over_to_network:
+ case HANDED_OVER_TO_NETWORK:
 /* assert something? */
 if (bio_data_dir(req->master_bio) == WRITE)
 atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
@@ -573,17 +573,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 req->rq_state &= ~RQ_NET_QUEUED;
 req->rq_state |= RQ_NET_SENT;
 /* because _drbd_send_zc_bio could sleep, and may want to
- * dereference the bio even after the "write_acked_by_peer" and
- * "completed_ok" events came in, once we return from
+ * dereference the bio even after the "WRITE_ACKED_BY_PEER" and
+ * "COMPLETED_OK" events came in, once we return from
 * _drbd_send_zc_bio (drbd_send_dblock), we have to check
 * whether it is done already, and end it. */
 _req_may_be_done_not_susp(req, m);
 break;
- case read_retry_remote_canceled:
+ case READ_RETRY_REMOTE_CANCELED:
 req->rq_state &= ~RQ_NET_QUEUED;
 /* fall through, in case we raced with drbd_disconnect */
- case connection_lost_while_pending:
+ case CONNECTION_LOST_WHILE_PENDING:
 /* transfer log cleanup after connection loss */
 /* assert something? */
 if (req->rq_state & RQ_NET_PENDING)
@@ -599,19 +599,19 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 _req_may_be_done(req, m); /* Allowed while state.susp */
 break;
- case write_acked_by_peer_and_sis:
+ case WRITE_ACKED_BY_PEER_AND_SIS:
 req->rq_state |= RQ_NET_SIS;
- case conflict_discarded_by_peer:
+ case CONFLICT_DISCARDED_BY_PEER:
 /* for discarded conflicting writes of multiple primaries,
 * there is no need to keep anything in the tl, potential
 * node crashes are covered by the activity log. */
- if (what == conflict_discarded_by_peer)
+ if (what == CONFLICT_DISCARDED_BY_PEER)
 dev_alert(DEV, "Got DiscardAck packet %llus +%u!"
 " DRBD is not a random data generator!\n",
 (unsigned long long)req->i.sector, req->i.size);
 req->rq_state |= RQ_NET_DONE;
 /* fall through */
- case write_acked_by_peer:
+ case WRITE_ACKED_BY_PEER:
 /* protocol C; successfully written on peer.
 * Nothing to do here.
 * We want to keep the tl in place for all protocols, to cater
@@ -623,9 +623,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 * P_BARRIER_ACK, but that is an unnecessary optimization. */
 /* this makes it effectively the same as for: */
- case recv_acked_by_peer:
+ case RECV_ACKED_BY_PEER:
 /* protocol B; pretends to be successfully written on peer.
- * see also notes above in handed_over_to_network about
+ * see also notes above in HANDED_OVER_TO_NETWORK about
 * protocol != C */
 req->rq_state |= RQ_NET_OK;
 D_ASSERT(req->rq_state & RQ_NET_PENDING);
@@ -635,7 +635,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 _req_may_be_done_not_susp(req, m);
 break;
- case neg_acked:
+ case NEG_ACKED:
 /* assert something? */
 if (req->rq_state & RQ_NET_PENDING) {
 dec_ap_pending(mdev);
@@ -645,17 +645,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 req->rq_state |= RQ_NET_DONE;
 _req_may_be_done_not_susp(req, m);
- /* else: done by handed_over_to_network */
+ /* else: done by HANDED_OVER_TO_NETWORK */
 break;
- case fail_frozen_disk_io:
+ case FAIL_FROZEN_DISK_IO:
 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
 break;
 _req_may_be_done(req, m); /* Allowed while state.susp */
 break;
- case restart_frozen_disk_io:
+ case RESTART_FROZEN_DISK_IO:
 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
 break;
@@ -670,7 +670,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 drbd_queue_work(&mdev->data.work, &req->w);
 break;
- case resend:
+ case RESEND:
 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
 before the connection loss (B&C only); only P_BARRIER_ACK was missing.
 Trowing them out of the TL here by pretending we got a BARRIER_ACK
@@ -682,9 +682,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 }
 break;
 }
- /* else, fall through to barrier_acked */
- case barrier_acked:
+ /* else, fall through to BARRIER_ACKED */
+ case BARRIER_ACKED:
 if (!(req->rq_state & RQ_WRITE))
 break;
@@ -692,7 +692,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 /* barrier came in before all requests have been acked.
 * this is bad, because if the connection is lost now,
 * we won't be able to clean them up... */
- dev_err(DEV, "FIXME (barrier_acked but pending)\n");
+ dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
 list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
 }
 if ((req->rq_state & RQ_NET_MASK) != 0) {
@@ -703,7 +703,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 _req_may_be_done(req, m); /* Allowed while state.susp */
 break;
- case data_received:
+ case DATA_RECEIVED:
 D_ASSERT(req->rq_state & RQ_NET_PENDING);
 dec_ap_pending(mdev);
 req->rq_state &= ~RQ_NET_PENDING;
@@ -924,9 +924,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 /* mark them early for readability.
 * this just sets some state flags. */
 if (remote)
- _req_mod(req, to_be_send);
+ _req_mod(req, TO_BE_SENT);
 if (local)
- _req_mod(req, to_be_submitted);
+ _req_mod(req, TO_BE_SUBMITTED);
 /* check this request on the collision detection hash tables.
 * if we have a conflict, just complete it here.
@@ -944,11 +944,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
 * or READ, but not in sync.
 */
 _req_mod(req, (rw == WRITE)
- ? queue_for_net_write
- : queue_for_net_read);
+ ? QUEUE_FOR_NET_WRITE
+ : QUEUE_FOR_NET_READ);
 }
 if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
- _req_mod(req, queue_for_send_oos);
+ _req_mod(req, QUEUE_FOR_SEND_OOS);
 if (remote &&
 mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
...
@@ -77,39 +77,39 @@
 */
 enum drbd_req_event {
- created,
- to_be_send,
- to_be_submitted,
+ CREATED,
+ TO_BE_SENT,
+ TO_BE_SUBMITTED,
 /* XXX yes, now I am inconsistent...
 * these are not "events" but "actions"
 * oh, well... */
- queue_for_net_write,
- queue_for_net_read,
- queue_for_send_oos,
- send_canceled,
- send_failed,
- handed_over_to_network,
- oos_handed_to_network,
- connection_lost_while_pending,
- read_retry_remote_canceled,
- recv_acked_by_peer,
- write_acked_by_peer,
- write_acked_by_peer_and_sis, /* and set_in_sync */
- conflict_discarded_by_peer,
- neg_acked,
- barrier_acked, /* in protocol A and B */
- data_received, /* (remote read) */
- read_completed_with_error,
- read_ahead_completed_with_error,
- write_completed_with_error,
- completed_ok,
- resend,
- fail_frozen_disk_io,
- restart_frozen_disk_io,
- nothing, /* for tracing only */
+ QUEUE_FOR_NET_WRITE,
+ QUEUE_FOR_NET_READ,
+ QUEUE_FOR_SEND_OOS,
+ SEND_CANCELED,
+ SEND_FAILED,
+ HANDED_OVER_TO_NETWORK,
+ OOS_HANDED_TO_NETWORK,
+ CONNECTION_LOST_WHILE_PENDING,
+ READ_RETRY_REMOTE_CANCELED,
+ RECV_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER,
+ WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
+ CONFLICT_DISCARDED_BY_PEER,
+ NEG_ACKED,
+ BARRIER_ACKED, /* in protocol A and B */
+ DATA_RECEIVED, /* (remote read) */
+ READ_COMPLETED_WITH_ERROR,
+ READ_AHEAD_COMPLETED_WITH_ERROR,
+ WRITE_COMPLETED_WITH_ERROR,
+ COMPLETED_OK,
+ RESEND,
+ FAIL_FROZEN_DISK_IO,
+ RESTART_FROZEN_DISK_IO,
+ NOTHING,
 };
 /* encoding of request states for now. we don't actually need that many bits.
@@ -138,8 +138,8 @@ enum drbd_req_state_bits {
 * recv_ack (B) or implicit "ack" (A),
 * still waiting for the barrier ack.
 * master_bio may already be completed and invalidated.
- * 11100: write_acked (C),
- * data_received (for remote read, any protocol)
+ * 11100: write acked (C),
+ * data received (for remote read, any protocol)
 * or finally the barrier ack has arrived (B,A)...
 * request can be freed
 * 01100: neg-acked (write, protocol C)
...
@@ -209,12 +209,12 @@ void drbd_endio_pri(struct bio *bio, int error)
 /* to avoid recursion in __req_mod */
 if (unlikely(error)) {
 what = (bio_data_dir(bio) == WRITE)
- ? write_completed_with_error
+ ? WRITE_COMPLETED_WITH_ERROR
 : (bio_rw(bio) == READ)
- ? read_completed_with_error
- : read_ahead_completed_with_error;
+ ? READ_COMPLETED_WITH_ERROR
+ : READ_AHEAD_COMPLETED_WITH_ERROR;
 } else
- what = completed_ok;
+ what = COMPLETED_OK;
 bio_put(req->private_bio);
 req->private_bio = ERR_PTR(error);
@@ -238,7 +238,7 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 spin_lock_irq(&mdev->req_lock);
 if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
- _req_mod(req, read_retry_remote_canceled);
+ _req_mod(req, READ_RETRY_REMOTE_CANCELED);
 spin_unlock_irq(&mdev->req_lock);
 return 1;
 }
@@ -1243,12 +1243,12 @@ int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 int ok;
 if (unlikely(cancel)) {
- req_mod(req, send_canceled);
+ req_mod(req, SEND_CANCELED);
 return 1;
 }
 ok = drbd_send_oos(mdev, req);
- req_mod(req, oos_handed_to_network);
+ req_mod(req, OOS_HANDED_TO_NETWORK);
 return ok;
 }
@@ -1265,12 +1265,12 @@ int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 int ok;
 if (unlikely(cancel)) {
- req_mod(req, send_canceled);
+ req_mod(req, SEND_CANCELED);
 return 1;
 }
 ok = drbd_send_dblock(mdev, req);
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);
 return ok;
 }
@@ -1287,7 +1287,7 @@ int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 int ok;
 if (unlikely(cancel)) {
- req_mod(req, send_canceled);
+ req_mod(req, SEND_CANCELED);
 return 1;
 }
@@ -1300,7 +1300,7 @@ int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 if (mdev->state.conn >= C_CONNECTED)
 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
 }
- req_mod(req, ok ? handed_over_to_network : send_failed);
+ req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);
 return ok;
 }
...