Commit 622d32d3 authored by Jens Axboe

Merge branch 'for-jens' of git://git.drbd.org/linux-2.6-drbd into for-2.6.33

parents 476d42f1 ed814525
@@ -867,10 +867,9 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
         }
         if (fp == FP_STONITH &&
-            (ns.role == R_PRIMARY &&
-             ns.conn < C_CONNECTED &&
-             ns.pdsk > D_OUTDATED))
+            (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
+            !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
                 ns.susp = 1;
         if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
                 if (ns.conn == C_SYNC_SOURCE)
...
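For context on the hunk above: the rewritten condition suspends I/O only when the node newly enters the fenced situation (Primary, connection lost, peer disk not known to be Outdated), instead of firing again on every state change that happens while it is already in that situation. A minimal standalone sketch of that edge-triggered check, using simplified stand-ins for the DRBD state fields (the enums and struct below are illustrative, not the driver's real definitions):

#include <stdbool.h>

/* Simplified stand-ins for the relevant fields of DRBD's state union;
 * only the ordering relied on by the comparisons matters here. */
enum role { R_SECONDARY, R_PRIMARY };
enum conn { C_STANDALONE, C_CONNECTED };
enum pdsk { D_OUTDATED, D_UP_TO_DATE };

struct state {
        enum role role;
        enum conn conn;
        enum pdsk pdsk;
};

/* True while a Primary has lost the connection and cannot prove the peer's
 * disk is outdated -- the case fencing (FP_STONITH) has to deal with. */
static bool fenced(struct state s)
{
        return s.role == R_PRIMARY && s.conn < C_CONNECTED && s.pdsk > D_OUTDATED;
}

/* Old logic: suspend whenever fenced(ns) holds.
 * New logic: suspend only on the transition into that state, so later state
 * changes while already fenced do not set ns.susp again. */
static bool should_suspend(struct state os, struct state ns)
{
        return fenced(ns) && !fenced(os);
}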
@@ -894,11 +894,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
                 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
         }
-        if (drbd_get_capacity(nbc->md_bdev) > max_possible_sectors)
-                dev_warn(DEV, "truncating very big lower level device "
-                        "to currently maximum possible %llu sectors\n",
-                        (unsigned long long) max_possible_sectors);
         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
                 retcode = ERR_MD_DISK_TO_SMALL;
                 dev_warn(DEV, "refusing attach: md-device too small, "
@@ -917,6 +912,15 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
+        if (nbc->known_size > max_possible_sectors) {
+                dev_warn(DEV, "==> truncating very big lower level device "
+                        "to currently maximum possible %llu sectors <==\n",
+                        (unsigned long long) max_possible_sectors);
+                if (nbc->dc.meta_dev_idx >= 0)
+                        dev_warn(DEV, "==>> using internal or flexible "
+                                "meta data may help <<==\n");
+        }
         drbd_suspend_io(mdev);
         /* also wait for the last barrier ack. */
         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt));
...
@@ -2099,7 +2099,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
                         break;
                 }
                 /* Else fall through to one of the other strategies... */
-                dev_warn(DEV, "Discard younger/older primary did not found a decision\n"
+                dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
                      "Using discard-least-changes instead\n");
         case ASB_DISCARD_ZERO_CHG:
                 if (ch_peer == 0 && ch_self == 0) {
@@ -3619,10 +3619,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
         set_bit(STOP_SYNC_TIMER, &mdev->flags);
         resync_timer_fn((unsigned long)mdev);
-        /* so we can be sure that all remote or resync reads
-         * made it at least to net_ee */
-        wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
         /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
          * w_make_resync_request etc. which may still be on the worker queue
          * to be "canceled" */
...
@@ -505,7 +505,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
                  * corresponding hlist_del is in _req_may_be_done() */
                 hlist_add_head(&req->colision, ar_hash_slot(mdev, req->sector));
-                set_bit(UNPLUG_REMOTE, &mdev->flags); /* why? */
+                set_bit(UNPLUG_REMOTE, &mdev->flags);
                 D_ASSERT(req->rq_state & RQ_NET_PENDING);
                 req->rq_state |= RQ_NET_QUEUED;
@@ -536,6 +536,11 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
                  *
                  * Add req to the (now) current epoch (barrier). */
+                /* otherwise we may lose an unplug, which may cause some remote
+                 * io-scheduler timeout to expire, increasing maximum latency,
+                 * hurting performance. */
+                set_bit(UNPLUG_REMOTE, &mdev->flags);
                 /* see drbd_make_request_common,
                  * just after it grabs the req_lock */
                 D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
...
@@ -53,7 +53,7 @@
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.3rc2"
+#define REL_VERSION "8.3.5"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 91
...
@@ -70,11 +70,11 @@
 /* I don't think that a tcp send buffer of more than 10M is usefull */
 #define DRBD_SNDBUF_SIZE_MIN  0
 #define DRBD_SNDBUF_SIZE_MAX  (10<<20)
-#define DRBD_SNDBUF_SIZE_DEF  (2*65535)
+#define DRBD_SNDBUF_SIZE_DEF  0
 #define DRBD_RCVBUF_SIZE_MIN  0
 #define DRBD_RCVBUF_SIZE_MAX  (10<<20)
-#define DRBD_RCVBUF_SIZE_DEF  (2*65535)
+#define DRBD_RCVBUF_SIZE_DEF  0
 /* @4k PageSize -> 128kB - 512MB */
 #define DRBD_MAX_BUFFERS_MIN  32
...
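The drbd_limits.h hunk changes the default sndbuf-size and rcvbuf-size from a fixed 2*65535 bytes to 0. Assuming 0 carries the meaning its minimum value already suggests, i.e. "do not force a size, let the kernel auto-tune the TCP buffers", a small user-space sketch of how such a setting is typically applied (the function below is illustrative only; DRBD itself configures its sockets inside the kernel):

#include <sys/socket.h>

/* Apply a DRBD-style sndbuf-size/rcvbuf-size setting to a socket.
 * A value of 0 means "keep the kernel's auto-tuned default"; any
 * non-zero value pins the buffer to a fixed size. */
static int apply_bufsize(int fd, int sndbuf, int rcvbuf)
{
        if (sndbuf &&
            setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof(sndbuf)))
                return -1;
        if (rcvbuf &&
            setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf)))
                return -1;
        return 0;
}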