Commit b49946b2 authored by NeilBrown, committed by Greg Kroah-Hartman

staging: lustre: libcfs: discard cfs_time_after()

cfs_time_after() behaves exactly like time_after(), and
cfs_time_aftereq() likewise matches time_after_eq(),
so discard the cfs versions.
Signed-off-by: NeilBrown <neilb@suse.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e990f1c6
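
The equivalence the commit message asserts is direct: include/linux/jiffies.h defines time_before(a, b) as time_after(b, a), so cfs_time_after(t1, t2) == time_before(t2, t1) == time_after(t1, t2), and likewise for the _eq variants. Below is a minimal user-space sketch (not part of this patch; the macros are simplified and omit the kernel's typecheck()) showing that the wrapper and the native macro agree, including across jiffies wrap-around:

#include <assert.h>
#include <limits.h>

/* Simplified forms of the include/linux/jiffies.h macros. */
#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)

/* The wrapper this commit removes: time_after with arguments kept in order. */
static inline int cfs_time_after(unsigned long t1, unsigned long t2)
{
	return time_before(t2, t1);	/* == time_after(t1, t2) */
}

int main(void)
{
	unsigned long t = ULONG_MAX - 5;	/* jiffies just before wrap */

	/* identical results on both sides of the wrap point */
	assert(cfs_time_after(t + 10, t) == time_after(t + 10, t));
	assert(time_after(t + 10, t));	/* wrap-safe: t + 10 is still "after" */
	assert(!time_after(t, t + 10));	/* note: naive (t + 10 > t) is false here */
	return 0;
}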
@@ -36,20 +36,6 @@
 #ifndef __LIBCFS_TIME_H__
 #define __LIBCFS_TIME_H__
 
-/*
- * generic time manipulation functions.
- */
-
-static inline int cfs_time_after(unsigned long t1, unsigned long t2)
-{
-	return time_before(t2, t1);
-}
-
-static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2)
-{
-	return time_before_eq(t2, t1);
-}
-
 /*
  * return valid time-out based on user supplied one. Currently we only check
  * that time-out is not shorted than allowed.
...
@@ -1522,7 +1522,7 @@ static int kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, unsigned long now)
 		return 0;
 	if (fpo->fpo_failed)
 		return 1;
-	return cfs_time_aftereq(now, fpo->fpo_deadline);
+	return time_after_eq(now, fpo->fpo_deadline);
 }
 
 static int
@@ -1850,7 +1850,7 @@ static int kiblnd_pool_is_idle(struct kib_pool *pool, unsigned long now)
 		return 0;
 	if (pool->po_failed)
 		return 1;
-	return cfs_time_aftereq(now, pool->po_deadline);
+	return time_after_eq(now, pool->po_deadline);
 }
 
 void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
...
@@ -754,7 +754,7 @@ static inline int
 kiblnd_send_keepalive(struct kib_conn *conn)
 {
 	return (*kiblnd_tunables.kib_keepalive > 0) &&
-		cfs_time_after(jiffies, conn->ibc_last_send +
+		time_after(jiffies, conn->ibc_last_send +
 			       msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
 						MSEC_PER_SEC));
 }
...
@@ -3141,7 +3141,7 @@ kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
 			LASSERT(tx->tx_waiting || tx->tx_sending);
 		}
 
-		if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
+		if (time_after_eq(jiffies, tx->tx_deadline)) {
 			CERROR("Timed out tx: %s, %lu seconds\n",
 			       kiblnd_queue2str(conn, txs),
 			       cfs_duration_sec(jiffies - tx->tx_deadline));
...
@@ -481,7 +481,7 @@ ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
 		LASSERT(conn->ksnc_tx_scheduled);
 		list_add_tail(&conn->ksnc_tx_list,
 			      &ksocknal_data.ksnd_enomem_conns);
-		if (!cfs_time_aftereq(jiffies + SOCKNAL_ENOMEM_RETRY,
+		if (!time_after_eq(jiffies + SOCKNAL_ENOMEM_RETRY,
 				      ksocknal_data.ksnd_reaper_waketime))
 			wake_up(&ksocknal_data.ksnd_reaper_waitq);
@@ -590,7 +590,7 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
 		case SOCKNAL_MATCH_YES: /* typed connection */
 			if (!typed || tnob > nob ||
 			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
-			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+			     time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
 				typed = c;
 				tnob = nob;
 			}
@@ -599,7 +599,7 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
 		case SOCKNAL_MATCH_MAY: /* fallback connection */
 			if (!fallback || fnob > nob ||
 			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
-			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+			     time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
 				fallback = c;
 				fnob = nob;
 			}
@@ -745,7 +745,7 @@ ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
 			continue;
 
 		if (!(!route->ksnr_retry_interval || /* first attempt */
-		      cfs_time_aftereq(now, route->ksnr_timeout))) {
+		      time_after_eq(now, route->ksnr_timeout))) {
 			CDEBUG(D_NET,
 			       "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
 			       &route->ksnr_ipaddr,
@@ -1823,7 +1823,7 @@ ksocknal_connect(struct ksock_route *route)
 
 		write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
-		if (cfs_time_aftereq(jiffies, deadline)) {
+		if (time_after_eq(jiffies, deadline)) {
 			rc = -ETIMEDOUT;
 			lnet_connect_console_error(rc, peer->ksnp_id.nid,
 						   route->ksnr_ipaddr,
@@ -2052,7 +2052,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
 	list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
 			    ksnr_connd_list) {
 		if (!route->ksnr_retry_interval ||
-		    cfs_time_aftereq(now, route->ksnr_timeout))
+		    time_after_eq(now, route->ksnr_timeout))
 			return route;
 
 		if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
@@ -2224,7 +2224,7 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer)
 		}
 
 		if (conn->ksnc_rx_started &&
-		    cfs_time_aftereq(jiffies,
+		    time_after_eq(jiffies,
 				     conn->ksnc_rx_deadline)) {
 			/* Timed out incomplete incoming message */
 			ksocknal_conn_addref(conn);
@@ -2240,7 +2240,7 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer)
 		if ((!list_empty(&conn->ksnc_tx_queue) ||
 		     conn->ksnc_sock->sk->sk_wmem_queued) &&
-		    cfs_time_aftereq(jiffies,
+		    time_after_eq(jiffies,
 				     conn->ksnc_tx_deadline)) {
 			/*
 			 * Timed out messages queued for sending or
@@ -2268,7 +2268,7 @@ ksocknal_flush_stale_txs(struct ksock_peer *peer)
 	write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
 	list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
-		if (!cfs_time_aftereq(jiffies,
+		if (!time_after_eq(jiffies,
 				      tx->tx_deadline))
 			break;
@@ -2395,7 +2395,7 @@ ksocknal_check_peer_timeouts(int idx)
 			tx = list_entry(peer->ksnp_tx_queue.next,
 					struct ksock_tx, tx_list);
 
-			if (cfs_time_aftereq(jiffies,
+			if (time_after_eq(jiffies,
 					     tx->tx_deadline)) {
 				ksocknal_peer_addref(peer);
 				read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2413,7 +2413,7 @@ ksocknal_check_peer_timeouts(int idx)
 		tx_stale = NULL;
 		spin_lock(&peer->ksnp_lock);
 		list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
-			if (!cfs_time_aftereq(jiffies,
+			if (!time_after_eq(jiffies,
 					      tx->tx_deadline))
 				break;
 			/* ignore the TX if connection is being closed */
...
@@ -431,7 +431,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 	if (cdls) {
 		if (libcfs_console_ratelimit &&
 		    cdls->cdls_next && /* not first time ever */
-		    !cfs_time_after(jiffies, cdls->cdls_next)) {
+		    !time_after(jiffies, cdls->cdls_next)) {
 			/* skipping a console message */
 			cdls->cdls_count++;
 			if (tcd)
@@ -439,7 +439,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 			return 1;
 		}
 
-		if (cfs_time_after(jiffies,
+		if (time_after(jiffies,
 				   cdls->cdls_next + libcfs_console_max_delay +
 				   10 * HZ)) {
 			/* last timeout was a long time ago */
...
@@ -520,11 +520,11 @@ lnet_peer_is_alive(struct lnet_peer *lp, unsigned long now)
 	 * ignore the initial assumed death (see lnet_peers_start_down()).
 	 */
 	if (!lp->lp_alive && lp->lp_alive_count > 0 &&
-	    cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
+	    time_after_eq(lp->lp_timestamp, lp->lp_last_alive))
 		return 0;
 
 	deadline = lp->lp_last_alive + lp->lp_ni->ni_peertimeout * HZ;
-	alive = cfs_time_after(deadline, now);
+	alive = time_after(deadline, now);
 
 	/* Update obsolete lp_alive except for routers assumed to be dead
 	 * initially, because router checker would update aliveness in this
...
@@ -309,9 +309,9 @@ drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
 		unsigned long now = jiffies;
 
 		rule->dr_stat.fs_count++;
-		drop = cfs_time_aftereq(now, rule->dr_drop_time);
+		drop = time_after_eq(now, rule->dr_drop_time);
 		if (drop) {
-			if (cfs_time_after(now, rule->dr_time_base))
+			if (time_after(now, rule->dr_time_base))
 				rule->dr_time_base = now;
 
 			rule->dr_drop_time = rule->dr_time_base +
@@ -475,9 +475,9 @@ delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
 		unsigned long now = jiffies;
 
 		rule->dl_stat.fs_count++;
-		delay = cfs_time_aftereq(now, rule->dl_delay_time);
+		delay = time_after_eq(now, rule->dl_delay_time);
 		if (delay) {
-			if (cfs_time_after(now, rule->dl_time_base))
+			if (time_after(now, rule->dl_time_base))
 				rule->dl_time_base = now;
 
 			rule->dl_delay_time = rule->dl_time_base +
...
@@ -982,7 +982,7 @@ lnet_ping_router_locked(struct lnet_peer *rtr)
 	lnet_peer_addref_locked(rtr);
 
 	if (rtr->lp_ping_deadline && /* ping timed out? */
-	    cfs_time_after(now, rtr->lp_ping_deadline))
+	    time_after(now, rtr->lp_ping_deadline))
 		lnet_notify_locked(rtr, 1, 0, now);
 
 	/* Run any outstanding notifications */
@@ -1010,7 +1010,7 @@ lnet_ping_router_locked(struct lnet_peer *rtr)
 	       rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
 
 	if (secs && !rtr->lp_ping_notsent &&
-	    cfs_time_after(now, rtr->lp_ping_timestamp + secs * HZ)) {
+	    time_after(now, rtr->lp_ping_timestamp + secs * HZ)) {
 		int rc;
 		struct lnet_process_id id;
 		struct lnet_handle_md mdh;
@@ -1748,7 +1748,7 @@ lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, unsigned long when)
 	}
 
 	/* can't do predictions... */
-	if (cfs_time_after(when, now)) {
+	if (time_after(when, now)) {
 		CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
 		      !ni ? "userspace" : libcfs_nid2str(ni->ni_nid),
 		      libcfs_nid2str(nid), alive ? "up" : "down",
...
@@ -315,7 +315,7 @@ lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
 			continue;
 
 		nd = crpc->crp_node;
-		if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
+		if (time_after(nd->nd_stamp, crpc->crp_stamp))
 			continue;
 
 		nd->nd_stamp = crpc->crp_stamp;
@@ -404,7 +404,7 @@ lstcon_rpc_get_reply(struct lstcon_rpc *crpc, struct srpc_msg **msgpp)
 		crpc->crp_unpacked = 1;
 	}
 
-	if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
+	if (time_after(nd->nd_stamp, crpc->crp_stamp))
 		return 0;
 
 	nd->nd_stamp = crpc->crp_stamp;
...
@@ -155,7 +155,7 @@ stt_check_timers(unsigned long *last)
 
 	spin_lock(&stt_data.stt_lock);
 
-	while (cfs_time_aftereq(this_slot, *last)) {
+	while (time_after_eq(this_slot, *last)) {
 		expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
 		this_slot = this_slot - STTIMER_SLOTTIME;
 	}
...
@@ -325,7 +325,7 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
 	lock_res_and_lock(lock);
 	if (lock->l_granted_mode == LCK_PW &&
 	    !lock->l_readers && !lock->l_writers &&
-	    cfs_time_after(jiffies,
+	    time_after(jiffies,
 			   lock->l_last_used + 10 * HZ)) {
 		unlock_res_and_lock(lock);
 		if (ldlm_bl_to_thread_lock(ns, NULL, lock))
...
@@ -116,7 +116,7 @@ static void ldlm_expired_completion_wait(struct ldlm_lock *lock, __u32 conn_cnt)
 			  (s64)lock->l_last_activity,
 			  (s64)(ktime_get_real_seconds() -
 				lock->l_last_activity));
-		if (cfs_time_after(jiffies, next_dump)) {
+		if (time_after(jiffies, next_dump)) {
 			last_dump = next_dump;
 			next_dump = jiffies + 300 * HZ;
 			ldlm_namespace_dump(D_DLMTRACE,
@@ -1176,7 +1176,7 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
 	 * Despite of the LV, It doesn't make sense to keep the lock which
 	 * is unused for ns_max_age time.
 	 */
-	if (cfs_time_after(jiffies, lock->l_last_used + ns->ns_max_age))
+	if (time_after(jiffies, lock->l_last_used + ns->ns_max_age))
 		return LDLM_POLICY_CANCEL_LOCK;
 
 	slv = ldlm_pool_get_slv(pl);
...
@@ -328,7 +328,7 @@ int osc_object_is_contended(struct osc_object *obj)
 	 * ll_file_is_contended.
 	 */
 	retry_time = obj->oo_contention_time + osc_contention_time * HZ;
-	if (cfs_time_after(cur_time, retry_time)) {
+	if (time_after(cur_time, retry_time)) {
 		osc_object_clear_contended(obj);
 		return 0;
 	}
...
@@ -748,7 +748,7 @@ static int osc_should_shrink_grant(struct client_obd *client)
 	     OBD_CONNECT_GRANT_SHRINK) == 0)
 		return 0;
 
-	if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
+	if (time_after_eq(time, next_shrink - 5 * CFS_TICK)) {
 		/* Get the current RPC size directly, instead of going via:
 		 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
 		 * Keep comment here so that it can be found by searching.
...
@@ -180,7 +180,7 @@ static void ptlrpc_pinger_process_import(struct obd_import *imp,
 	imp->imp_force_verify = 0;
 
-	if (cfs_time_aftereq(imp->imp_next_ping - 5 * CFS_TICK, this_ping) &&
+	if (time_after_eq(imp->imp_next_ping - 5 * CFS_TICK, this_ping) &&
 	    !force) {
 		spin_unlock(&imp->imp_lock);
 		return;
@@ -236,7 +236,7 @@ static void ptlrpc_pinger_main(struct work_struct *ws)
 			ptlrpc_pinger_process_import(imp, this_ping);
 			/* obd_timeout might have changed */
 			if (imp->imp_pingable && imp->imp_next_ping &&
-			    cfs_time_after(imp->imp_next_ping,
+			    time_after(imp->imp_next_ping,
 					   this_ping + PING_INTERVAL * HZ))
 				ptlrpc_update_next_ping(imp, 0);
 		}
...