Commit 5f109b94 authored by David S. Miller

Merge tag 'rxrpc-fixes-20171124' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc: Fixes and improvements

Here's a set of patches that fix and improve some stuff in the AF_RXRPC
protocol:

The patches are:

 (1) Unlock mutex returned by rxrpc_accept_call().

 (2) Don't set connection upgrade by default.

 (3) Differentiate the call->user_mutex used by the kernel from that used
     by userspace calling sendmsg() to avoid lockdep warnings.

 (4) Delay terminal ACK transmission to a work queue so that it can be
     replaced by the next call if there is one.

 (5) Split the call parameters from the connection parameters so that more
     call-specific parameters can be passed through.

 (6) Fix the call timeouts to work the same as for other RxRPC/AFS
     implementations.

 (7) Don't transmit DELAY ACKs immediately, but instead delay them slightly
     so that they can be discarded or can represent more packets.

 (8) Use RTT to calculate certain protocol timeouts.

 (9) Add a timeout to detect lost ACK/DATA packets.

(10) Add a keepalive function so that we ping the peer if we haven't
     transmitted for a short while, thereby keeping intervening firewall
     routes open.

(11) Make service endpoints expire like they're supposed to so that the UDP
     port can be reused.

(12) Fix connection expiry timers to make cleanup happen in a more timely
     fashion.
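
As a rough illustration of how the per-call timeouts from (5) and (6) are
intended to be driven from userspace, the sketch below sets them on a call
with the new RXRPC_SET_CALL_TIMEOUT control message.  The payload layout
assumed here (one to three u32s: hard timeout in seconds, then idle and
normal timeouts in milliseconds) is inferred from the rxrpc_call_params
timeouts fields in the diff and is illustrative only, not a formal ABI
statement; the helper name and the example values are made up.

    /*
     * Illustrative only: send the first DATA of a call, tagging it with a
     * user call ID and requesting per-call hard/idle/normal timeouts.
     * Assumes the AF_RXRPC socket is already set up and connected/bound
     * to the target service.
     */
    #include <string.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <linux/rxrpc.h>

    #ifndef SOL_RXRPC
    #define SOL_RXRPC 272		/* from linux/socket.h */
    #endif

    static ssize_t send_with_timeouts(int fd, unsigned long call_id,
    				  const void *data, size_t len)
    {
    	unsigned int timeouts[3] = { 60, 5000, 30000 }; /* hard s, idle ms, normal ms */
    	char control[CMSG_SPACE(sizeof(call_id)) +
    		     CMSG_SPACE(sizeof(timeouts))];
    	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
    	struct msghdr msg = {
    		.msg_iov	= &iov,
    		.msg_iovlen	= 1,
    		.msg_control	= control,
    		.msg_controllen	= sizeof(control),
    	};
    	struct cmsghdr *cmsg;

    	memset(control, 0, sizeof(control));

    	/* Tag the call with the user's call ID. */
    	cmsg = CMSG_FIRSTHDR(&msg);
    	cmsg->cmsg_level = SOL_RXRPC;
    	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
    	cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
    	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

    	/* Request the hard/idle/normal timeouts for this call. */
    	cmsg = CMSG_NXTHDR(&msg, cmsg);
    	cmsg->cmsg_level = SOL_RXRPC;
    	cmsg->cmsg_type	 = RXRPC_SET_CALL_TIMEOUT;
    	cmsg->cmsg_len	 = CMSG_LEN(sizeof(timeouts));
    	memcpy(CMSG_DATA(cmsg), timeouts, sizeof(timeouts));

    	return sendmsg(fd, &msg, 0);
    }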
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1d3b78bb 3d18cbb7
@@ -49,6 +49,7 @@ enum rxrpc_conn_trace {
rxrpc_conn_put_client,
rxrpc_conn_put_service,
rxrpc_conn_queued,
+rxrpc_conn_reap_service,
rxrpc_conn_seen,
};
@@ -138,10 +139,24 @@ enum rxrpc_rtt_rx_trace {
enum rxrpc_timer_trace {
rxrpc_timer_begin,
+rxrpc_timer_exp_ack,
+rxrpc_timer_exp_hard,
+rxrpc_timer_exp_idle,
+rxrpc_timer_exp_keepalive,
+rxrpc_timer_exp_lost_ack,
+rxrpc_timer_exp_normal,
+rxrpc_timer_exp_ping,
+rxrpc_timer_exp_resend,
rxrpc_timer_expired,
rxrpc_timer_init_for_reply,
rxrpc_timer_init_for_send_reply,
+rxrpc_timer_restart,
rxrpc_timer_set_for_ack,
+rxrpc_timer_set_for_hard,
+rxrpc_timer_set_for_idle,
+rxrpc_timer_set_for_keepalive,
+rxrpc_timer_set_for_lost_ack,
+rxrpc_timer_set_for_normal,
rxrpc_timer_set_for_ping,
rxrpc_timer_set_for_resend,
rxrpc_timer_set_for_send,
@@ -150,6 +165,7 @@ enum rxrpc_timer_trace {
enum rxrpc_propose_ack_trace {
rxrpc_propose_ack_client_tx_end,
rxrpc_propose_ack_input_data,
+rxrpc_propose_ack_ping_for_keepalive,
rxrpc_propose_ack_ping_for_lost_ack,
rxrpc_propose_ack_ping_for_lost_reply,
rxrpc_propose_ack_ping_for_params,
@@ -206,6 +222,7 @@ enum rxrpc_congest_change {
EM(rxrpc_conn_put_client, "PTc") \
EM(rxrpc_conn_put_service, "PTs") \
EM(rxrpc_conn_queued, "QUE") \
+EM(rxrpc_conn_reap_service, "RPs") \
E_(rxrpc_conn_seen, "SEE")
#define rxrpc_client_traces \
@@ -296,16 +313,31 @@ enum rxrpc_congest_change {
#define rxrpc_timer_traces \
EM(rxrpc_timer_begin, "Begin ") \
EM(rxrpc_timer_expired, "*EXPR*") \
+EM(rxrpc_timer_exp_ack, "ExpAck") \
+EM(rxrpc_timer_exp_hard, "ExpHrd") \
+EM(rxrpc_timer_exp_idle, "ExpIdl") \
+EM(rxrpc_timer_exp_keepalive, "ExpKA ") \
+EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \
+EM(rxrpc_timer_exp_normal, "ExpNml") \
+EM(rxrpc_timer_exp_ping, "ExpPng") \
+EM(rxrpc_timer_exp_resend, "ExpRsn") \
EM(rxrpc_timer_init_for_reply, "IniRpl") \
EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
+EM(rxrpc_timer_restart, "Restrt") \
EM(rxrpc_timer_set_for_ack, "SetAck") \
+EM(rxrpc_timer_set_for_hard, "SetHrd") \
+EM(rxrpc_timer_set_for_idle, "SetIdl") \
+EM(rxrpc_timer_set_for_keepalive, "KeepAl") \
+EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \
+EM(rxrpc_timer_set_for_normal, "SetNml") \
EM(rxrpc_timer_set_for_ping, "SetPng") \
EM(rxrpc_timer_set_for_resend, "SetRTx") \
-E_(rxrpc_timer_set_for_send, "SetTx ")
+E_(rxrpc_timer_set_for_send, "SetSnd")
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
+EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
@@ -932,39 +964,47 @@ TRACE_EVENT(rxrpc_rtt_rx,
TRACE_EVENT(rxrpc_timer,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-ktime_t now, unsigned long now_j),
+unsigned long now),
-TP_ARGS(call, why, now, now_j),
+TP_ARGS(call, why, now),
TP_STRUCT__entry(
__field(struct rxrpc_call *, call )
__field(enum rxrpc_timer_trace, why )
-__field_struct(ktime_t, now )
-__field_struct(ktime_t, expire_at )
-__field_struct(ktime_t, ack_at )
-__field_struct(ktime_t, resend_at )
-__field(unsigned long, now_j )
-__field(unsigned long, timer )
+__field(long, now )
+__field(long, ack_at )
+__field(long, ack_lost_at )
+__field(long, resend_at )
+__field(long, ping_at )
+__field(long, expect_rx_by )
+__field(long, expect_req_by )
+__field(long, expect_term_by )
+__field(long, timer )
),
TP_fast_assign(
__entry->call = call;
__entry->why = why;
__entry->now = now;
-__entry->expire_at = call->expire_at;
-__entry->ack_at = call->ack_at;
+__entry->ack_at = call->ack_at;
+__entry->ack_lost_at = call->ack_lost_at;
__entry->resend_at = call->resend_at;
-__entry->now_j = now_j;
-__entry->timer = call->timer.expires;
+__entry->expect_rx_by = call->expect_rx_by;
+__entry->expect_req_by = call->expect_req_by;
+__entry->expect_term_by = call->expect_term_by;
+__entry->timer = call->timer.expires;
),
-TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
-ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
-ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
-ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
-__entry->timer - __entry->now_j)
+__entry->ack_at - __entry->now,
+__entry->ack_lost_at - __entry->now,
+__entry->resend_at - __entry->now,
+__entry->expect_rx_by - __entry->now,
+__entry->expect_req_by - __entry->now,
+__entry->expect_term_by - __entry->now,
+__entry->timer - __entry->now)
);
TRACE_EVENT(rxrpc_rx_lose,
@@ -1080,7 +1120,7 @@ TRACE_EVENT(rxrpc_congest,
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
-TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
__entry->ack_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
...
@@ -59,6 +59,7 @@ enum rxrpc_cmsg_type {
RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
+RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */
RXRPC__SUPPORTED
};
...
@@ -285,6 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
bool upgrade)
{
struct rxrpc_conn_parameters cp;
+struct rxrpc_call_params p;
struct rxrpc_call *call;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
int ret;
@@ -302,6 +303,10 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
if (key && !key->payload.data[0])
key = NULL; /* a no-security key */
+memset(&p, 0, sizeof(p));
+p.user_call_ID = user_call_ID;
+p.tx_total_len = tx_total_len;
memset(&cp, 0, sizeof(cp));
cp.local = rx->local;
cp.key = key;
@@ -309,8 +314,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
cp.exclusive = false;
cp.upgrade = upgrade;
cp.service_id = srx->srx_service;
-call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
-gfp);
+call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
/* The socket has been unlocked. */
if (!IS_ERR(call)) {
call->notify_rx = notify_rx;
@@ -863,6 +867,19 @@ static int rxrpc_release_sock(struct sock *sk)
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
+/* We want to kill off all connections from a service socket
+* as fast as possible because we can't share these; client
+* sockets, on the other hand, can share an endpoint.
+*/
+switch (sk->sk_state) {
+case RXRPC_SERVER_BOUND:
+case RXRPC_SERVER_BOUND2:
+case RXRPC_SERVER_LISTENING:
+case RXRPC_SERVER_LISTEN_DISABLED:
+rx->local->service_closed = true;
+break;
+}
spin_lock_bh(&sk->sk_receive_queue.lock);
sk->sk_state = RXRPC_CLOSE;
spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -878,6 +895,8 @@ static int rxrpc_release_sock(struct sock *sk)
rxrpc_release_calls_on_socket(rx);
flush_workqueue(rxrpc_workqueue);
rxrpc_purge_queue(&sk->sk_receive_queue);
+rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
+rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);
rxrpc_put_local(rx->local);
rx->local = NULL;
...
@@ -79,17 +79,20 @@ struct rxrpc_net {
struct list_head conn_proc_list; /* List of conns in this namespace for proc */
struct list_head service_conns; /* Service conns in this namespace */
rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */
-struct delayed_work service_conn_reaper;
+struct work_struct service_conn_reaper;
+struct timer_list service_conn_reap_timer;
unsigned int nr_client_conns;
unsigned int nr_active_client_conns;
bool kill_all_client_conns;
+bool live;
spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
struct list_head waiting_client_conns;
struct list_head active_client_conns;
struct list_head idle_client_conns;
-struct delayed_work client_conn_reaper;
+struct work_struct client_conn_reaper;
+struct timer_list client_conn_reap_timer;
struct list_head local_endpoints;
struct mutex local_mutex; /* Lock for ->local_endpoints */
@@ -265,6 +268,7 @@ struct rxrpc_local {
rwlock_t services_lock; /* lock for services list */
int debug_id; /* debug ID for printks */
bool dead;
+bool service_closed; /* Service socket closed */
struct sockaddr_rxrpc srx; /* local address */
};
@@ -338,8 +342,17 @@ enum rxrpc_conn_flag {
RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
+RXRPC_CONN_FINAL_ACK_0, /* Need final ACK for channel 0 */
+RXRPC_CONN_FINAL_ACK_1, /* Need final ACK for channel 1 */
+RXRPC_CONN_FINAL_ACK_2, /* Need final ACK for channel 2 */
+RXRPC_CONN_FINAL_ACK_3, /* Need final ACK for channel 3 */
};
+#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) | \
+(1UL << RXRPC_CONN_FINAL_ACK_1) | \
+(1UL << RXRPC_CONN_FINAL_ACK_2) | \
+(1UL << RXRPC_CONN_FINAL_ACK_3))
/*
* Events that can be raised upon a connection.
*/
@@ -393,6 +406,7 @@ struct rxrpc_connection {
#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
struct list_head waiting_calls; /* Calls waiting for channels */
struct rxrpc_channel {
+unsigned long final_ack_at; /* Time at which to issue final ACK */
struct rxrpc_call __rcu *call; /* Active call */
u32 call_id; /* ID of current call */
u32 call_counter; /* Call ID counter */
@@ -404,6 +418,7 @@ struct rxrpc_connection {
};
} channels[RXRPC_MAXCALLS];
+struct timer_list timer; /* Conn event timer */
struct work_struct processor; /* connection event processor */
union {
struct rb_node client_node; /* Node in local->client_conns */
@@ -457,9 +472,10 @@ enum rxrpc_call_flag {
enum rxrpc_call_event {
RXRPC_CALL_EV_ACK, /* need to generate ACK */
RXRPC_CALL_EV_ABORT, /* need to generate abort */
-RXRPC_CALL_EV_TIMER, /* Timer expired */
RXRPC_CALL_EV_RESEND, /* Tx resend required */
RXRPC_CALL_EV_PING, /* Ping send required */
+RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */
+RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */
};
/*
@@ -503,10 +519,16 @@ struct rxrpc_call {
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
struct mutex user_mutex; /* User access mutex */
-ktime_t ack_at; /* When deferred ACK needs to happen */
-ktime_t resend_at; /* When next resend needs to happen */
-ktime_t ping_at; /* When next to send a ping */
-ktime_t expire_at; /* When the call times out */
+unsigned long ack_at; /* When deferred ACK needs to happen */
+unsigned long ack_lost_at; /* When ACK is figured as lost */
+unsigned long resend_at; /* When next resend needs to happen */
+unsigned long ping_at; /* When next to send a ping */
+unsigned long keepalive_at; /* When next to send a keepalive ping */
+unsigned long expect_rx_by; /* When we expect to get a packet by */
+unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
+unsigned long expect_term_by; /* When we expect call termination by */
+u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
+u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
struct timer_list timer; /* Combined event timer */
struct work_struct processor; /* Event processor */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
@@ -609,6 +631,8 @@ struct rxrpc_call {
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
+rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
+rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
};
/*
@@ -632,6 +656,35 @@ struct rxrpc_ack_summary {
u8 cumulative_acks;
};
+/*
+* sendmsg() cmsg-specified parameters.
+*/
+enum rxrpc_command {
+RXRPC_CMD_SEND_DATA, /* send data message */
+RXRPC_CMD_SEND_ABORT, /* request abort generation */
+RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
+RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
+};
+struct rxrpc_call_params {
+s64 tx_total_len; /* Total Tx data length (if send data) */
+unsigned long user_call_ID; /* User's call ID */
+struct {
+u32 hard; /* Maximum lifetime (sec) */
+u32 idle; /* Max time since last data packet (msec) */
+u32 normal; /* Max time since last call packet (msec) */
+} timeouts;
+u8 nr_timeouts; /* Number of timeouts specified */
+};
+struct rxrpc_send_params {
+struct rxrpc_call_params call;
+u32 abort_code; /* Abort code to Tx (if abort) */
+enum rxrpc_command command : 8; /* The command to implement */
+bool exclusive; /* Shared or exclusive call */
+bool upgrade; /* If the connection is upgradeable */
+};
#include <trace/events/rxrpc.h>
/*
@@ -657,12 +710,19 @@ int rxrpc_reject_call(struct rxrpc_sock *);
/*
* call_event.c
*/
-void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
+static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+unsigned long expire_at,
+unsigned long now,
+enum rxrpc_timer_trace why)
+{
+trace_rxrpc_timer(call, why, now);
+timer_reduce(&call->timer, expire_at);
+}
/*
* call_object.c
*/
@@ -672,11 +732,11 @@ extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_conn_parameters *,
struct sockaddr_rxrpc *,
-unsigned long, s64, gfp_t);
+struct rxrpc_call_params *, gfp_t);
int rxrpc_retry_client_call(struct rxrpc_sock *,
struct rxrpc_call *,
struct rxrpc_conn_parameters *,
@@ -803,8 +863,8 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
*/
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
-extern unsigned int rxrpc_conn_idle_client_expiry;
-extern unsigned int rxrpc_conn_idle_client_fast_expiry;
+extern unsigned long rxrpc_conn_idle_client_expiry;
+extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
@@ -825,6 +885,7 @@ void rxrpc_process_connection(struct work_struct *);
* conn_object.c
*/
extern unsigned int rxrpc_connection_expiry;
+extern unsigned int rxrpc_closed_conn_expiry;
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
@@ -861,6 +922,12 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
rxrpc_put_service_conn(conn);
}
+static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
+unsigned long expire_at)
+{
+timer_reduce(&conn->timer, expire_at);
+}
/*
* conn_service.c
*/
@@ -930,13 +997,13 @@ static inline void rxrpc_queue_local(struct rxrpc_local *local)
* misc.c
*/
extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned long rxrpc_requested_ack_delay;
+extern unsigned long rxrpc_soft_ack_delay;
+extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned int rxrpc_resend_timeout;
+extern unsigned long rxrpc_resend_timeout;
extern const s8 rxrpc_ack_priority[];
@@ -954,7 +1021,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
/*
* output.c
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
+int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);
...
@@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
/* Now it gets complicated, because calls get registered with the
* socket here, particularly if a user ID is preassigned by the user.
*/
-call = rxrpc_alloc_call(gfp);
+call = rxrpc_alloc_call(rx, gfp);
if (!call)
return -ENOMEM;
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
...
@@ -21,80 +21,6 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-/*
-* Set the timer
-*/
-void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-ktime_t now)
-{
-unsigned long t_j, now_j = jiffies;
-ktime_t t;
-bool queue = false;
-if (call->state < RXRPC_CALL_COMPLETE) {
-t = call->expire_at;
-if (!ktime_after(t, now)) {
-trace_rxrpc_timer(call, why, now, now_j);
-queue = true;
-goto out;
-}
-if (!ktime_after(call->resend_at, now)) {
-call->resend_at = call->expire_at;
-if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
-queue = true;
-} else if (ktime_before(call->resend_at, t)) {
-t = call->resend_at;
-}
-if (!ktime_after(call->ack_at, now)) {
-call->ack_at = call->expire_at;
-if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
-queue = true;
-} else if (ktime_before(call->ack_at, t)) {
-t = call->ack_at;
-}
-if (!ktime_after(call->ping_at, now)) {
-call->ping_at = call->expire_at;
-if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
-queue = true;
-} else if (ktime_before(call->ping_at, t)) {
-t = call->ping_at;
-}
-t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
-t_j += jiffies;
-/* We have to make sure that the calculated jiffies value falls
-* at or after the nsec value, or we may loop ceaselessly
-* because the timer times out, but we haven't reached the nsec
-* timeout yet.
-*/
-t_j++;
-if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
-mod_timer(&call->timer, t_j);
-trace_rxrpc_timer(call, why, now, now_j);
-}
-}
-out:
-if (queue)
-rxrpc_queue_call(call);
-}
-/*
-* Set the timer
-*/
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-ktime_t now)
-{
-read_lock_bh(&call->state_lock);
-__rxrpc_set_timer(call, why, now);
-read_unlock_bh(&call->state_lock);
-}
/*
* Propose a PING ACK be sent.
*/
@@ -106,12 +32,13 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
rxrpc_queue_call(call);
} else {
-ktime_t now = ktime_get_real();
+unsigned long now = jiffies;
-ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);
+unsigned long ping_at = now + rxrpc_idle_ack_delay;
-if (ktime_before(ping_at, call->ping_at)) {
+if (time_before(ping_at, call->ping_at)) {
-call->ping_at = ping_at;
+WRITE_ONCE(call->ping_at, ping_at);
-rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
+rxrpc_reduce_call_timer(call, ping_at, now,
+rxrpc_timer_set_for_ping);
}
}
}
@@ -125,8 +52,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
enum rxrpc_propose_ack_trace why)
{
enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
-unsigned int expiry = rxrpc_soft_ack_delay;
-ktime_t now, ack_at;
+unsigned long expiry = rxrpc_soft_ack_delay;
s8 prior = rxrpc_ack_priority[ack_reason];
/* Pings are handled specially because we don't want to accidentally
@@ -190,11 +116,18 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
background)
rxrpc_queue_call(call);
} else {
-now = ktime_get_real();
-ack_at = ktime_add_ms(now, expiry);
+unsigned long now = jiffies, ack_at;
-if (ktime_before(ack_at, call->ack_at)) {
+if (call->peer->rtt_usage > 0)
-call->ack_at = ack_at;
+ack_at = nsecs_to_jiffies(call->peer->rtt);
-rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
+else
+ack_at = expiry;
+ack_at = jiffies + expiry;
+if (time_before(ack_at, call->ack_at)) {
+WRITE_ONCE(call->ack_at, ack_at);
+rxrpc_reduce_call_timer(call, ack_at, now,
+rxrpc_timer_set_for_ack);
}
}
@@ -227,18 +160,28 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
/*
* Perform retransmission of NAK'd and unack'd packets.
*/
-static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
+static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
+unsigned long resend_at;
rxrpc_seq_t cursor, seq, top;
-ktime_t max_age, oldest, ack_ts;
+ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
int ix;
u8 annotation, anno_type, retrans = 0, unacked = 0;
_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
-max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
+if (call->peer->rtt_usage > 1)
+timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
+else
+timeout = ms_to_ktime(rxrpc_resend_timeout);
+min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
+if (ktime_before(timeout, min_timeo))
+timeout = min_timeo;
+now = ktime_get_real();
+max_age = ktime_sub(now, timeout);
spin_lock_bh(&call->lock);
@@ -282,7 +225,9 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
}
-call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
+resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
+resend_at += jiffies + rxrpc_resend_timeout;
+WRITE_ONCE(call->resend_at, resend_at);
if (unacked)
rxrpc_congestion_timeout(call);
@@ -292,14 +237,15 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
* retransmitting data.
*/
if (!retrans) {
-rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+rxrpc_reduce_call_timer(call, resend_at, now,
+rxrpc_timer_set_for_resend);
spin_unlock_bh(&call->lock);
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_ns(ack_ts) < call->peer->rtt)
goto out;
rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
rxrpc_propose_ack_ping_for_lost_ack);
-rxrpc_send_ack_packet(call, true);
+rxrpc_send_ack_packet(call, true, NULL);
goto out;
}
@@ -364,7 +310,8 @@ void rxrpc_process_call(struct work_struct *work)
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
-ktime_t now;
+rxrpc_serial_t *send_ack;
+unsigned long now, next, t;
rxrpc_see_call(call);
@@ -384,22 +331,89 @@ void rxrpc_process_call(struct work_struct *work)
goto out_put;
}
-now = ktime_get_real();
-if (ktime_before(call->expire_at, now)) {
+/* Work out if any timeouts tripped */
+now = jiffies;
+t = READ_ONCE(call->expect_rx_by);
+if (time_after_eq(now, t)) {
+trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
+set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+}
+t = READ_ONCE(call->expect_req_by);
+if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+time_after_eq(now, t)) {
+trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
+set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+}
+t = READ_ONCE(call->expect_term_by);
+if (time_after_eq(now, t)) {
+trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
+set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+}
+t = READ_ONCE(call->ack_at);
+if (time_after_eq(now, t)) {
+trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
+cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
+set_bit(RXRPC_CALL_EV_ACK, &call->events);
+}
+t = READ_ONCE(call->ack_lost_at);
+if (time_after_eq(now, t)) {
+trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
+cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
+set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
+}
+t = READ_ONCE(call->keepalive_at);
+if (time_after_eq(now, t)) {
+trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
+cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+rxrpc_propose_ack_ping_for_keepalive);
+set_bit(RXRPC_CALL_EV_PING, &call->events);
+}
+t = READ_ONCE(call->ping_at);
+if (time_after_eq(now, t)) {
+trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
+cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
+set_bit(RXRPC_CALL_EV_PING, &call->events);
+}
+t = READ_ONCE(call->resend_at);
+if (time_after_eq(now, t)) {
+trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
+cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+set_bit(RXRPC_CALL_EV_RESEND, &call->events);
+}
+/* Process events */
+if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
goto recheck_state;
}
-if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
+send_ack = NULL;
+if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
+call->acks_lost_top = call->tx_top;
+rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+rxrpc_propose_ack_ping_for_lost_ack);
+send_ack = &call->acks_lost_ping;
+}
+if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
+send_ack) {
if (call->ackr_reason) {
-rxrpc_send_ack_packet(call, false);
+rxrpc_send_ack_packet(call, false, send_ack);
goto recheck_state;
}
}
if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
-rxrpc_send_ack_packet(call, true);
+rxrpc_send_ack_packet(call, true, NULL);
goto recheck_state;
}
@@ -408,7 +422,24 @@ void rxrpc_process_call(struct work_struct *work)
goto recheck_state;
}
-rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+/* Make sure the timer is restarted */
+next = call->expect_rx_by;
+#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+set(call->expect_req_by);
+set(call->expect_term_by);
+set(call->ack_at);
+set(call->ack_lost_at);
+set(call->resend_at);
+set(call->keepalive_at);
+set(call->ping_at);
+now = jiffies;
+if (time_after_eq(now, next))
+goto recheck_state;
+rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
/* other events may have been raised since we started checking */
if (call->events && call->state < RXRPC_CALL_COMPLETE) {
...
@@ -51,10 +51,14 @@ static void rxrpc_call_timer_expired(unsigned long _call)
_enter("%d", call->debug_id);
-if (call->state < RXRPC_CALL_COMPLETE)
-rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+if (call->state < RXRPC_CALL_COMPLETE) {
+trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+rxrpc_queue_call(call);
+}
}
+static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
/*
* find an extant server call
* - called in process context with IRQs enabled
@@ -95,7 +99,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
/*
* allocate a new call
*/
-struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
{
struct rxrpc_call *call;
@@ -114,6 +118,14 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
goto nomem_2;
mutex_init(&call->user_mutex);
+/* Prevent lockdep reporting a deadlock false positive between the afs
+* filesystem and sys_sendmsg() via the mmap sem.
+*/
+if (rx->sk.sk_kern_sock)
+lockdep_set_class(&call->user_mutex,
+&rxrpc_call_user_mutex_lock_class_key);
setup_timer(&call->timer, rxrpc_call_timer_expired,
(unsigned long)call);
INIT_WORK(&call->processor, &rxrpc_process_call);
@@ -129,6 +141,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
atomic_set(&call->usage, 1);
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
call->tx_total_len = -1;
+call->next_rx_timo = 20 * HZ;
+call->next_req_timo = 1 * HZ;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
@@ -151,7 +165,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
/*
* Allocate a new client call.
*/
-static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
+static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
struct rxrpc_call *call;
@@ -159,7 +174,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
_enter("");
-call = rxrpc_alloc_call(gfp);
+call = rxrpc_alloc_call(rx, gfp);
if (!call)
return ERR_PTR(-ENOMEM);
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
@@ -178,15 +193,17 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
*/
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
-ktime_t now = ktime_get_real(), expire_at;
+unsigned long now = jiffies;
+unsigned long j = now + MAX_JIFFY_OFFSET;
-expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
-call->expire_at = expire_at;
-call->ack_at = expire_at;
-call->ping_at = expire_at;
-call->resend_at = expire_at;
-call->timer.expires = jiffies + LONG_MAX / 2;
-rxrpc_set_timer(call, rxrpc_timer_begin, now);
+call->ack_at = j;
+call->ack_lost_at = j;
+call->resend_at = j;
+call->ping_at = j;
+call->expect_rx_by = j;
+call->expect_req_by = j;
+call->expect_term_by = j;
+call->timer.expires = now;
}
/*
@@ -197,8 +214,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
-unsigned long user_call_ID,
-s64 tx_total_len,
+struct rxrpc_call_params *p,
gfp_t gfp)
__releases(&rx->sk.sk_lock.slock)
{
@@ -208,18 +224,18 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
const void *here = __builtin_return_address(0);
int ret;
-_enter("%p,%lx", rx, user_call_ID);
+_enter("%p,%lx", rx, p->user_call_ID);
-call = rxrpc_alloc_client_call(srx, gfp);
+call = rxrpc_alloc_client_call(rx, srx, gfp);
if (IS_ERR(call)) {
release_sock(&rx->sk);
_leave(" = %ld", PTR_ERR(call));
return call;
}
-call->tx_total_len = tx_total_len;
+call->tx_total_len = p->tx_total_len;
trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
-here, (const void *)user_call_ID);
+here, (const void *)p->user_call_ID);
/* We need to protect a partially set up call against the user as we
* will be acting outside the socket lock.
@@ -235,16 +251,16 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
parent = *pp;
xcall = rb_entry(parent, struct rxrpc_call, sock_node);
-if (user_call_ID < xcall->user_call_ID)
+if (p->user_call_ID < xcall->user_call_ID)
pp = &(*pp)->rb_left;
-else if (user_call_ID > xcall->user_call_ID)
+else if (p->user_call_ID > xcall->user_call_ID)
pp = &(*pp)->rb_right;
else
goto error_dup_user_ID;
}
rcu_assign_pointer(call->socket, rx);
-call->user_call_ID = user_call_ID;
+call->user_call_ID = p->user_call_ID;
__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
rxrpc_get_call(call, rxrpc_call_got_userid);
rb_link_node(&call->sock_node, parent, pp);
...
@@ -85,8 +85,8 @@
__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
-__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
-__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
/*
* We use machine-unique IDs for our client connections.
@@ -554,6 +554,11 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
+/* Cancel the final ACK on the previous call if it hasn't been sent yet
+* as the DATA packet will implicitly ACK it.
+*/
+clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
write_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
@@ -686,7 +691,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
-rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
+rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
rxrpc_cull_active_client_conns(rxnet);
ret = rxrpc_get_client_conn(call, cp, srx, gfp);
@@ -751,6 +756,18 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
}
}
+/*
+* Set the reap timer.
+*/
+static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+{
+unsigned long now = jiffies;
+unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
+if (rxnet->live)
+timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+}
/*
* Disconnect a client call.
*/
@@ -813,6 +830,19 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
goto out_2;
}
+/* Schedule the final ACK to be transmitted in a short while so that it
+* can be skipped if we find a follow-on call. The first DATA packet
+* of the follow on call will implicitly ACK this call.
+*/
+if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+unsigned long final_ack_at = jiffies + 2;
+WRITE_ONCE(chan->final_ack_at, final_ack_at);
+smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
+set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+rxrpc_reduce_conn_timer(conn, final_ack_at);
+}
/* Things are more complex and we need the cache lock. We might be
* able to simply idle the conn or it might now be lurking on the wait
* list. It might even get moved back to the active list whilst we're
@@ -878,9 +908,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
if (rxnet->idle_client_conns.next == &conn->cache_link &&
!rxnet->kill_all_client_conns)
-queue_delayed_work(rxrpc_workqueue,
-&rxnet->client_conn_reaper,
-rxrpc_conn_idle_client_expiry);
+rxrpc_set_client_reap_timer(rxnet);
} else {
trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
@@ -1018,8 +1046,7 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
struct rxrpc_connection *conn;
struct rxrpc_net *rxnet =
-container_of(to_delayed_work(work),
-struct rxrpc_net, client_conn_reaper);
+container_of(work, struct rxrpc_net, client_conn_reaper);
unsigned long expiry, conn_expires_at, now;
unsigned int nr_conns;
bool did_discard = false;
@@ -1061,6 +1088,8 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
expiry = rxrpc_conn_idle_client_expiry;
if (nr_conns > rxrpc_reap_client_connections)
expiry = rxrpc_conn_idle_client_fast_expiry;
+if (conn->params.local->service_closed)
+expiry = rxrpc_closed_conn_expiry * HZ;
conn_expires_at = conn->idle_timestamp + expiry;
@@ -1096,9 +1125,8 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
*/
_debug("not yet");
if (!rxnet->kill_all_client_conns)
-queue_delayed_work(rxrpc_workqueue,
-&rxnet->client_conn_reaper,
-conn_expires_at - now);
+timer_reduce(&rxnet->client_conn_reap_timer,
+conn_expires_at);
out:
spin_unlock(&rxnet->client_conn_cache_lock);
@@ -1118,9 +1146,9 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
rxnet->kill_all_client_conns = true;
spin_unlock(&rxnet->client_conn_cache_lock);
-cancel_delayed_work(&rxnet->client_conn_reaper);
+del_timer_sync(&rxnet->client_conn_reap_timer);
-if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
+if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
_debug("destroy: queue failed");
_leave("");
...
@@ -24,9 +24,10 @@
* Retransmit terminal ACK or ABORT of the previous call.
*/
static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
-struct sk_buff *skb)
+struct sk_buff *skb,
+unsigned int channel)
{
-struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
struct rxrpc_channel *chan;
struct msghdr msg;
struct kvec iov;
@@ -48,7 +49,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
_enter("%d", conn->debug_id);
-chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK];
+chan = &conn->channels[channel];
/* If the last call got moved on whilst we were waiting to run, just
* ignore this packet.
@@ -56,7 +57,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
call_id = READ_ONCE(chan->last_call);
/* Sync with __rxrpc_disconnect_call() */
smp_rmb();
-if (call_id != sp->hdr.callNumber)
+if (skb && call_id != sp->hdr.callNumber)
return;
msg.msg_name = &conn->params.peer->srx.transport;
@@ -65,9 +66,9 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
msg.msg_controllen = 0;
msg.msg_flags = 0;
-pkt.whdr.epoch = htonl(sp->hdr.epoch);
+pkt.whdr.epoch = htonl(conn->proto.epoch);
-pkt.whdr.cid = htonl(sp->hdr.cid);
+pkt.whdr.cid = htonl(conn->proto.cid);
-pkt.whdr.callNumber = htonl(sp->hdr.callNumber);
+pkt.whdr.callNumber = htonl(call_id);
pkt.whdr.seq = 0;
pkt.whdr.type = chan->last_type;
pkt.whdr.flags = conn->out_clientflag;
@@ -87,11 +88,11 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
mtu = conn->params.peer->if_mtu;
mtu -= conn->params.peer->hdrsize;
pkt.ack.bufferSpace = 0;
-pkt.ack.maxSkew = htons(skb->priority);
+pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
-pkt.ack.firstPacket = htonl(chan->last_seq);
+pkt.ack.firstPacket = htonl(chan->last_seq + 1);
-pkt.ack.previousPacket = htonl(chan->last_seq - 1);
+pkt.ack.previousPacket = htonl(chan->last_seq);
-pkt.ack.serial = htonl(sp->hdr.serial);
+pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
-pkt.ack.reason = RXRPC_ACK_DUPLICATE;
+pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt.ack.nAcks = 0;
pkt.info.rxMTU = htonl(rxrpc_rx_mtu);
pkt.info.maxMTU = htonl(mtu);
@@ -272,7 +273,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
case RXRPC_PACKET_TYPE_ACK:
-rxrpc_conn_retransmit_call(conn, skb);
+rxrpc_conn_retransmit_call(conn, skb,
+sp->hdr.cid & RXRPC_CHANNELMASK);
return 0;
case RXRPC_PACKET_TYPE_BUSY:
@@ -378,6 +380,48 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
_leave(" [aborted]");
}
+/*
+* Process delayed final ACKs that we haven't subsumed into a subsequent call.
+*/
+static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
+{
+unsigned long j = jiffies, next_j;
+unsigned int channel;
+bool set;
+again:
+next_j = j + LONG_MAX;
+set = false;
+for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
+struct rxrpc_channel *chan = &conn->channels[channel];
+unsigned long ack_at;
+if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
+continue;
+smp_rmb(); /* vs rxrpc_disconnect_client_call */
+ack_at = READ_ONCE(chan->final_ack_at);
+if (time_before(j, ack_at)) {
+if (time_before(ack_at, next_j)) {
+next_j = ack_at;
+set = true;
+}
+continue;
+}
+if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
+&conn->flags))
+rxrpc_conn_retransmit_call(conn, NULL, channel);
+}
+j = jiffies;
+if (time_before_eq(next_j, j))
+goto again;
+if (set)
+rxrpc_reduce_conn_timer(conn, next_j);
+}
/*
* connection-level event processor
*/
@@ -394,6 +438,10 @@ void rxrpc_process_connection(struct work_struct *work)
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
rxrpc_secure_connection(conn);
+/* Process delayed ACKs whose time has come. */
+if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+rxrpc_process_delayed_final_acks(conn);
/* go through the conn-level event packets, releasing the ref on this
* connection that each one has when we've finished with it */
while ((skb = skb_dequeue(&conn->rx_queue))) {
...
...@@ -20,10 +20,19 @@ ...@@ -20,10 +20,19 @@
/* /*
* Time till a connection expires after last use (in seconds). * Time till a connection expires after last use (in seconds).
*/ */
unsigned int rxrpc_connection_expiry = 10 * 60; unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
static void rxrpc_destroy_connection(struct rcu_head *); static void rxrpc_destroy_connection(struct rcu_head *);
static void rxrpc_connection_timer(struct timer_list *timer)
{
struct rxrpc_connection *conn =
container_of(timer, struct rxrpc_connection, timer);
rxrpc_queue_conn(conn);
}
/* /*
* allocate a new connection * allocate a new connection
*/ */
...@@ -38,6 +47,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) ...@@ -38,6 +47,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
INIT_LIST_HEAD(&conn->cache_link); INIT_LIST_HEAD(&conn->cache_link);
spin_lock_init(&conn->channel_lock); spin_lock_init(&conn->channel_lock);
INIT_LIST_HEAD(&conn->waiting_calls); INIT_LIST_HEAD(&conn->waiting_calls);
timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
INIT_WORK(&conn->processor, &rxrpc_process_connection); INIT_WORK(&conn->processor, &rxrpc_process_connection);
INIT_LIST_HEAD(&conn->proc_link); INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link); INIT_LIST_HEAD(&conn->link);
...@@ -300,22 +310,30 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn) ...@@ -300,22 +310,30 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
return conn; return conn;
} }
/*
* Set the service connection reap timer.
*/
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
unsigned long reap_at)
{
if (rxnet->live)
timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}
/* /*
* Release a service connection * Release a service connection
*/ */
void rxrpc_put_service_conn(struct rxrpc_connection *conn) void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{ {
struct rxrpc_net *rxnet;
const void *here = __builtin_return_address(0); const void *here = __builtin_return_address(0);
int n; int n;
n = atomic_dec_return(&conn->usage); n = atomic_dec_return(&conn->usage);
trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here); trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
ASSERTCMP(n, >=, 0); ASSERTCMP(n, >=, 0);
if (n == 0) { if (n == 1)
rxnet = conn->params.local->rxnet; rxrpc_set_service_reap_timer(conn->params.local->rxnet,
rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0); jiffies + rxrpc_connection_expiry);
}
} }
/* /*
...@@ -332,6 +350,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu) ...@@ -332,6 +350,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
_net("DESTROY CONN %d", conn->debug_id); _net("DESTROY CONN %d", conn->debug_id);
del_timer_sync(&conn->timer);
rxrpc_purge_queue(&conn->rx_queue); rxrpc_purge_queue(&conn->rx_queue);
conn->security->clear(conn); conn->security->clear(conn);
...@@ -351,17 +370,15 @@ void rxrpc_service_connection_reaper(struct work_struct *work) ...@@ -351,17 +370,15 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
{ {
struct rxrpc_connection *conn, *_p; struct rxrpc_connection *conn, *_p;
struct rxrpc_net *rxnet = struct rxrpc_net *rxnet =
container_of(to_delayed_work(work), container_of(work, struct rxrpc_net, service_conn_reaper);
struct rxrpc_net, service_conn_reaper); unsigned long expire_at, earliest, idle_timestamp, now;
unsigned long reap_older_than, earliest, idle_timestamp, now;
LIST_HEAD(graveyard); LIST_HEAD(graveyard);
_enter(""); _enter("");
now = jiffies; now = jiffies;
reap_older_than = now - rxrpc_connection_expiry * HZ; earliest = now + MAX_JIFFY_OFFSET;
earliest = ULONG_MAX;
write_lock(&rxnet->conn_lock); write_lock(&rxnet->conn_lock);
list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
...@@ -371,15 +388,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work) ...@@ -371,15 +388,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
continue; continue;
idle_timestamp = READ_ONCE(conn->idle_timestamp); if (rxnet->live) {
_debug("reap CONN %d { u=%d,t=%ld }", idle_timestamp = READ_ONCE(conn->idle_timestamp);
conn->debug_id, atomic_read(&conn->usage), expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
(long)reap_older_than - (long)idle_timestamp); if (conn->params.local->service_closed)
expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
if (time_after(idle_timestamp, reap_older_than)) {
if (time_before(idle_timestamp, earliest)) _debug("reap CONN %d { u=%d,t=%ld }",
earliest = idle_timestamp; conn->debug_id, atomic_read(&conn->usage),
continue; (long)expire_at - (long)now);
if (time_before(now, expire_at)) {
if (time_before(expire_at, earliest))
earliest = expire_at;
continue;
}
} }
/* The usage count sits at 1 whilst the object is unused on the /* The usage count sits at 1 whilst the object is unused on the
...@@ -387,6 +410,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work) ...@@ -387,6 +410,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
*/ */
if (atomic_cmpxchg(&conn->usage, 1, 0) != 1) if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
continue; continue;
trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
if (rxrpc_conn_is_client(conn)) if (rxrpc_conn_is_client(conn))
BUG(); BUG();
...@@ -397,11 +421,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work) ...@@ -397,11 +421,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
} }
write_unlock(&rxnet->conn_lock); write_unlock(&rxnet->conn_lock);
if (earliest != ULONG_MAX) { if (earliest != now + MAX_JIFFY_OFFSET) {
_debug("reschedule reaper %ld", (long) earliest - now); _debug("reschedule reaper %ld", (long)earliest - (long)now);
ASSERT(time_after(earliest, now)); ASSERT(time_after(earliest, now));
rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, rxrpc_set_service_reap_timer(rxnet, earliest);
earliest - now);
} }
while (!list_empty(&graveyard)) { while (!list_empty(&graveyard)) {
...@@ -429,9 +452,8 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet) ...@@ -429,9 +452,8 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
rxrpc_destroy_all_client_connections(rxnet); rxrpc_destroy_all_client_connections(rxnet);
rxrpc_connection_expiry = 0; del_timer_sync(&rxnet->service_conn_reap_timer);
cancel_delayed_work(&rxnet->client_conn_reaper); rxrpc_queue_work(&rxnet->service_conn_reaper);
rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
flush_workqueue(rxrpc_workqueue); flush_workqueue(rxrpc_workqueue);
write_lock(&rxnet->conn_lock); write_lock(&rxnet->conn_lock);
......
...@@ -318,16 +318,18 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, ...@@ -318,16 +318,18 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
static bool rxrpc_receiving_reply(struct rxrpc_call *call) static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{ {
struct rxrpc_ack_summary summary = { 0 }; struct rxrpc_ack_summary summary = { 0 };
unsigned long now, timo;
rxrpc_seq_t top = READ_ONCE(call->tx_top); rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) { if (call->ackr_reason) {
spin_lock_bh(&call->lock); spin_lock_bh(&call->lock);
call->ackr_reason = 0; call->ackr_reason = 0;
call->resend_at = call->expire_at;
call->ack_at = call->expire_at;
spin_unlock_bh(&call->lock); spin_unlock_bh(&call->lock);
rxrpc_set_timer(call, rxrpc_timer_init_for_reply, now = jiffies;
ktime_get_real()); timo = now + MAX_JIFFY_OFFSET;
WRITE_ONCE(call->resend_at, timo);
WRITE_ONCE(call->ack_at, timo);
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
} }
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
...@@ -437,6 +439,19 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -437,6 +439,19 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
if (state >= RXRPC_CALL_COMPLETE) if (state >= RXRPC_CALL_COMPLETE)
return; return;
if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
unsigned long timo = READ_ONCE(call->next_req_timo);
unsigned long now, expect_req_by;
if (timo) {
now = jiffies;
expect_req_by = now + timo;
WRITE_ONCE(call->expect_req_by, expect_req_by);
rxrpc_reduce_call_timer(call, expect_req_by, now,
rxrpc_timer_set_for_idle);
}
}
/* Received data implicitly ACKs all of the request packets we sent /* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client. * when we're acting as a client.
*/ */
...@@ -615,6 +630,43 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call, ...@@ -615,6 +630,43 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
orig_serial, ack_serial, sent_at, resp_time); orig_serial, ack_serial, sent_at, resp_time);
} }
/*
* Process the response to a ping that we sent to find out if we lost an ACK.
*
* If we got back a ping response that indicates a lower tx_top than what we
* had at the time of the ping transmission, we adjudge all the DATA packets
* sent between the response tx_top and the ping-time tx_top to have been lost.
*/
static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
{
rxrpc_seq_t top, bottom, seq;
bool resend = false;
spin_lock_bh(&call->lock);
bottom = call->tx_hard_ack + 1;
top = call->acks_lost_top;
if (before(bottom, top)) {
for (seq = bottom; before_eq(seq, top); seq++) {
int ix = seq & RXRPC_RXTX_BUFF_MASK;
u8 annotation = call->rxtx_annotations[ix];
u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;
if (anno_type != RXRPC_TX_ANNO_UNACK)
continue;
annotation &= ~RXRPC_TX_ANNO_MASK;
annotation |= RXRPC_TX_ANNO_RETRANS;
call->rxtx_annotations[ix] = annotation;
resend = true;
}
}
spin_unlock_bh(&call->lock);
if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
rxrpc_queue_call(call);
}
/* /*
* Process a ping response. * Process a ping response.
*/ */
...@@ -630,6 +682,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call, ...@@ -630,6 +682,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
smp_rmb(); smp_rmb();
ping_serial = call->ping_serial; ping_serial = call->ping_serial;
if (orig_serial == call->acks_lost_ping)
rxrpc_input_check_for_lost_ack(call);
if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
before(orig_serial, ping_serial)) before(orig_serial, ping_serial))
return; return;
...@@ -908,9 +963,20 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, ...@@ -908,9 +963,20 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
struct sk_buff *skb, u16 skew) struct sk_buff *skb, u16 skew)
{ {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned long timo;
_enter("%p,%p", call, skb); _enter("%p,%p", call, skb);
timo = READ_ONCE(call->next_rx_timo);
if (timo) {
unsigned long now = jiffies, expect_rx_by;
expect_rx_by = jiffies + timo;
WRITE_ONCE(call->expect_rx_by, expect_rx_by);
rxrpc_reduce_call_timer(call, expect_rx_by, now,
rxrpc_timer_set_for_normal);
}
switch (sp->hdr.type) { switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA: case RXRPC_PACKET_TYPE_DATA:
rxrpc_input_data(call, skb, skew); rxrpc_input_data(call, skb, skew);
......
...@@ -20,34 +20,29 @@ ...@@ -20,34 +20,29 @@
*/ */
unsigned int rxrpc_max_backlog __read_mostly = 10; unsigned int rxrpc_max_backlog __read_mostly = 10;
/*
* Maximum lifetime of a call (in mx).
*/
unsigned int rxrpc_max_call_lifetime = 60 * 1000;
/* /*
* How long to wait before scheduling ACK generation after seeing a * How long to wait before scheduling ACK generation after seeing a
* packet with RXRPC_REQUEST_ACK set (in ms). * packet with RXRPC_REQUEST_ACK set (in jiffies).
*/ */
unsigned int rxrpc_requested_ack_delay = 1; unsigned long rxrpc_requested_ack_delay = 1;
/* /*
* How long to wait before scheduling an ACK with subtype DELAY (in ms). * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
* *
* We use this when we've received new data packets. If those packets aren't * We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not * all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend. * requested to let the sender know it doesn't need to resend.
*/ */
unsigned int rxrpc_soft_ack_delay = 1 * 1000; unsigned long rxrpc_soft_ack_delay = HZ;
/* /*
* How long to wait before scheduling an ACK with subtype IDLE (in ms). * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
* *
* We use this when we've consumed some previously soft-ACK'd packets when * We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE * further packets aren't immediately received to decide when to send an IDLE
* ACK let the other end know that it can free up its Tx buffer space. * ACK let the other end know that it can free up its Tx buffer space.
*/ */
unsigned int rxrpc_idle_ack_delay = 0.5 * 1000; unsigned long rxrpc_idle_ack_delay = HZ / 2;
/* /*
* Receive window size in packets. This indicates the maximum number of * Receive window size in packets. This indicates the maximum number of
...@@ -75,7 +70,7 @@ unsigned int rxrpc_rx_jumbo_max = 4; ...@@ -75,7 +70,7 @@ unsigned int rxrpc_rx_jumbo_max = 4;
/* /*
* Time till packet resend (in milliseconds). * Time till packet resend (in milliseconds).
*/ */
unsigned int rxrpc_resend_timeout = 4 * 1000; unsigned long rxrpc_resend_timeout = 4 * HZ;
const s8 rxrpc_ack_priority[] = { const s8 rxrpc_ack_priority[] = {
[0] = 0, [0] = 0,
......
...@@ -14,6 +14,24 @@ ...@@ -14,6 +14,24 @@
unsigned int rxrpc_net_id; unsigned int rxrpc_net_id;
static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
{
struct rxrpc_net *rxnet =
container_of(timer, struct rxrpc_net, client_conn_reap_timer);
if (rxnet->live)
rxrpc_queue_work(&rxnet->client_conn_reaper);
}
static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
{
struct rxrpc_net *rxnet =
container_of(timer, struct rxrpc_net, service_conn_reap_timer);
if (rxnet->live)
rxrpc_queue_work(&rxnet->service_conn_reaper);
}
/* /*
* Initialise a per-network namespace record. * Initialise a per-network namespace record.
*/ */
...@@ -22,6 +40,7 @@ static __net_init int rxrpc_init_net(struct net *net) ...@@ -22,6 +40,7 @@ static __net_init int rxrpc_init_net(struct net *net)
struct rxrpc_net *rxnet = rxrpc_net(net); struct rxrpc_net *rxnet = rxrpc_net(net);
int ret; int ret;
rxnet->live = true;
get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch)); get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
rxnet->epoch |= RXRPC_RANDOM_EPOCH; rxnet->epoch |= RXRPC_RANDOM_EPOCH;
...@@ -31,8 +50,10 @@ static __net_init int rxrpc_init_net(struct net *net) ...@@ -31,8 +50,10 @@ static __net_init int rxrpc_init_net(struct net *net)
INIT_LIST_HEAD(&rxnet->conn_proc_list); INIT_LIST_HEAD(&rxnet->conn_proc_list);
INIT_LIST_HEAD(&rxnet->service_conns); INIT_LIST_HEAD(&rxnet->service_conns);
rwlock_init(&rxnet->conn_lock); rwlock_init(&rxnet->conn_lock);
INIT_DELAYED_WORK(&rxnet->service_conn_reaper, INIT_WORK(&rxnet->service_conn_reaper,
rxrpc_service_connection_reaper); rxrpc_service_connection_reaper);
timer_setup(&rxnet->service_conn_reap_timer,
rxrpc_service_conn_reap_timeout, 0);
rxnet->nr_client_conns = 0; rxnet->nr_client_conns = 0;
rxnet->nr_active_client_conns = 0; rxnet->nr_active_client_conns = 0;
...@@ -42,8 +63,10 @@ static __net_init int rxrpc_init_net(struct net *net) ...@@ -42,8 +63,10 @@ static __net_init int rxrpc_init_net(struct net *net)
INIT_LIST_HEAD(&rxnet->waiting_client_conns); INIT_LIST_HEAD(&rxnet->waiting_client_conns);
INIT_LIST_HEAD(&rxnet->active_client_conns); INIT_LIST_HEAD(&rxnet->active_client_conns);
INIT_LIST_HEAD(&rxnet->idle_client_conns); INIT_LIST_HEAD(&rxnet->idle_client_conns);
INIT_DELAYED_WORK(&rxnet->client_conn_reaper, INIT_WORK(&rxnet->client_conn_reaper,
rxrpc_discard_expired_client_conns); rxrpc_discard_expired_client_conns);
timer_setup(&rxnet->client_conn_reap_timer,
rxrpc_client_conn_reap_timeout, 0);
INIT_LIST_HEAD(&rxnet->local_endpoints); INIT_LIST_HEAD(&rxnet->local_endpoints);
mutex_init(&rxnet->local_mutex); mutex_init(&rxnet->local_mutex);
...@@ -60,6 +83,7 @@ static __net_init int rxrpc_init_net(struct net *net) ...@@ -60,6 +83,7 @@ static __net_init int rxrpc_init_net(struct net *net)
return 0; return 0;
err_proc: err_proc:
rxnet->live = false;
return ret; return ret;
} }
...@@ -70,6 +94,7 @@ static __net_exit void rxrpc_exit_net(struct net *net) ...@@ -70,6 +94,7 @@ static __net_exit void rxrpc_exit_net(struct net *net)
{ {
struct rxrpc_net *rxnet = rxrpc_net(net); struct rxrpc_net *rxnet = rxrpc_net(net);
rxnet->live = false;
rxrpc_destroy_all_calls(rxnet); rxrpc_destroy_all_calls(rxnet);
rxrpc_destroy_all_connections(rxnet); rxrpc_destroy_all_connections(rxnet);
rxrpc_destroy_all_locals(rxnet); rxrpc_destroy_all_locals(rxnet);
......
...@@ -32,6 +32,24 @@ struct rxrpc_abort_buffer { ...@@ -32,6 +32,24 @@ struct rxrpc_abort_buffer {
__be32 abort_code; __be32 abort_code;
}; };
/*
* Arrange for a keepalive ping a certain time after we last transmitted. This
* lets the far side know we're still interested in this call and helps keep
* the route through any intervening firewall open.
*
* Receiving a response to the ping will prevent the ->expect_rx_by timer from
* expiring.
*/
static void rxrpc_set_keepalive(struct rxrpc_call *call)
{
unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
keepalive_at += now;
WRITE_ONCE(call->keepalive_at, keepalive_at);
rxrpc_reduce_call_timer(call, keepalive_at, now,
rxrpc_timer_set_for_keepalive);
}
/* /*
* Fill out an ACK packet. * Fill out an ACK packet.
*/ */
...@@ -95,7 +113,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, ...@@ -95,7 +113,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
/* /*
* Send an ACK call packet. * Send an ACK call packet.
*/ */
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping) int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_serial_t *_serial)
{ {
struct rxrpc_connection *conn = NULL; struct rxrpc_connection *conn = NULL;
struct rxrpc_ack_buffer *pkt; struct rxrpc_ack_buffer *pkt;
...@@ -165,6 +184,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping) ...@@ -165,6 +184,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
ntohl(pkt->ack.firstPacket), ntohl(pkt->ack.firstPacket),
ntohl(pkt->ack.serial), ntohl(pkt->ack.serial),
pkt->ack.reason, pkt->ack.nAcks); pkt->ack.reason, pkt->ack.nAcks);
if (_serial)
*_serial = serial;
if (ping) { if (ping) {
call->ping_serial = serial; call->ping_serial = serial;
...@@ -202,6 +223,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping) ...@@ -202,6 +223,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
call->ackr_seen = top; call->ackr_seen = top;
spin_unlock_bh(&call->lock); spin_unlock_bh(&call->lock);
} }
rxrpc_set_keepalive(call);
} }
out: out:
...@@ -323,7 +346,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -323,7 +346,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
* ACKs if a DATA packet appears to have been lost. * ACKs if a DATA packet appears to have been lost.
*/ */
if (!(sp->hdr.flags & RXRPC_LAST_PACKET) && if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
(retrans || (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
retrans ||
call->cong_mode == RXRPC_CALL_SLOW_START || call->cong_mode == RXRPC_CALL_SLOW_START ||
(call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
...@@ -370,8 +394,23 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, ...@@ -370,8 +394,23 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
if (whdr.flags & RXRPC_REQUEST_ACK) { if (whdr.flags & RXRPC_REQUEST_ACK) {
call->peer->rtt_last_req = now; call->peer->rtt_last_req = now;
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
if (call->peer->rtt_usage > 1) {
unsigned long nowj = jiffies, ack_lost_at;
ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
if (ack_lost_at < 1)
ack_lost_at = 1;
ack_lost_at += nowj;
WRITE_ONCE(call->ack_lost_at, ack_lost_at);
rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
rxrpc_timer_set_for_lost_ack);
}
} }
} }
rxrpc_set_keepalive(call);
_leave(" = %d [%u]", ret, call->peer->maxdata); _leave(" = %d [%u]", ret, call->peer->maxdata);
return ret; return ret;
......
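To get a feel for the two new timers armed in output.c above, here is a small back-of-envelope sketch (not part of the patch set). HZ = 250, a 2-second expected-Rx timeout and a 20 ms measured RTT are purely assumed figures, and nsecs_to_jiffies() is approximated with plain integer division.

#include <stdio.h>

int main(void)
{
	const unsigned long hz = 250;			/* assumed CONFIG_HZ */
	const unsigned long next_rx_timo = 2 * hz;	/* assumed 2 s expected-Rx timeout, in jiffies */
	const unsigned long long rtt_ns = 20ULL * 1000 * 1000;	/* assumed 20 ms measured RTT */
	const unsigned long long ns_per_jiffy = 1000000000ULL / hz;

	/* rxrpc_set_keepalive(): ping after 1/6 of the expected-Rx timeout. */
	unsigned long keepalive_off = next_rx_timo / 6;

	/* rxrpc_send_data_packet(): expect the ACK within ~2 * RTT, roughly
	 * what nsecs_to_jiffies(2 * call->peer->rtt) yields in the hunk above. */
	unsigned long long ack_lost_off = (2 * rtt_ns) / ns_per_jiffy;

	printf("keepalive_at = now + %lu jiffies (~%lu ms)\n",
	       keepalive_off, keepalive_off * 1000 / hz);
	printf("ack_lost_at  = now + %llu jiffies (~%llu ms)\n",
	       ack_lost_off, ack_lost_off * 1000 / hz);
	return 0;
}

With those assumed numbers the keepalive ping lands about 330 ms after the last transmission and the lost-ACK check about 40 ms after a REQUEST_ACK packet, both as offsets added to jiffies "now".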
...@@ -144,11 +144,13 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) ...@@ -144,11 +144,13 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top); trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
#if 0 // TODO: May want to transmit final ACK under some circumstances anyway
if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false, rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
rxrpc_propose_ack_terminal_ack); rxrpc_propose_ack_terminal_ack);
rxrpc_send_ack_packet(call, false); rxrpc_send_ack_packet(call, false, NULL);
} }
#endif
write_lock_bh(&call->state_lock); write_lock_bh(&call->state_lock);
...@@ -161,7 +163,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) ...@@ -161,7 +163,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
case RXRPC_CALL_SERVER_RECV_REQUEST: case RXRPC_CALL_SERVER_RECV_REQUEST:
call->tx_phase = true; call->tx_phase = true;
call->state = RXRPC_CALL_SERVER_ACK_REQUEST; call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
call->ack_at = call->expire_at; call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
write_unlock_bh(&call->state_lock); write_unlock_bh(&call->state_lock);
rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
rxrpc_propose_ack_processing_op); rxrpc_propose_ack_processing_op);
...@@ -217,10 +219,10 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) ...@@ -217,10 +219,10 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
after_eq(top, call->ackr_seen + 2) || after_eq(top, call->ackr_seen + 2) ||
(hard_ack == top && after(hard_ack, call->ackr_consumed))) (hard_ack == top && after(hard_ack, call->ackr_consumed)))
rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
true, false, true, true,
rxrpc_propose_ack_rotate_rx); rxrpc_propose_ack_rotate_rx);
if (call->ackr_reason) if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
rxrpc_send_ack_packet(call, false); rxrpc_send_ack_packet(call, false, NULL);
} }
} }
......
...@@ -21,22 +21,6 @@ ...@@ -21,22 +21,6 @@
#include <net/af_rxrpc.h> #include <net/af_rxrpc.h>
#include "ar-internal.h" #include "ar-internal.h"
enum rxrpc_command {
RXRPC_CMD_SEND_DATA, /* send data message */
RXRPC_CMD_SEND_ABORT, /* request abort generation */
RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
};
struct rxrpc_send_params {
s64 tx_total_len; /* Total Tx data length (if send data) */
unsigned long user_call_ID; /* User's call ID */
u32 abort_code; /* Abort code to Tx (if abort) */
enum rxrpc_command command : 8; /* The command to implement */
bool exclusive; /* Shared or exclusive call */
bool upgrade; /* If the connection is upgradeable */
};
/* /*
* Wait for space to appear in the Tx queue or a signal to occur. * Wait for space to appear in the Tx queue or a signal to occur.
*/ */
...@@ -174,6 +158,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, ...@@ -174,6 +158,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
rxrpc_notify_end_tx_t notify_end_tx) rxrpc_notify_end_tx_t notify_end_tx)
{ {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned long now;
rxrpc_seq_t seq = sp->hdr.seq; rxrpc_seq_t seq = sp->hdr.seq;
int ret, ix; int ret, ix;
u8 annotation = RXRPC_TX_ANNO_UNACK; u8 annotation = RXRPC_TX_ANNO_UNACK;
...@@ -213,11 +198,11 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, ...@@ -213,11 +198,11 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
break; break;
case RXRPC_CALL_SERVER_ACK_REQUEST: case RXRPC_CALL_SERVER_ACK_REQUEST:
call->state = RXRPC_CALL_SERVER_SEND_REPLY; call->state = RXRPC_CALL_SERVER_SEND_REPLY;
call->ack_at = call->expire_at; now = jiffies;
WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY) if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0; call->ackr_reason = 0;
__rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply, trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
ktime_get_real());
if (!last) if (!last)
break; break;
/* Fall through */ /* Fall through */
...@@ -239,14 +224,19 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, ...@@ -239,14 +224,19 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
_debug("need instant resend %d", ret); _debug("need instant resend %d", ret);
rxrpc_instant_resend(call, ix); rxrpc_instant_resend(call, ix);
} else { } else {
ktime_t now = ktime_get_real(), resend_at; unsigned long now = jiffies, resend_at;
resend_at = ktime_add_ms(now, rxrpc_resend_timeout); if (call->peer->rtt_usage > 1)
resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2);
if (ktime_before(resend_at, call->resend_at)) { else
call->resend_at = resend_at; resend_at = rxrpc_resend_timeout;
rxrpc_set_timer(call, rxrpc_timer_set_for_send, now); if (resend_at < 1)
} resend_at = 1;
resend_at = now + rxrpc_resend_timeout;
WRITE_ONCE(call->resend_at, resend_at);
rxrpc_reduce_call_timer(call, resend_at, now,
rxrpc_timer_set_for_send);
} }
rxrpc_free_skb(skb, rxrpc_skb_tx_freed); rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
...@@ -295,7 +285,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, ...@@ -295,7 +285,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
do { do {
/* Check to see if there's a ping ACK to reply to. */ /* Check to see if there's a ping ACK to reply to. */
if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE) if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
rxrpc_send_ack_packet(call, false); rxrpc_send_ack_packet(call, false, NULL);
if (!skb) { if (!skb) {
size_t size, chunk, max, space; size_t size, chunk, max, space;
...@@ -480,11 +470,11 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p) ...@@ -480,11 +470,11 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
if (msg->msg_flags & MSG_CMSG_COMPAT) { if (msg->msg_flags & MSG_CMSG_COMPAT) {
if (len != sizeof(u32)) if (len != sizeof(u32))
return -EINVAL; return -EINVAL;
p->user_call_ID = *(u32 *)CMSG_DATA(cmsg); p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
} else { } else {
if (len != sizeof(unsigned long)) if (len != sizeof(unsigned long))
return -EINVAL; return -EINVAL;
p->user_call_ID = *(unsigned long *) p->call.user_call_ID = *(unsigned long *)
CMSG_DATA(cmsg); CMSG_DATA(cmsg);
} }
got_user_ID = true; got_user_ID = true;
...@@ -522,11 +512,24 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p) ...@@ -522,11 +512,24 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
break; break;
case RXRPC_TX_LENGTH: case RXRPC_TX_LENGTH:
if (p->tx_total_len != -1 || len != sizeof(__s64)) if (p->call.tx_total_len != -1 || len != sizeof(__s64))
return -EINVAL;
p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
if (p->call.tx_total_len < 0)
return -EINVAL; return -EINVAL;
p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg); break;
if (p->tx_total_len < 0)
case RXRPC_SET_CALL_TIMEOUT:
if (len & 3 || len < 4 || len > 12)
return -EINVAL; return -EINVAL;
memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
p->call.nr_timeouts = len / 4;
if (p->call.timeouts.hard > INT_MAX / HZ)
return -ERANGE;
if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
return -ERANGE;
if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
return -ERANGE;
break; break;
default: default:
...@@ -536,7 +539,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p) ...@@ -536,7 +539,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
if (!got_user_ID) if (!got_user_ID)
return -EINVAL; return -EINVAL;
if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA) if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL; return -EINVAL;
_leave(" = 0"); _leave(" = 0");
return 0; return 0;
...@@ -576,8 +579,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, ...@@ -576,8 +579,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
cp.exclusive = rx->exclusive | p->exclusive; cp.exclusive = rx->exclusive | p->exclusive;
cp.upgrade = p->upgrade; cp.upgrade = p->upgrade;
cp.service_id = srx->srx_service; cp.service_id = srx->srx_service;
call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID, call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL);
p->tx_total_len, GFP_KERNEL);
/* The socket is now unlocked */ /* The socket is now unlocked */
_leave(" = %p\n", call); _leave(" = %p\n", call);
...@@ -594,15 +596,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ...@@ -594,15 +596,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
{ {
enum rxrpc_call_state state; enum rxrpc_call_state state;
struct rxrpc_call *call; struct rxrpc_call *call;
unsigned long now, j;
int ret; int ret;
struct rxrpc_send_params p = { struct rxrpc_send_params p = {
.tx_total_len = -1, .call.tx_total_len = -1,
.user_call_ID = 0, .call.user_call_ID = 0,
.abort_code = 0, .call.nr_timeouts = 0,
.command = RXRPC_CMD_SEND_DATA, .abort_code = 0,
.exclusive = false, .command = RXRPC_CMD_SEND_DATA,
.upgrade = true, .exclusive = false,
.upgrade = false,
}; };
_enter(""); _enter("");
...@@ -615,15 +619,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ...@@ -615,15 +619,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
ret = -EINVAL; ret = -EINVAL;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
goto error_release_sock; goto error_release_sock;
call = rxrpc_accept_call(rx, p.user_call_ID, NULL); call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
/* The socket is now unlocked. */ /* The socket is now unlocked. */
if (IS_ERR(call)) if (IS_ERR(call))
return PTR_ERR(call); return PTR_ERR(call);
rxrpc_put_call(call, rxrpc_call_put); ret = 0;
return 0; goto out_put_unlock;
} }
call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID); call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
if (!call) { if (!call) {
ret = -EBADSLT; ret = -EBADSLT;
if (p.command != RXRPC_CMD_SEND_DATA) if (p.command != RXRPC_CMD_SEND_DATA)
...@@ -653,14 +657,39 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ...@@ -653,14 +657,39 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
goto error_put; goto error_put;
} }
if (p.tx_total_len != -1) { if (p.call.tx_total_len != -1) {
ret = -EINVAL; ret = -EINVAL;
if (call->tx_total_len != -1 || if (call->tx_total_len != -1 ||
call->tx_pending || call->tx_pending ||
call->tx_top != 0) call->tx_top != 0)
goto error_put; goto error_put;
call->tx_total_len = p.tx_total_len; call->tx_total_len = p.call.tx_total_len;
}
}
switch (p.call.nr_timeouts) {
case 3:
j = msecs_to_jiffies(p.call.timeouts.normal);
if (p.call.timeouts.normal > 0 && j == 0)
j = 1;
WRITE_ONCE(call->next_rx_timo, j);
/* Fall through */
case 2:
j = msecs_to_jiffies(p.call.timeouts.idle);
if (p.call.timeouts.idle > 0 && j == 0)
j = 1;
WRITE_ONCE(call->next_req_timo, j);
/* Fall through */
case 1:
if (p.call.timeouts.hard > 0) {
j = msecs_to_jiffies(p.call.timeouts.hard);
now = jiffies;
j += now;
WRITE_ONCE(call->expect_term_by, j);
rxrpc_reduce_call_timer(call, j, now,
rxrpc_timer_set_for_hard);
} }
break;
} }
state = READ_ONCE(call->state); state = READ_ONCE(call->state);
...@@ -689,6 +718,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ...@@ -689,6 +718,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
ret = rxrpc_send_data(rx, call, msg, len, NULL); ret = rxrpc_send_data(rx, call, msg, len, NULL);
} }
out_put_unlock:
mutex_unlock(&call->user_mutex); mutex_unlock(&call->user_mutex);
error_put: error_put:
rxrpc_put_call(call, rxrpc_call_put); rxrpc_put_call(call, rxrpc_call_put);
......
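A rough userspace sketch (not from this patch set) of how the new RXRPC_SET_CALL_TIMEOUT control message parsed above might be attached, together with the mandatory RXRPC_USER_CALL_ID, to the sendmsg() that issues a call. The field order (hard, idle, normal) mirrors the cmsg parser and the per-call timeout switch above; the exact units, the SOL_RXRPC value and the constant in the uapi header are assumptions to be checked against the target kernel's headers.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272	/* assumed value from the kernel's linux/socket.h */
#endif

static ssize_t rxrpc_send_with_timeouts(int fd, struct msghdr *msg,
					unsigned long user_call_id)
{
	/* Up to three 32-bit values: hard, idle, normal (4, 8 or 12 bytes). */
	uint32_t timeouts[3] = { 30000, 5000, 10000 };
	union {
		char buf[CMSG_SPACE(sizeof(user_call_id)) +
			 CMSG_SPACE(sizeof(timeouts))];
		struct cmsghdr align;
	} control;
	struct cmsghdr *cmsg;

	/* msg is assumed to already carry the peer address and request data. */
	memset(&control, 0, sizeof(control));
	msg->msg_control = control.buf;
	msg->msg_controllen = sizeof(control.buf);

	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(user_call_id));
	memcpy(CMSG_DATA(cmsg), &user_call_id, sizeof(user_call_id));

	cmsg = CMSG_NXTHDR(msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_SET_CALL_TIMEOUT;	/* new in this series */
	cmsg->cmsg_len = CMSG_LEN(sizeof(timeouts));
	memcpy(CMSG_DATA(cmsg), timeouts, sizeof(timeouts));

	return sendmsg(fd, msg, 0);
}

Passing fewer than three values sets only the leading timeouts, matching the nr_timeouts = len / 4 fall-through in rxrpc_do_sendmsg() above.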
...@@ -21,6 +21,8 @@ static const unsigned int four = 4; ...@@ -21,6 +21,8 @@ static const unsigned int four = 4;
static const unsigned int thirtytwo = 32; static const unsigned int thirtytwo = 32;
static const unsigned int n_65535 = 65535; static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1; static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
static const unsigned long one_jiffy = 1;
static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
/* /*
* RxRPC operating parameters. * RxRPC operating parameters.
...@@ -29,64 +31,60 @@ static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1; ...@@ -29,64 +31,60 @@ static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
* information on the individual parameters. * information on the individual parameters.
*/ */
static struct ctl_table rxrpc_sysctl_table[] = { static struct ctl_table rxrpc_sysctl_table[] = {
/* Values measured in milliseconds */ /* Values measured in milliseconds but used in jiffies */
{ {
.procname = "req_ack_delay", .procname = "req_ack_delay",
.data = &rxrpc_requested_ack_delay, .data = &rxrpc_requested_ack_delay,
.maxlen = sizeof(unsigned int), .maxlen = sizeof(unsigned long),
.mode = 0644, .mode = 0644,
.proc_handler = proc_dointvec, .proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&zero, .extra1 = (void *)&one_jiffy,
.extra2 = (void *)&max_jiffies,
}, },
{ {
.procname = "soft_ack_delay", .procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay, .data = &rxrpc_soft_ack_delay,
.maxlen = sizeof(unsigned int), .maxlen = sizeof(unsigned long),
.mode = 0644, .mode = 0644,
.proc_handler = proc_dointvec, .proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&one, .extra1 = (void *)&one_jiffy,
.extra2 = (void *)&max_jiffies,
}, },
{ {
.procname = "idle_ack_delay", .procname = "idle_ack_delay",
.data = &rxrpc_idle_ack_delay, .data = &rxrpc_idle_ack_delay,
.maxlen = sizeof(unsigned int), .maxlen = sizeof(unsigned long),
.mode = 0644, .mode = 0644,
.proc_handler = proc_dointvec, .proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&one, .extra1 = (void *)&one_jiffy,
}, .extra2 = (void *)&max_jiffies,
{
.procname = "resend_timeout",
.data = &rxrpc_resend_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
.extra1 = (void *)&one,
}, },
{ {
.procname = "idle_conn_expiry", .procname = "idle_conn_expiry",
.data = &rxrpc_conn_idle_client_expiry, .data = &rxrpc_conn_idle_client_expiry,
.maxlen = sizeof(unsigned int), .maxlen = sizeof(unsigned long),
.mode = 0644, .mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies, .proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&one, .extra1 = (void *)&one_jiffy,
.extra2 = (void *)&max_jiffies,
}, },
{ {
.procname = "idle_conn_fast_expiry", .procname = "idle_conn_fast_expiry",
.data = &rxrpc_conn_idle_client_fast_expiry, .data = &rxrpc_conn_idle_client_fast_expiry,
.maxlen = sizeof(unsigned int), .maxlen = sizeof(unsigned long),
.mode = 0644, .mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies, .proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&one, .extra1 = (void *)&one_jiffy,
.extra2 = (void *)&max_jiffies,
}, },
/* Values measured in seconds but used in jiffies */
{ {
.procname = "max_call_lifetime", .procname = "resend_timeout",
.data = &rxrpc_max_call_lifetime, .data = &rxrpc_resend_timeout,
.maxlen = sizeof(unsigned int), .maxlen = sizeof(unsigned long),
.mode = 0644, .mode = 0644,
.proc_handler = proc_dointvec, .proc_handler = proc_doulongvec_ms_jiffies_minmax,
.extra1 = (void *)&one, .extra1 = (void *)&one_jiffy,
.extra2 = (void *)&max_jiffies,
}, },
/* Non-time values */ /* Non-time values */
......
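For reference, a minimal userspace check of the retimed sysctls (an illustrative sketch, assuming the /proc/sys/net/rxrpc/ files registered by this table are present): after the switch to proc_doulongvec_ms_jiffies_minmax the knobs are still read and written in milliseconds, even though the backing variables now hold jiffies and are clamped to the range [1 jiffy, MAX_JIFFY_OFFSET].

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/net/rxrpc/resend_timeout";
	unsigned long ms;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%lu", &ms) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("resend_timeout = %lu ms\n", ms);	/* 4000 with the new default of 4 * HZ */
	return 0;
}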