call_event.c 11.6 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-or-later
2 3 4 5 6 7
/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

8 9
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

10 11 12 13
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
14
#include <linux/slab.h>
15 16 17 18 19
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

20 21 22
/*
 * Propose a PING ACK be sent.
 */
23 24
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
			enum rxrpc_propose_ack_trace why)
{
	unsigned long now_j = jiffies;
	unsigned long target = now_j + rxrpc_idle_ack_delay;

	spin_lock_bh(&call->lock);

	/* Only bring the ping forward; a later deadline than the one already
	 * set would delay an ACK that has already been promised.
	 */
	if (!time_before(target, call->ping_at))
		goto out_unlock;

	WRITE_ONCE(call->ping_at, target);
	rxrpc_reduce_call_timer(call, target, now_j,
				rxrpc_timer_set_for_ping);
	trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);

out_unlock:
	spin_unlock_bh(&call->lock);
}

41
/*
42
 * Propose a DELAY ACK be sent in the future.
43
 */
44 45 46
static void __rxrpc_propose_delay_ACK(struct rxrpc_call *call,
				      rxrpc_serial_t serial,
				      enum rxrpc_propose_ack_trace why)
{
	unsigned long now = jiffies, ack_at;

	/* Record the serial number that the eventual DELAY ACK will respond
	 * to.  Caller must hold call->lock (see rxrpc_propose_delay_ACK()).
	 */
	call->ackr_serial = serial;

	/* Base the ACK delay on srtt/8 when we have an RTT estimate for the
	 * peer, otherwise fall back to the default soft-ACK delay.
	 *
	 * Note: the previous "if (rxrpc_soft_ack_delay < expiry)" clamp was
	 * dead code - expiry had just been initialised to the same value, so
	 * the condition could never be true; it has been removed.
	 */
	if (call->peer->srtt_us != 0)
		ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
	else
		ack_at = rxrpc_soft_ack_delay;

	/* Back off further if transmission is struggling. */
	ack_at += READ_ONCE(call->tx_backoff);
	ack_at += now;

	/* Only bring the ACK deadline forward, never push it back. */
	if (time_before(ack_at, call->delay_ack_at)) {
		WRITE_ONCE(call->delay_ack_at, ack_at);
		rxrpc_reduce_call_timer(call, ack_at, now,
					rxrpc_timer_set_for_ack);
	}

	trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
}

/*
72
 * Propose a DELAY ACK be sent, locking the call structure
73
 */
74 75
void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t  serial,
			     enum rxrpc_propose_ack_trace why)
{
	/* Locked wrapper: __rxrpc_propose_delay_ACK() requires call->lock to
	 * protect ackr_serial and the delay_ack_at deadline.
	 */
	spin_lock_bh(&call->lock);
	__rxrpc_propose_delay_ACK(call, serial, why);
	spin_unlock_bh(&call->lock);
}

82 83 84 85 86 87 88 89 90 91 92
/*
 * Queue an ACK for immediate transmission.
 */
void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
		    rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
{
	struct rxrpc_local *local = call->conn->params.local;
	struct rxrpc_txbuf *txb;

	/* No point building an ACK for a call that has lost its connection. */
	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return;
	/* Coalesce DELAY ACKs: if one is already pending for this call, drop
	 * this request rather than queueing a duplicate.
	 */
	if (ack_reason == RXRPC_ACK_DELAY &&
	    test_and_set_bit(RXRPC_CALL_DELAY_ACK_PENDING, &call->flags)) {
		trace_rxrpc_drop_ack(call, why, ack_reason, serial, false);
		return;
	}

	rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);

	/* May be called from softirq context, in which case we mustn't sleep
	 * in the allocator (GFP_ATOMIC) and a failure is tolerable (NOWARN).
	 */
	txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_ACK,
				in_softirq() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS);
	if (!txb) {
		kleave(" = -ENOMEM");
		return;
	}

	/* Fill in the wire header and ACK trailer.  The sequence-dependent
	 * fields (firstPacket etc.) are left zero here; presumably they are
	 * filled in at transmission time - TODO confirm against the
	 * transmitter.
	 */
	txb->ack_why		= why;
	txb->wire.seq		= 0;
	txb->wire.type		= RXRPC_PACKET_TYPE_ACK;
	txb->wire.flags		|= RXRPC_SLOW_START_OK;
	txb->ack.bufferSpace	= 0;
	txb->ack.maxSkew	= 0;
	txb->ack.firstPacket	= 0;
	txb->ack.previousPacket	= 0;
	txb->ack.serial		= htonl(serial);
	txb->ack.reason		= ack_reason;
	txb->ack.nAcks		= 0;

	/* Pin the call whilst the txbuf sits on the local endpoint's queue;
	 * if we can't get a ref, give the buffer back and bail.
	 */
	if (!rxrpc_try_get_call(call, rxrpc_call_got)) {
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_nomem);
		return;
	}

	/* Queue the ACK on the local endpoint for transmission. */
	spin_lock_bh(&local->ack_tx_lock);
	list_add_tail(&txb->tx_link, &local->ack_tx_queue);
	spin_unlock_bh(&local->ack_tx_lock);
	trace_rxrpc_send_ack(call, why, ack_reason, serial);

	/* In process context we can transmit directly; otherwise punt the
	 * work to the local endpoint's processor (taking a ref on it first).
	 */
	if (in_task()) {
		rxrpc_transmit_ack_packets(call->peer->local);
	} else {
		rxrpc_get_local(local);
		rxrpc_queue_local(local);
	}
}

138 139 140 141 142 143 144 145
/*
 * Handle congestion being detected by the retransmit timeout.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
	/* Flag that the retransmit timeout fired so that the congestion
	 * management code can react to it later.
	 */
	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}

146
/*
147
 * Perform retransmission of NAK'd and unack'd packets.
148
 */
David Howells's avatar
David Howells committed
149
static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
	struct sk_buff *skb;
	unsigned long resend_at;
	rxrpc_seq_t cursor, seq, top;
	ktime_t now, max_age, oldest, ack_ts;
	int ix;
	u8 annotation, anno_type, retrans = 0, unacked = 0;

	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

	/* Packets transmitted before max_age (now - RTO) are considered lost
	 * and eligible for retransmission.
	 */
	now = ktime_get_real();
	max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));

	spin_lock_bh(&call->lock);

	/* The Tx window runs (tx_hard_ack, tx_top]; nothing to do if empty. */
	cursor = call->tx_hard_ack;
	top = call->tx_top;
	ASSERT(before_eq(cursor, top));
	if (cursor == top)
		goto out_unlock;

	/* Scan the packet list without dropping the lock and decide which of
	 * the packets in the Tx buffer we're going to resend and what the new
	 * resend timeout will be.
	 */
	trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
	oldest = now;
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		/* Split the annotation into its state (anno_type) and the
		 * extra flag bits (RESENT etc.) kept in annotation.
		 */
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		if (anno_type == RXRPC_TX_ANNO_ACK)
			continue;	/* Already soft-ACK'd; skip. */

		skb = call->rxtx_buffer[ix];
		rxrpc_see_skb(skb, rxrpc_skb_seen);

		if (anno_type == RXRPC_TX_ANNO_UNACK) {
			/* Still within the RTO window: not lost yet, but
			 * track the oldest such packet so we can compute the
			 * next resend deadline.
			 */
			if (ktime_after(skb->tstamp, max_age)) {
				if (ktime_before(skb->tstamp, oldest))
					oldest = skb->tstamp;
				continue;
			}
			/* First-time timeout (not a prior resend) counts as
			 * evidence of congestion.
			 */
			if (!(annotation & RXRPC_TX_ANNO_RESENT))
				unacked++;
		}

		/* Okay, we need to retransmit a packet. */
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
		retrans++;
		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
	}

	/* Set the next resend deadline from the oldest still-live packet plus
	 * the (possibly backed-off) RTO.
	 */
	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
	resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans);
	WRITE_ONCE(call->resend_at, resend_at);

	if (unacked)
		rxrpc_congestion_timeout(call);

	/* If there was nothing that needed retransmission then it's likely
	 * that an ACK got lost somewhere.  Send a ping to find out instead of
	 * retransmitting data.
	 */
	if (!retrans) {
		rxrpc_reduce_call_timer(call, resend_at, now_j,
					rxrpc_timer_set_for_resend);
		spin_unlock_bh(&call->lock);
		/* Don't ping if we heard an ACK within the last srtt/8. */
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
			goto out;
		rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
			       rxrpc_propose_ack_ping_for_lost_ack);
		goto out;
	}

	/* Now go through the Tx window and perform the retransmissions.  We
	 * have to drop the lock for each send.  If an ACK comes in whilst the
	 * lock is dropped, it may clear some of the retransmission markers for
	 * packets that it soft-ACKs.
	 */
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		if (anno_type != RXRPC_TX_ANNO_RETRANS)
			continue;

		/* We need to reset the retransmission state, but we need to do
		 * so before we drop the lock as a new ACK/NAK may come in and
		 * confuse things
		 */
		annotation &= ~RXRPC_TX_ANNO_MASK;
		annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
		call->rxtx_annotations[ix] = annotation;

		skb = call->rxtx_buffer[ix];
		if (!skb)
			continue;

		/* Hold the skb across the unlocked send. */
		rxrpc_get_skb(skb, rxrpc_skb_got);
		spin_unlock_bh(&call->lock);

		rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
		if (rxrpc_send_data_packet(call, skb, true) < 0) {
			/* Send failed: drop our ref and give up; note that we
			 * return with the lock NOT held here.
			 */
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			return;
		}

		if (rxrpc_is_client_call(call))
			rxrpc_expose_client_call(call);

		rxrpc_free_skb(skb, rxrpc_skb_freed);
		spin_lock_bh(&call->lock);
		/* The hard-ACK point may have advanced whilst the lock was
		 * dropped; skip anything that has since been hard-ACK'd.
		 */
		if (after(call->tx_hard_ack, seq))
			seq = call->tx_hard_ack;
	}

out_unlock:
	spin_unlock_bh(&call->lock);
out:
	_leave("");
}

/*
277
 * Handle retransmission and deferred ACK/abort generation.
278 279 280 281 282
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	unsigned long now, next, t;
	unsigned int iterations = 0;
	rxrpc_serial_t ackr_serial;

	rxrpc_see_call(call);

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
	/* Limit the number of times we do this before returning to the manager */
	iterations++;
	if (iterations > 5)
		goto requeue;

	/* A pending abort takes priority over everything else. */
	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	/* A completed call no longer needs its timer. */
	if (call->state == RXRPC_CALL_COMPLETE) {
		rxrpc_delete_call_timer(call);
		goto out_put;
	}

	/* Work out if any timeouts tripped */
	now = jiffies;
	t = READ_ONCE(call->expect_rx_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	/* Request-receive timeout only applies whilst a server call is still
	 * receiving the request phase.
	 */
	t = READ_ONCE(call->expect_req_by);
	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
	    time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_term_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	/* Delayed-ACK deadline: disarm it (cmpxchg only succeeds if no one
	 * rearmed it meanwhile), consume the recorded serial and send the
	 * DELAY ACK now.
	 */
	t = READ_ONCE(call->delay_ack_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
		cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
		ackr_serial = xchg(&call->ackr_serial, 0);
		rxrpc_send_ACK(call, RXRPC_ACK_DELAY, ackr_serial,
			       rxrpc_propose_ack_ping_for_lost_ack);
	}

	t = READ_ONCE(call->ack_lost_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
	}

	/* Keepalive deadline: ping the peer to keep the call alive. */
	t = READ_ONCE(call->keepalive_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
		rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
			       rxrpc_propose_ack_ping_for_keepalive);
	}

	t = READ_ONCE(call->ping_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
		rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
			       rxrpc_propose_ack_ping_for_keepalive);
	}

	t = READ_ONCE(call->resend_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	/* Process events */
	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
		/* If the peer was heard but its serial numbers jumped, it
		 * presumably restarted - treat that as a reset rather than a
		 * plain timeout.
		 */
		if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
		    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
			trace_rxrpc_call_reset(call);
			rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET);
		} else {
			rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
		}
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}

	/* A lost ACK is probed for with a ping covering the current Tx top. */
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
		call->acks_lost_top = call->tx_top;
		rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
			       rxrpc_propose_ack_ping_for_lost_ack);
	}

	/* Retransmit unless we're a client just waiting for the reply. */
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) &&
	    call->state != RXRPC_CALL_CLIENT_RECV_REPLY) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	/* Make sure the timer is restarted */
	next = call->expect_rx_by;

#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }

	/* Pick the earliest of all the outstanding deadlines. */
	set(call->expect_req_by);
	set(call->expect_term_by);
	set(call->delay_ack_at);
	set(call->ack_lost_at);
	set(call->resend_at);
	set(call->keepalive_at);
	set(call->ping_at);

	/* If the earliest deadline has already passed, go round again rather
	 * than arming a timer in the past.
	 */
	now = jiffies;
	if (time_after_eq(now, next))
		goto recheck_state;

	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE)
		goto requeue;

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
	return;

requeue:
	__rxrpc_queue_call(call);
	goto out;
}