/*
 * Ultra Wide Band
 * Dynamic Reservation Protocol handling
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "uwb-internal.h"

/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. If the device is the reservation
	 * target, it shall also set the Reason Code in its DRP IE to
	 * Conflict in its beacon in the following superframe.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/* the device shall not set the Reservation Status bit to ONE
	 * and shall not transmit frames in conflicting MASs. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. It shall remove the conflicting
	 * MASs from the reservation or set the Reservation Status to
	 * ZERO in its beacon in the following superframe. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};


static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
				    struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
	unsigned long flags;

	if (r != NULL) {
		if (r->bResultCode != UWB_RC_RES_SUCCESS)
			dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
				uwb_rc_strerror(r->bResultCode), r->bResultCode);
	} else
		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");

	spin_lock_irqsave(&rc->rsvs_lock, flags);
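	/*
	 * A pending count greater than one means another DRP IE update
	 * was requested while this command was outstanding, so queue a
	 * fresh update now that it has completed.
	 */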
	if (rc->set_drp_ie_pending > 1) {
		rc->set_drp_ie_pending = 0;
		uwb_rsv_queue_update(rc);
	} else {
		rc->set_drp_ie_pending = 0;
	}
	spin_unlock_irqrestore(&rc->rsvs_lock, flags);
}

/**
 * Construct and send the SET DRP IE
 *
 * @rc:         UWB Host controller
 * @returns:    >= 0 number of bytes still available in the beacon
 *              < 0 errno code on error.
 *
 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
 * device to include in its beacon at the same time. We thus have to
 * traverse all reservations and include the DRP IEs of all PENDING
 * and NEGOTIATED reservations in a SET DRP command for transmission.
 *
 * A DRP Availability IE is appended.
 *
 * rc->rsvs_mutex is held
 *
 * FIXME: we currently ignore the returned value indicating the remaining
 * space in the beacon. This could be used to deny reservation requests
 * earlier if it is determined that they would cause the beacon space to
 * be exceeded.
 */
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
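	/* Each IE takes hdr.length octets of payload plus the 2 octet
	 * IE header (Element ID and Length fields). */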
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			num_bytes += rsv->drp_ie->hdr.length + 2;
			if (uwb_rsv_has_two_drp_ies(rsv) &&
				(rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes +=
					mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* FIXME: DRP avail IE is not always needed */
	/* put DRP avail IE first */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
				(rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr +=
					mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

	result = uwb_rc_cmd_async(rc, "SET-DRP-IE",
				&cmd->rccb, sizeof(*cmd) + num_bytes,
				UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
				uwb_rc_set_drp_cmd_done, NULL);

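	/* Note that a SET-DRP-IE command is outstanding;
	 * uwb_rc_set_drp_cmd_done() uses this to tell whether another
	 * update was requested while the command was in flight. */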
	rc->set_drp_ie_pending = 1;

	kfree(cmd);
error:
	return result;
}

/*
 * Evaluate the action to perform using conflict resolution rules
 *
 * Return a uwb_drp_conflict_action.
 */
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	int our_tie_breaker = rsv->tiebreaker;
	int our_type        = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status      = uwb_ie_drp_status(ext_drp_ie);
	int ext_type        = uwb_ie_drp_type(ext_drp_ie);


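	/*
	 * Apply the [ECMA-368 2nd Edition] 17.4.6 rules in order:
	 * reservation type first, then the Reservation Status bits, and
	 * finally the tie-breaker bit combined with the beacon slot
	 * order to decide which side must yield.
	 */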
	/* [ECMA-368 2nd Edition] 17.4.6 */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-1 */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-2 */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-3 */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-4 */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5a */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot <  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5b */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot >  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-6a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else  {
			/* [ECMA-368 2nd Edition] 17.4.6-6b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-7a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-7b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	return UWB_DRP_CONFLICT_MANTAIN;
}

static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
				   int ext_beacon_slot,
				   struct uwb_rsv *rsv,
				   struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));

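	/*
	 * Only the reservation owner can move or shrink the reservation;
	 * as the target we can only flag the conflict and wait for the
	 * owner to resolve it.
	 */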
	if (uwb_rsv_is_owner(rsv)) {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* try move */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);

			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* drop some mases with reason modified */
			/* put in the companion the mases to be dropped */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		default:
			break;
		}
	} else {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		default:
			break;
		}

	}

}

static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* status of companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc,
						&rsv->mv.companion_mas);
			}
		} else { /* rsv is target */
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_CONFLICT);
                                /* send_drp_avail_ie = true; */
			}
		}
	} else { /* also base part of the reservation is conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* drop some mases with reason modified */

			/* put in the companion the mases to be dropped */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
					conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
                        /* send_drp_avail_ie = true; */
		}
	}
}

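/*
 * Resolve a conflict against one of our reservations.  A reservation
 * that is being moved carries two DRP IEs, so it is handled differently
 * depending on whether the conflict hits its base MASs or only the
 * companion (expansion) MASs.
 */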
static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
					struct uwb_rc_evt_drp *drp_evt,
					struct uwb_ie_drp *drp_ie,
					struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv_move *mv;

	/* check if the conflicting reservation has two drp_ies */
	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
								UWB_NUM_MAS)) {
			handle_conflict_expanding(drp_ie,
						drp_evt->beacon_slot_number,
						rsv, false, conflicting_mas);
		} else {
			if (bitmap_intersects(mv->companion_mas.bm,
					conflicting_mas->bm, UWB_NUM_MAS)) {
				handle_conflict_expanding(
					drp_ie, drp_evt->beacon_slot_number,
					rsv, true, conflicting_mas);
			}
		}
	} else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
							UWB_NUM_MAS)) {
		handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number,
					rsv, conflicting_mas);
	}
}

static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
					    struct uwb_rc_evt_drp *drp_evt,
					    struct uwb_ie_drp *drp_ie,
					    struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie,
							conflicting_mas);
	}
}

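/*
 * Handle an ACCEPTED reason code for a reservation in which we are the
 * target: either confirm the current MAS set or, if the owner is asking
 * for additional MASs, try to reserve them as a companion and fall back
 * to conflict resolution if they are unavailable.
 */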
static void uwb_drp_process_target_accepted(struct uwb_rc *rc,
	struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt,
	struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;

	status = uwb_ie_drp_status(drp_ie);

	if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		return;
	}

	if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
		/* drp_ie is companion */
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			/* stroke companion */
			uwb_rsv_set_state(rsv,
				UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		}
	} else {
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			if (uwb_drp_avail_reserve_pending(rc, mas) == -EBUSY) {
				/* FIXME: there is a conflict, find
				 * the conflicting reservations and
				 * take a sensible action. Consider
				 * that in drp_ie there is the
				 * "neighbour" */
				uwb_drp_handle_all_conflict_rsv(rc, drp_evt,
						drp_ie, mas);
			} else {
				/* accept the extra reservation */
				bitmap_copy(mv->companion_mas.bm, mas->bm,
								UWB_NUM_MAS);
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
			}
		} else {
			if (status) {
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_T_ACCEPTED);
			}
		}

	}
}

/*
 * Based on the DRP IE, transition a target reservation to a new
 * state.
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
		   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:
		uwb_drp_process_target_accepted(rc, rsv, drp_evt, drp_ie, &mas);
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* check to see if we have already modified the reservation */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* find if the owner wants to expand or reduce */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* owner is reducing */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm,
				UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}

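/*
 * Advance an owner reservation when the target reports ACCEPTED.  For a
 * move in progress this steps through the MOVE_EXPANDING ->
 * MOVE_COMBINING -> MOVE_REDUCING -> ESTABLISHED sequence as the target
 * acknowledges each intermediate DRP IE.
 */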
static void uwb_drp_process_owner_accepted(struct uwb_rsv *rsv,
						struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	switch (rsv->state) {
	case UWB_RSV_STATE_O_PENDING:
	case UWB_RSV_STATE_O_INITIATED:
	case UWB_RSV_STATE_O_ESTABLISHED:
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;

	case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (bitmap_equal(mas->bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
			/* Companion reservation accepted */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		} else {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	default:
		break;
	}
}
/*
 * Based on the DRP IE, transition an owner reservation to a new
 * state.
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			uwb_drp_process_owner_accepted(rsv, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* resolve the conflict */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}

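/*
 * (Re)arm the expiry timer for an alien BP conflict entry: if the alien
 * BP DRP IE is not seen again within mMaxLostBeacons superframes, the
 * entry is dropped and the global conflicting alien bitmap is
 * recomputed (see uwb_cnflt_update_work()).
 */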
static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
{
	unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
	mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
}

static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm,
						c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
					usecs_to_jiffies(delay_us));

	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_cnflt_timer(unsigned long arg)
{
	struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;

	queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}

/*
 * We have received a DRP IE of type Alien BP and we need to make
 * sure we do not transmit in conflicting MASs.
 */
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_mas_bm mas;
	struct uwb_cnflt_alien *cnflt;
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	uwb_drp_ie_to_bm(&mas, drp_ie);

	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
			/* Existing alien BP reservation conflicting
			 * bitmap, just reset the timer */
			uwb_cnflt_alien_stroke_timer(cnflt);
			return;
		}
	}

	/* New alien BP reservation conflicting bitmap */

	/* alloc and initialize new uwb_cnflt_alien */
	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
	if (!cnflt) {
		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
		return;
	}

	INIT_LIST_HEAD(&cnflt->rc_node);
	init_timer(&cnflt->timer);
	cnflt->timer.function = uwb_cnflt_timer;
	cnflt->timer.data     = (unsigned long)cnflt;

	cnflt->rc = rc;
	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);

	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);

	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);

	/* update rc global conflicting alien bitmap */
	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	/* start the timer */
	uwb_cnflt_alien_stroke_timer(cnflt);
}

static void uwb_drp_process_not_involved(struct uwb_rc *rc,
					 struct uwb_rc_evt_drp *drp_evt,
					 struct uwb_ie_drp *drp_ie)
{
	struct uwb_mas_bm mas;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
}

static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
				     struct uwb_rc_evt_drp *drp_evt,
				     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_find(rc, src, drp_ie);
	if (!rsv) {
		/*
		 * No reservation? It's either for a recently
		 * terminated reservation, or the DRP IE couldn't be
		 * processed (e.g., an invalid IE or out of memory).
		 */
		return;
	}

	/*
	 * Do nothing with DRP IEs for reservations that have been
	 * terminated.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		return;
	}

	if (uwb_ie_drp_owner(drp_ie))
		uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
	else
		uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);

}


static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
}

/*
 * Process a received DRP IE.
 */
static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			    struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
{
	if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
		uwb_drp_handle_alien_drp(rc, drp_ie);
	else if (uwb_drp_involves_us(rc, drp_ie))
		uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
	else
		uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
}

/*
 * Process a received DRP Availability IE
 */
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
					 struct uwb_ie_drp_avail *drp_availability_ie)
{
	bitmap_copy(src->last_availability_bm,
		    drp_availability_ie->bmp, UWB_NUM_MAS);
}

/*
 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
 * from a device.
 */
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			 size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	void *ptr;

	ptr = drp_evt->ie_data;
	for (;;) {
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		switch (ie_hdr->element_id) {
		case UWB_IE_DRP_AVAILABILITY:
			uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
			break;
		case UWB_IE_DRP:
			uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
			break;
		default:
			dev_warn(dev, "unexpected IE in DRP notification\n");
			break;
		}
	}

	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}

/**
 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
 * @evt: the DRP_IE event from the radio controller
 *
 * This processes DRP notifications from the radio controller, either
 * initiating a new reservation or transitioning an existing
 * reservation into a different state.
 *
 * DRP notifications can occur for three different reasons:
 *
 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
 *   the target or source have been received.
 *
 *   These DRP IEs could be new or for an existing reservation.
 *
 *   If the DRP IE for an existing reservation ceases to be
 *   received for at least mMaxLostBeacons, the reservation should be
 *   considered to be terminated.  Note that the TERMINATE reason (see
 *   below) may not always be signalled (e.g., the remote device has
 *   two or more reservations established with the RC).
 *
 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
 *   group conflict with the RC's reservations.
 *
 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
 *   from a device (i.e., it's terminated all reservations).
 *
 * Only the software state of the reservations is changed; the setting
 * of the radio controller's DRP IEs is done after all the events in
 * an event buffer are processed.  This saves waiting multiple times
 * for the SET_DRP_IE command to complete.
 */
int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc *rc = evt->rc;
	struct uwb_rc_evt_drp *drp_evt;
	size_t ielength, bytes_left;
	struct uwb_dev_addr src_addr;
	struct uwb_dev *src_dev;

	/* Is there enough data to decode the event (and any IEs in
	   its payload)? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP event: Not enough data to decode event "
			"[%zu bytes left, %zu needed]\n",
			evt->notif.size, sizeof(*drp_evt));
		return 0;
	}
	bytes_left = evt->notif.size - sizeof(*drp_evt);
	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
	ielength = le16_to_cpu(drp_evt->ie_length);
	if (bytes_left != ielength) {
		dev_err(dev, "DRP event: Not enough data in payload [%zu"
			"bytes left, %zu declared in the event]\n",
			bytes_left, ielength);
		return 0;
	}

	memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
	src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
	if (!src_dev) {
		/*
		 * A DRP notification from an unrecognized device.
		 *
		 * This is probably from a WUSB device that doesn't
		 * have an EUI-48 and therefore doesn't show up in the
		 * UWB device database.  It's safe to simply ignore
		 * these.
		 */
		return 0;
	}

	mutex_lock(&rc->rsvs_mutex);

	/* We do not distinguish based on the reason code */
	uwb_drp_process_all(rc, drp_evt, ielength, src_dev);

	mutex_unlock(&rc->rsvs_mutex);

	uwb_dev_put(src_dev);
	return 0;
}