/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static char *be_port_misconfig_evt_desc[] = {
	"A valid SFP module detected",
	"Optics faulted/ incorrectly installed/ not installed.",
	"Optics of two types installed.",
	"Incompatible optics.",
	"Unknown port SFP status"
};

static char *be_port_misconfig_remedy_desc[] = {
	"",
	"Reseat optics. If issue not resolved, replace",
	"Remove one optic or install matching pair of optics",
	"Replace with compatible optics for card to function",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}
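
/* Usage sketch: privileged commands listed in cmd_priv_map[] are gated
 * through be_cmd_allowed() before the WRB is even built, so an
 * unprivileged function fails fast instead of round-tripping to the
 * firmware (see lancer_cmd_get_pport_stats() below for a real caller):
 *
 *	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
 *			    CMD_SUBSYSTEM_ETH))
 *		return -EPERM;
 */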

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_ANY))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}
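
/* fill_wrb_tags() below stashes the 64-bit virtual address of the request
 * header in tag0/tag1; be_decode_resp_hdr() is its inverse. A worked
 * example on a 64-bit kernel:
 *
 *	addr = 0x0000aabbccdd1122
 *	tag0 = 0xccdd1122, tag1 = 0x0000aabb
 *	be_decode_resp_hdr(tag0, tag1) == (void *)0x0000aabbccdd1122
 *
 * The double 16-bit shift (rather than "<< 32") stays well-defined when
 * "unsigned long" is only 32 bits wide, in which case tag1 is simply
 * dropped and the 32-bit pointer is recovered from tag0 alone.
 */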

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Placeholder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;

			adapter->hwmon_info.be_on_die_temp =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =
						BE_INVALID_DIE_TEMP;
		}
		return;
	}
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
	struct device *dev = &adapter->pdev->dev;
	u8 port_misconfig_evt;

	port_misconfig_evt =
		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);

	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	dev_info(dev, "Port %c: %s %s", adapter->port_name,
		 be_port_misconfig_evt_desc[port_misconfig_evt],
		 be_port_misconfig_remedy_desc[port_misconfig_evt]);

	if (port_misconfig_evt == INCOMPATIBLE_SFP)
		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio |=
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
	} else {
		adapter->flags &= ~BE_FLAGS_OS2BMC;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}
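
/* Each async completion encodes an event *code* (link-state, grp5, qnq,
 * sliport) and an event *type* in its flags word. The is_*_evt() helpers
 * below test the code field, and be_mcc_event_process() fans the
 * completion out to the matching handler above.
 */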

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_SLIPORT;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);
		return -EIO;
	}
	return status;
}
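
/* The 12s figure above is simply the loop bound times the poll interval:
 * 120000 iterations * udelay(100) = 12,000,000 us = 12 s (ignoring time
 * spent inside be_process_mcc() itself).
 */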

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
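
/* Worked example of the two-step address split above, using a
 * hypothetical mailbox DMA address (16-byte aligned, as the ">> 4" in the
 * low write assumes):
 *
 *	mbox_mem->dma = 0x0000000012345670
 *	hi write: (upper_32_bits(dma) >> 2) << 2 = 0x0 (plus the HI mask bit)
 *	lo write: (u32)(dma >> 4) << 2           = 0x048d159c
 *
 * The firmware reassembles the full address from the two doorbell writes
 * and then posts its completion into mbox->compl.
 */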

static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}
651
static int lancer_wait_ready(struct be_adapter *adapter)
652 653 654
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
655
	int i;
656 657 658 659

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
660
			return 0;
661

662 663 664
		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;
665

666
		msleep(1000);
667
	}
668

669
	return sliport_status ? : -1;
670 671 672
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else {
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	}
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		/* Don't leak the lock when the MCC ring is full */
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}
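
/* be_cmd_notify_wait() lets callers build a WRB on the stack: the WRB is
 * copied into whichever channel is live (the MCC queue once it exists, the
 * mailbox before that), fired, and on success the completed WRB - including
 * the response in its embedded payload - is copied back into the caller's
 * buffer. be_cmd_txq_create() and be_cmd_if_create() below rely on this to
 * work transparently over either transport.
 */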

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 is available only from SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
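
/* be_cmd_mac_addr_query() is the template most synchronous MCCQ commands
 * in this file follow: take mcc_lock, carve a WRB out of the MCC queue
 * (bailing out with -EBUSY if the ring is full), prepare the request
 * header, fire-and-wait via be_mcc_notify_wait(), then read the response
 * out of the embedded payload before dropping the lock.
 */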

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
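
/* Worked example: ring lengths are powers of two, so a 256-entry MCCQ
 * encodes as fls(256) == 9, while a 32768-entry ring would give
 * fls() == 16, which this encoding maps to 0.
 */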

static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State, Group 5, QnQ and Sliport async events */
	req->async_event_bitmap[0] =
			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
				    BIT(ASYNC_EVENT_CODE_GRP_5) |
				    BIT(ASYNC_EVENT_CODE_QNQ) |
				    BIT(ASYNC_EVENT_CODE_SLIPORT));

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			"or newer to avoid conflicting priorities between NIC "
			"and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}
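
/* Design note: the EXT variant is preferred because it lets the driver
 * choose which async event codes to subscribe to via async_event_bitmap;
 * the plain MCC_CREATE above exists only as a fallback for older BEx
 * firmware that rejects the EXT command.
 */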

int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
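
/* frag_size is passed in as a byte count but programmed as its log2:
 * for example, a 2048-byte RX fragment yields fls(2048) - 1 == 11.
 */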

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;
	int status;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && be_virtfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
	}
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	/* version 1 of the cmd is supported by all chips except BE2;
	 * SH-R and later use version 2
	 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	else if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
			       wrb, nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	case PHY_LINK_SPEED_20GBPS:
		return 20000;
	case PHY_LINK_SPEED_25GBPS:
		return 25000;
	case PHY_LINK_SPEED_40GBPS:
		return 40000;
	}
	return 0;
}

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is supported by all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

		if (link_speed) {
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
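
/* Two speed sources exist in the response: newer firmware reports
 * link_speed directly in units of 10 Mbps (so 1000 means 10 Gbps), while
 * older BE2-era responses leave it at zero and provide only the mac_speed
 * enum, which be_mac_to_link_speed() converts to Mbps.
 */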

/* Uses asynchronous mcc; the completion is consumed in
 * be_async_cmd_process()
 */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
			       sizeof(*req), wrb, NULL);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
			       NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);

		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
				log_offset = sizeof(u32), payload_len;
	int status = 0;

	if (buf_len == 0)
		return -EIO;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
					      get_fat_cmd.size,
					      &get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while reading FAT data\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				       OPCODE_COMMON_MANAGE_FAT, payload_len,
				       wrb, &get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;

			memcpy(buf + offset,
			       resp->data_buffer,
			       le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			    get_fat_cmd.va, get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
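
/* Illustrative two-step FAT log retrieval, in the style of the ethtool
 * get_regs path (a sketch; log_size, log_buf and consume_log are
 * hypothetical). be_cmd_get_reg_len() reports the log size;
 * be_cmd_get_regs() then pulls the log in 60KB chunks:
 *
 *	u32 log_size = 0;
 *	void *log_buf;
 *
 *	if (!be_cmd_get_reg_len(adapter, &log_size) && log_size) {
 *		log_buf = vmalloc(log_size);
 *		if (log_buf && !be_cmd_get_regs(adapter, log_size, log_buf))
 *			consume_log(log_buf);
 *		vfree(log_buf);
 *	}
 */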

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
			       NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strlcpy(adapter->fw_ver, resp->firmware_version_string,
			sizeof(adapter->fw_ver));
		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
			sizeof(adapter->fw_on_flash));
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
static int __be_cmd_modify_eqd(struct be_adapter *adapter,
			       struct be_set_eqd *set_eqd, int num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0, i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
			       NULL);

	req->num_eq = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
		req->set_eqd[i].phase = 0;
		req->set_eqd[i].delay_multiplier =
				cpu_to_le32(set_eqd[i].delay_multiplier);
	}

	be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
		      int num)
{
	int num_eqs, i = 0;

	while (num) {
		num_eqs = min(num, 8);
		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
		i += num_eqs;
		num -= num_eqs;
	}

	return 0;
}
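
/* Note: a single MODIFY_EQ_DELAY wrb carries at most 8 EQ entries (the
 * request's set_eqd array), so be_cmd_modify_eqd() above posts larger
 * arrays in batches of 8; e.g. a 16-entry set_eqd array goes out as two
 * wrbs of 8 entries each.
 */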

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
			       wrb, NULL);
	req->hdr.domain = domain;

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->if_flags_mask = cpu_to_le32(flags);
	req->if_flags = (value == ON) ? req->if_flags_mask : 0;

	if (flags & BE_IF_FLAGS_MULTICAST) {
		struct netdev_hw_addr *ha;
		int i = 0;

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct device *dev = &adapter->pdev->dev;

	if ((flags & be_if_cap_flags(adapter)) != flags) {
		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	flags &= be_if_cap_flags(adapter);

	return __be_cmd_rx_filter(adapter, flags, value);
}
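
/* Illustrative usage (a sketch; assumes the interface was created with
 * the promiscuous capability in its if_cap_flags):
 *
 *	be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
 *
 * Bits not present in be_if_cap_flags(adapter) are warned about and
 * masked off before __be_cmd_rx_filter() issues the cmd.
 */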

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	req->hdr.version = 1;
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
		return -EOPNOTSUPP;

	return status;
}
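
/* Illustrative ethtool-style pause configuration (a sketch; tx_en and
 * rx_en are hypothetical flags derived from an ethtool_pauseparam
 * request):
 *
 *	status = be_cmd_set_flow_control(adapter, tx_en, rx_en);
 *	if (status == -EOPNOTSUPP)
 *		dev_warn(&adapter->pdev->dev,
 *			 "pause settings cannot be changed on this port\n");
 */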

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);

		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			       sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);

		adapter->port_num = le32_to_cpu(resp->phys_port);
		adapter->function_mode = le32_to_cpu(resp->function_mode);
		adapter->function_caps = le32_to_cpu(resp->function_caps);
		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
		dev_info(&adapter->pdev->dev,
			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
			 adapter->function_mode, adapter->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		iowrite32(SLI_PORT_CONTROL_IP_MASK,
			  adapter->db + SLIPORT_CONTROL_OFFSET);
		status = lancer_wait_ready(adapter);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Adapter in non recoverable error\n");
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
			       NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	int status;

	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(rss_hash_opts);
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);

	if (!BEx_chip(adapter))
		req->hdr.version = 1;

	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
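
/* Illustrative RSS setup (a sketch; rss_key, rsstable and num_rx_queues
 * are hypothetical locals -- the real flow lives in be_main.c):
 *
 *	u8 rss_key[RSS_HASH_KEY_LEN];
 *	u8 rsstable[RSS_INDIR_TABLE_LEN];
 *	int i;
 *
 *	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
 *	for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
 *		rsstable[i] = i % num_rx_queues;
 *	be_cmd_rss_config(adapter, rsstable,
 *			  RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4,
 *			  RSS_INDIR_TABLE_LEN, rss_key);
 */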

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			    u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
			       sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
			       wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);

		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
				      u8 page_num, u8 *data)
{
	struct be_dma_mem cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	if (page_num > TR_PAGE_A2)
		return -EINVAL;

	cmd.size = sizeof(struct be_cmd_resp_port_type);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
		return -ENOMEM;
	}
	memset(cmd.va, 0, cmd.size);

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_TRANSRECV_DATA,
			       cmd.size, wrb, &cmd);

	req->port = cpu_to_le32(adapter->hba_port_num);
	req->page_num = cpu_to_le32(page_num);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = cmd.va;

		memcpy(data, resp->page_data, PAGE_DATA_LEN);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
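
/* Illustrative dump of the module's lower EEPROM page (a sketch;
 * page_data is a hypothetical local). Page A0 carries identifier and
 * vendor fields, page A2 the diagnostic data:
 *
 *	u8 page_data[PAGE_DATA_LEN];
 *
 *	if (!be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
 *					       page_data))
 *		print_hex_dump_bytes("sfp a0: ", DUMP_PREFIX_OFFSET,
 *				     page_data, PAGE_DATA_LEN);
 */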

int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			    u32 data_size, u32 data_offset,
			    const char *obj_name, u32 *data_written,
			    u8 *change_status, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_OBJECT,
			       sizeof(struct lancer_cmd_req_write_object), wrb,
			       NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
		      write_length, ctxt, data_size);

	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			      eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			      eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma +
				     sizeof(struct lancer_cmd_req_write_object))
				    & 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(60000)))
		status = -ETIMEDOUT;
	else
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
		*change_status = resp->change_status;
	} else {
		*addn_status = resp->additional_status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
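
/* Illustrative chunked flash write on Lancer (a sketch; the chunk size
 * and all locals are hypothetical -- see the firmware download path in
 * be_main.c for the real flow). Data is copied past the embedded request
 * header in the DMA buffer; a final zero-length write signals EOF:
 *
 *	while (offset < total) {
 *		chunk = min(total - offset, (u32)32 * 1024);
 *		memcpy(flash_cmd.va + sizeof(struct lancer_cmd_req_write_object),
 *		       fw_data + offset, chunk);
 *		status = lancer_cmd_write_object(adapter, &flash_cmd, chunk,
 *						 offset, obj_name, &written,
 *						 &change_status, &add_status);
 *		if (status)
 *			break;
 *		offset += written;
 *	}
 */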

int be_cmd_query_cable_type(struct be_adapter *adapter)
{
	u8 page_data[PAGE_DATA_LEN];
	int status;

	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
						   page_data);
	if (!status) {
		switch (adapter->phy.interface_type) {
		case PHY_TYPE_QSFP:
			adapter->phy.cable_type =
				page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
			break;
		case PHY_TYPE_SFP_PLUS_10GB:
			adapter->phy.cable_type =
				page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
			break;
		default:
			adapter->phy.cable_type = 0;
			break;
		}
	}
	return status;
}

int be_cmd_query_sfp_info(struct be_adapter *adapter)
{
	u8 page_data[PAGE_DATA_LEN];
	int status;

	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
						   page_data);
	if (!status) {
		strlcpy(adapter->phy.vendor_name, page_data +
			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
		strlcpy(adapter->phy.vendor_pn,
			page_data + SFP_VENDOR_PN_OFFSET,
			SFP_VENDOR_NAME_LEN - 1);
	}

	return status;
}

int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
{
	struct lancer_cmd_req_delete_object *req;
	struct be_mcc_wrb *wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_DELETE_OBJECT,
			       sizeof(*req), wrb, NULL);

	strlcpy(req->object_name, obj_name, sizeof(req->object_name));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			   u32 data_size, u32 data_offset, const char *obj_name,
			   u32 *data_read, u32 *eof, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_OBJECT,
			       sizeof(struct lancer_cmd_req_read_object), wrb,
			       NULL);

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);

	resp = embedded_payload(wrb);
	if (!status) {
		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
	} else {
		*addn_status = resp->additional_status;
	}

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			  u32 flash_type, u32 flash_opcode, u32 img_offset,
			  u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
			       cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
		req->params.offset = cpu_to_le32(img_offset);

	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(40000)))
		status = -ETIMEDOUT;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 u16 img_optype, u32 img_offset, u32 crc_offset)
{
	struct be_cmd_read_flash_crc *req;
	struct be_mcc_wrb *wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
			       wrb, NULL);

	req->params.op_type = cpu_to_le32(img_optype);
	if (img_optype == OPTYPE_OFFSET_SPECIFIED)
		req->params.offset = cpu_to_le32(img_offset + crc_offset);
	else
		req->params.offset = cpu_to_le32(crc_offset);

	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->crc, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
			       wrb, nonemb_cmd);
	memcpy(req->magic_mac, mac, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
			       wrb, NULL);

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
			 u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	struct be_cmd_resp_loopback_test *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
			       NULL);

	req->hdr.timeout = cpu_to_le32(15);
	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	be_mcc_notify(adapter);

	spin_unlock_bh(&adapter->mcc_lock);

	wait_for_completion(&adapter->et_cmd_compl);
	resp = embedded_payload(wrb);
	status = le32_to_cpu(resp->status);

	return status;
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
			       cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;

		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
		    resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
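
/* The send buffer above replicates the 64-bit pattern byte-by-byte,
 * least-significant byte first. E.g. pattern = 0x1122334455667788ULL
 * yields snd_buff bytes 0x88 0x77 0x66 0x55 0x44 0x33 0x22 0x11,
 * repeating until byte_cnt bytes have been written.
 */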

int be_cmd_get_seeprom_data(struct be_adapter *adapter,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
			       nonemb_cmd);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_phy_info(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_dma_mem cmd;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
			       wrb, &cmd);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_phy_info *resp_phy_info =
				cmd.va + sizeof(struct be_cmd_req_hdr);

		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
		adapter->phy.interface_type =
			le16_to_cpu(resp_phy_info->interface_type);
		adapter->phy.auto_speeds_supported =
			le16_to_cpu(resp_phy_info->auto_speeds_supported);
		adapter->phy.fixed_speeds_supported =
			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
		adapter->phy.misc_params =
			le32_to_cpu(resp_phy_info->misc_params);

		if (BE2_chip(adapter)) {
			adapter->phy.fixed_speeds_supported =
				BE_SUPPORTED_SPEED_10GBPS |
				BE_SUPPORTED_SPEED_1GBPS;
		}
	}
	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	int status;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
					      &attribs_cmd.dma);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
			       wrb, &attribs_cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	if (attribs_cmd.va)
		pci_free_consistent(adapter->pdev, attribs_cmd.size,
				    attribs_cmd.va, attribs_cmd.dma);
	return status;
}

/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
			       sizeof(*req), wrb, NULL);

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);

		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
		if (!adapter->be3_native)
			dev_warn(&adapter->pdev->dev,
				 "adapter not in advanced mode\n");
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Get privilege(s) for a function */
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fn_privileges *resp =
						embedded_payload(wrb);

		*privilege = le32_to_cpu(resp->privilege_mask);

		/* In UMC mode FW does not return right privileges.
		 * Override with correct privilege equivalent to PF.
		 */
		if (BEx_chip(adapter) && be_is_mc(adapter) &&
		    be_physfn(adapter))
			*privilege = MAX_PRIVILEGES;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
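
/* Illustrative usage (a sketch): the returned mask is what
 * be_cmd_allowed() consults when gating privileged cmds:
 *
 *	u32 privileges = 0;
 *
 *	if (!be_cmd_get_fn_privileges(adapter, &privileges, 0))
 *		adapter->cmd_privileges = privileges;
 */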

/* Set privilege(s) for a function */
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);
	req->hdr.domain = domain;
	if (lancer_chip(adapter))
		req->privileges_lancer = cpu_to_le32(privileges);
	else
		req->privileges = cpu_to_le32(privileges);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
 * pmac_id_valid: false => pmac_id or MAC address is requested.
 *		  If pmac_id is returned, pmac_id_valid is returned as true
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
			     u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
						   get_mac_list_cmd.size,
						   &get_mac_list_cmd.dma);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_MAC_LIST,
			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	if (*pmac_id_valid) {
		req->mac_id = cpu_to_le32(*pmac_id);
		req->iface_id = cpu_to_le16(if_handle);
		req->perm_override = 0;
	} else {
		req->perm_override = 1;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;

		if (*pmac_id_valid) {
			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
			       ETH_ALEN);
			goto out;
		}

		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* Mac list returned could contain one or more active mac_ids
		 * or one or more true or pseudo permanent mac addresses.
		 * If an active mac_id is present, return first active mac_id
		 * found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_valid = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id found, return first mac addr */
		*pmac_id_valid = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
		       ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}

int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
			  u8 *mac, u32 if_handle, bool active, u32 domain)
{
	if (!active)
		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
					 if_handle, domain);
	if (BEx_chip(adapter))
		return be_cmd_mac_addr_query(adapter, mac, false,
					     if_handle, curr_pmac_id);
	else
		/* Fetch the MAC address using pmac_id */
		return be_cmd_get_mac_from_list(adapter, mac, &active,
						&curr_pmac_id,
						if_handle, domain);
}

int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
{
	int status;
	bool pmac_valid = false;

	eth_zero_addr(mac);

	if (BEx_chip(adapter)) {
		if (be_physfn(adapter))
			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
						       0);
		else
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       adapter->if_handle, 0);
	} else {
		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
						  NULL, adapter->if_handle, 0);
	}

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			u8 mac_count, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_mac_list *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
				    &cmd.dma, GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
			       wrb, &cmd);

	req->hdr.domain = domain;
	req->mac_count = mac_count;
	if (mac_count)
		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);

	status = be_mcc_notify_wait(adapter);

err:
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Wrapper to delete any active MACs and provision the new mac.
 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
 * current list are active.
 */
int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
{
	bool active_mac = false;
	u8 old_mac[ETH_ALEN];
	u32 pmac_id;
	int status;

	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
					  &pmac_id, if_id, dom);

	if (!status && active_mac)
		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);

	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
}
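
/* Illustrative usage (a sketch; new_mac, vf_if_id and vf_num are
 * hypothetical). Passing a NULL mac clears the list, since mac_count is
 * then 0:
 *
 *	status = be_cmd_set_mac(adapter, new_mac, adapter->if_handle, 0);
 *
 * and, for a VF provisioned from the PF (domain is vf-number + 1):
 *
 *	status = be_cmd_set_mac(adapter, NULL, vf_if_id, vf_num + 1);
 */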

int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
			  u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_hsw_config *req;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
	if (pvid) {
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
	}
	if (!BEx_chip(adapter) && hsw_mode) {
		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
			      ctxt, adapter->hba_port_num);
		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
			      ctxt, hsw_mode);
	}

	/* Enable/disable both mac and vlan spoof checking */
	if (!BEx_chip(adapter) && spoofchk) {
		AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
			      ctxt, spoofchk);
		AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
			      ctxt, spoofchk);
	}

	be_dws_cpu_to_le(req->context, sizeof(req->context));
	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
			  u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_hsw_config *req;
	void *ctxt;
	int status;
	u16 vid;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
		      ctxt, intf_id);
	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);

	if (!BEx_chip(adapter) && mode) {
		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
			      ctxt, adapter->hba_port_num);
		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
	}
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_hsw_config *resp =
						embedded_payload(wrb);

		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
				    pvid, &resp->context);
		if (pvid)
			*pvid = le16_to_cpu(vid);
		if (mode)
			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
					      port_fwd_type, &resp->context);
		if (spoofchk)
			*spoofchk =
				AMAP_GET_BITS(struct amap_get_hsw_resp_context,
					      spoofchk, &resp->context);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static bool be_is_wol_excluded(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	if (be_virtfn(adapter))
		return true;

	switch (pdev->subsystem_device) {
	case OC_SUBSYS_DEVICE_ID1:
	case OC_SUBSYS_DEVICE_ID2:
	case OC_SUBSYS_DEVICE_ID3:
	case OC_SUBSYS_DEVICE_ID4:
		return true;
	default:
		return false;
	}
}

int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
	int status = 0;
	struct be_dma_mem cmd;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	if (be_is_wol_excluded(adapter))
		return status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			       sizeof(*req), wrb, &cmd);

	req->hdr.version = 1;
	req->query_options = BE_GET_WOL_CAP;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;

		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;

		adapter->wol_cap = resp->wol_settings;
		if (adapter->wol_cap & BE_WOL_CAP)
			adapter->wol_en = true;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	int i, j;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);
	if (!extfat_cmd.va)
		return -ENOMEM;

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (status)
		goto err;

	cfgs = (struct be_fat_conf_params *)
			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);

		for (j = 0; j < num_modes; j++) {
			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
				cfgs->module[i].trace_lvl[j].dbg_lvl =
							cpu_to_le32(level);
		}
	}

	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
err:
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
	return status;
}

int be_cmd_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status, j;
	int level = 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));

		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_ext_fat_caps *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);
	req->parameter_type = cpu_to_le32(1);

	status = be_mbox_notify_wait(adapter);
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd,
				   struct be_fat_conf_params *configs)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ext_fat_caps *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_query_port_name(struct be_adapter *adapter)
{
	struct be_cmd_req_get_port_name *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
			       NULL);
	if (!BEx_chip(adapter))
		req->hdr.version = 1;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);

		adapter->port_name = resp->port_name[adapter->hba_port_num];
	} else {
		adapter->port_name = adapter->hba_port_num + '0';
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Descriptor type */
enum {
	FUNC_DESC = 1,
	VFT_DESC = 2
};

static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
					       int desc_type)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	struct be_nic_res_desc *nic;
	int i;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
			nic = (struct be_nic_res_desc *)hdr;
			if (desc_type == FUNC_DESC ||
			    (desc_type == VFT_DESC &&
			     nic->flags & (1 << VFT_SHIFT)))
				return nic;
		}

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}
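
/* The descriptor list returned by GET_FUNC_CONFIG/GET_PROFILE_CONFIG is
 * a packed sequence of variable-length entries. Each entry begins with a
 * be_res_desc_hdr whose desc_len gives the offset to the next entry
 * (zero is treated as the V0 size above). The helpers below walk that
 * list looking for a particular descriptor type.
 */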

static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
{
	return be_get_nic_desc(buf, desc_count, VFT_DESC);
}

static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
{
	return be_get_nic_desc(buf, desc_count, FUNC_DESC);
}

static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
						 u32 desc_count)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	struct be_pcie_res_desc *pcie;
	int i;

	for (i = 0; i < desc_count; i++) {
		if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
		     hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
			pcie = (struct be_pcie_res_desc *)hdr;
			if (pcie->pf_num == devfn)
				return pcie;
		}

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}

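/* Returns the first port resource descriptor found in the list, if any */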
static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	int i;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
			return (struct be_port_res_desc *)hdr;

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}

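/* Copies the queue/MAC/VLAN limits advertised in a NIC resource descriptor
 * into the driver's be_resources summary, keeping only the iface capability
 * flags the driver cares about.
 */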
static void be_copy_nic_desc(struct be_resources *res,
			     struct be_nic_res_desc *desc)
{
	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
	res->max_vlans = le16_to_cpu(desc->vlan_count);
	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
	res->max_tx_qs = le16_to_cpu(desc->txq_count);
	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
	res->max_rx_qs = le16_to_cpu(desc->rq_count);
	res->max_evt_qs = le16_to_cpu(desc->eq_count);
	res->max_cq_count = le16_to_cpu(desc->cq_count);
	res->max_iface_count = le16_to_cpu(desc->iface_count);
	res->max_mcc_count = le16_to_cpu(desc->mcc_count);
	/* Clear flags that driver is not interested in */
	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
				BE_IF_CAP_FLAGS_WANT;
}

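/* Queries the resource limits provisioned for this PCI function and caches
 * the PF number reported by the FW.
 */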
/* Uses Mbox */
int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_func_config *req;
	int status;
	struct be_dma_mem cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FUNC_CONFIG,
			       cmd.size, wrb, &cmd);

	if (skyhawk_chip(adapter))
		req->hdr.version = 1;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_func_config *resp = cmd.va;
		u32 desc_count = le32_to_cpu(resp->desc_count);
		struct be_nic_res_desc *desc;

		desc = be_get_func_nic_desc(resp->func_param, desc_count);
		if (!desc) {
			status = -EINVAL;
			goto err;
		}

		adapter->pf_number = desc->pf_num;
		be_copy_nic_desc(res, desc);
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
			      struct be_resources *res, u8 query, u8 domain)
{
	struct be_cmd_resp_get_profile_config *resp;
	struct be_cmd_req_get_profile_config *req;
	struct be_nic_res_desc *vf_res;
	struct be_pcie_res_desc *pcie;
	struct be_port_res_desc *port;
	struct be_nic_res_desc *nic;
	struct be_mcc_wrb wrb = {0};
	struct be_dma_mem cmd;
	u16 desc_count;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va)
		return -ENOMEM;

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PROFILE_CONFIG,
			       cmd.size, &wrb, &cmd);

	req->hdr.domain = domain;
	if (!lancer_chip(adapter))
		req->hdr.version = 1;
	req->type = ACTIVE_PROFILE_TYPE;

	/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
	 * descriptors with all bits set to "1" for the fields which can be
	 * modified using SET_PROFILE_CONFIG cmd.
	 */
	if (query == RESOURCE_MODIFIABLE)
		req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (status)
		goto err;

	resp = cmd.va;
	desc_count = le16_to_cpu(resp->desc_count);

	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
				desc_count);
	if (pcie)
		res->max_vfs = le16_to_cpu(pcie->num_vfs);

	port = be_get_port_desc(resp->func_param, desc_count);
	if (port)
		adapter->mc_type = port->mc_type;

	nic = be_get_func_nic_desc(resp->func_param, desc_count);
	if (nic)
		be_copy_nic_desc(res, nic);

	vf_res = be_get_vft_desc(resp->func_param, desc_count);
	if (vf_res)
		res->vf_if_cap_flags = vf_res->cap_flags;
err:
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/* Will use MBOX only if MCCQ has not been created */
static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
				     int size, int count, u8 version, u8 domain)
{
	struct be_cmd_req_set_profile_config *req;
	struct be_mcc_wrb wrb = {0};
	struct be_dma_mem cmd;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va)
		return -ENOMEM;

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
			       &wrb, &cmd);
	req->hdr.version = version;
	req->hdr.domain = domain;
	req->desc_count = cpu_to_le32(count);
	memcpy(req->desc, desc, size);

	status = be_cmd_notify_wait(adapter, &wrb);

	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/* Mark all fields invalid */
static void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
	memset(nic, 0, sizeof(*nic));
	nic->unicast_mac_count = 0xFFFF;
	nic->mcc_count = 0xFFFF;
	nic->vlan_count = 0xFFFF;
	nic->mcast_mac_count = 0xFFFF;
	nic->txq_count = 0xFFFF;
	nic->rq_count = 0xFFFF;
	nic->rssq_count = 0xFFFF;
	nic->lro_count = 0xFFFF;
	nic->cq_count = 0xFFFF;
	nic->toe_conn_count = 0xFFFF;
	nic->eq_count = 0xFFFF;
	nic->iface_count = 0xFFFF;
	nic->link_param = 0xFF;
	nic->channel_id_param = cpu_to_le16(0xF000);
	nic->acpi_params = 0xFF;
	nic->wol_param = 0x0F;
	nic->tunnel_iface_count = 0xFFFF;
	nic->direct_tenant_iface_count = 0xFFFF;
	nic->bw_min = 0xFFFFFFFF;
	nic->bw_max = 0xFFFFFFFF;
}

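/* As with the NIC descriptor above, an all-ones ("invalid") field value
 * asks the FW to leave the corresponding setting unchanged when the
 * descriptor is applied via SET_PROFILE_CONFIG.
 */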
/* Mark all fields invalid */
static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
{
	memset(pcie, 0, sizeof(*pcie));
	pcie->sriov_state = 0xFF;
	pcie->pf_state = 0xFF;
	pcie->pf_type = 0xFF;
	pcie->num_vfs = 0xFFFF;
}

int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
		      u8 domain)
{
	struct be_nic_res_desc nic_desc;
	u32 bw_percent;
	u16 version = 0;

	if (BE3_chip(adapter))
		return be_cmd_set_qos(adapter, max_rate / 10, domain);

	be_reset_nic_desc(&nic_desc);
	nic_desc.pf_num = adapter->pf_number;
	nic_desc.vf_num = domain;
	nic_desc.bw_min = 0;
	if (lancer_chip(adapter)) {
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
					(1 << NOSV_SHIFT);
		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
	} else {
		version = 1;
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
		nic_desc.bw_max = cpu_to_le32(bw_percent);
	}

	return be_cmd_set_profile_config(adapter, &nic_desc,
					 nic_desc.hdr.desc_len,
					 1, version, domain);
}
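
/* Worked example (hypothetical values): capping a function at 1Gbps on a
 * 10Gbps link with be_cmd_config_qos(adapter, 1000, 10000, domain) is
 * encoded as bw_max = 100 (max_rate / 10) on BE3/Lancer and as
 * bw_max = 10 (percent of link_speed) on other chips.
 */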

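/* Builds the VF NIC resource template: RSS capability is advertised only
 * when more than one queue pair is available per VF, and the pool-wide
 * counts (CQs, UC MACs, VLANs, ifaces, MCCQs) are split evenly across the
 * PF and its num_vfs VFs. E.g. with max_uc_mac = 32 and num_vfs = 7, each
 * of the eight functions gets 32 / (7 + 1) = 4 unicast MACs.
 */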
static void be_fill_vf_res_template(struct be_adapter *adapter,
				    struct be_resources pool_res,
				    u16 num_vfs, u16 num_vf_qs,
				    struct be_nic_res_desc *nic_vft)
{
	u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
	struct be_resources res_mod = {0};

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for the VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}

		nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
	} else {
		num_vf_qs = 1;
	}

	nic_vft->rq_count = cpu_to_le16(num_vf_qs);
	nic_vft->txq_count = cpu_to_le16(num_vf_qs);
	nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
	nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
					(num_vfs + 1));

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and its VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
							 (num_vfs + 1));

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
						  (num_vfs + 1));

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
						   (num_vfs + 1));

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
						 (num_vfs + 1));
}

int be_cmd_set_sriov_config(struct be_adapter *adapter,
			    struct be_resources pool_res, u16 num_vfs,
			    u16 num_vf_qs)
{
	struct {
		struct be_pcie_res_desc pcie;
		struct be_nic_res_desc nic_vft;
	} __packed desc;

	/* PF PCIE descriptor */
	be_reset_pcie_desc(&desc.pcie);
	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.pcie.pf_num = adapter->pdev->devfn;
	desc.pcie.sriov_state = num_vfs ? 1 : 0;
	desc.pcie.num_vfs = cpu_to_le16(num_vfs);

	/* VF NIC Template descriptor */
	be_reset_nic_desc(&desc.nic_vft);
	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.nic_vft.pf_num = adapter->pdev->devfn;
	desc.nic_vft.vf_num = 0;

	be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
				&desc.nic_vft);

	return be_cmd_set_profile_config(adapter, &desc,
					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
}

int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_manage_iface_filters *req;
	int status;

	if (iface == 0xFFFFFFFF)
		return -1;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
			       wrb, NULL);
	req->op = op;
	req->target_iface_id = cpu_to_le32(iface);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
{
	struct be_port_res_desc port_desc;

	memset(&port_desc, 0, sizeof(port_desc));
	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
	port_desc.link_num = adapter->hba_port_num;
	if (port) {
		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
					(1 << RCVID_SHIFT);
		port_desc.nv_port = swab16(port);
	} else {
		port_desc.nv_flags = NV_TYPE_DISABLED;
		port_desc.nv_port = 0;
	}

	return be_cmd_set_profile_config(adapter, &port_desc,
					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
}

int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
		     int vf_num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_iface_list *req;
	struct be_cmd_resp_get_iface_list *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
			       wrb, NULL);
	req->hdr.domain = vf_num + 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		resp = (struct be_cmd_resp_get_iface_list *)req;
		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

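/* Polls PHYSDEV_CONTROL until the INP (in-progress) bit clears, checking
 * once a second for up to SLIPORT_IDLE_TIMEOUT seconds.
 */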
static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);

	return status;
}

/* Routine to check whether dump image is present or not */
bool dump_present(struct be_adapter *adapter)
{
	u32 sliport_status = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
}

int lancer_initiate_dump(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (dump_present(adapter)) {
		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
		return -EEXIST;
	}

	/* give firmware reset and diagnostic dump */
	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
				     PHYSDEV_CONTROL_DD_MASK);
	if (status < 0) {
		dev_err(dev, "FW reset failed\n");
		return status;
	}

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	if (!dump_present(adapter)) {
		dev_err(dev, "FW dump not generated\n");
		return -EIO;
	}

	return 0;
}

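/* Deletes the FW dump file object on Lancer flash and converts the
 * completion status into a standard errno via be_cmd_status().
 */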
int lancer_delete_dump(struct be_adapter *adapter)
{
	int status;

	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
	return be_cmd_status(status);
}

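/* No-op on BEx chips; on others, tells the FW to enable the VF identified
 * by @domain.
 */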
/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

	if (BEx_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

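/* Uses the mbox to enable or disable interrupt delivery for the function */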
int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_intr_set *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
			       wrb, NULL);

	req->intr_enabled = intr_enable;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MBOX */
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
{
	struct be_cmd_req_get_active_profile *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
			       wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_active_profile *resp =
							embedded_payload(wrb);

		*profile_id = le16_to_cpu(resp->active_profile_id);
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

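/* Programs a VF's logical (administrative) link state: ENABLE forces the
 * link up, AUTO makes it track the physical link. Not supported on BEx or
 * Lancer chips.
 */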
int be_cmd_set_logical_link_config(struct be_adapter *adapter,
				   int link_state, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ll_link *req;
	int status;

	if (BEx_chip(adapter) || lancer_chip(adapter))
		return -EOPNOTSUPP;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
			       sizeof(*req), wrb, NULL);

	req->hdr.version = 1;
	req->hdr.domain = domain;

	if (link_state == IFLA_VF_LINK_STATE_ENABLE)
		req->link_config |= 1;

	if (link_state == IFLA_VF_LINK_STATE_AUTO)
		req->link_config |= 1 << PLINK_TRACK_SHIFT;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

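/* Exported pass-through that lets the RoCE driver issue a raw MCC command
 * through this driver's MCC queue: the caller's payload is copied into an
 * embedded WRB and the completion is copied back out.
 */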
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);