/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/debugfs.h>
#include <linux/relay.h>

#include "intel_guc_log.h"
#include "i915_drv.h"

static void guc_log_capture_logs(struct intel_guc *guc);

/**
 * DOC: GuC firmware log
 *
 * Firmware log is enabled by setting i915.guc_log_level to the positive level.
 * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
 * i915_guc_load_status will print out firmware loading status and scratch
 * registers value.
 */

/* Notify GuC that the host has finished processing a log-buffer flush. */
static int guc_log_flush_complete(struct intel_guc *guc)
{
	u32 msg[] = { INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE };

	return intel_guc_send(guc, msg, ARRAY_SIZE(msg));
}

/* Ask GuC to force-flush its log buffer to memory. */
static int guc_log_flush(struct intel_guc *guc)
{
	u32 msg[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, msg, ARRAY_SIZE(msg));
}

61
static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
62
{
63
	union guc_log_control control_val = {
64 65 66 67
		{
			.logging_enabled = enable,
			.verbosity = verbosity,
		},
68
	};
69 70
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
71
		control_val.value
72 73 74 75 76 77 78 79 80 81 82 83 84 85
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer, relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between Consumer
	 * and Producer. In overwrite mode, there is a possibility of getting
	 * inconsistent/garbled data, the producer could be writing on to the
	 * same sub buffer from which Consumer is reading. This can't be avoided
	 * unless Consumer is fast enough and can always run in tandem with
	 * Producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This to enable the use of a single buffer for the relay channel and
	 * correspondingly have a single file exposed to User, through which
	 * it can collect the logs in order without any post-processing.
	 * Need to set 'is_global' even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	/*
	 * Not using the channel filename passed as an argument, since for each
	 * channel relay appends the corresponding CPU number to the filename
	 * passed in relay_open(). This should be fine as relay just needs a
	 * dentry of the file associated with the channel buffer and that file's
	 * name need not be same as the filename passed as an argument.
	 */
	buf_file = debugfs_create_file("guc_log", mode,
				       parent, buf, &relay_file_operations);
	return buf_file;
}

/*
 * file_remove() relay callback. Deletes the debugfs file that was created
 * by create_buf_file_callback() when the relay channel is torn down.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/*
 * relay channel callbacks: sub-buffer switch policy (no-overwrite) plus
 * debugfs file creation/removal for the single global channel buffer.
 */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

152
static int guc_log_relay_file_create(struct intel_guc *guc)
153 154 155 156 157
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct dentry *log_dir;
	int ret;

158
	if (!i915_modparams.guc_log_level)
159 160
		return 0;

161 162
	mutex_lock(&guc->log.runtime.relay_lock);

163 164 165
	/* For now create the log file in /sys/kernel/debug/dri/0 dir */
	log_dir = dev_priv->drm.primary->debugfs_root;

166 167
	/*
	 * If /sys/kernel/debug/dri/0 location do not exist, then debugfs is
168 169 170 171 172 173 174 175 176 177 178 179
	 * not mounted and so can't create the relay file.
	 * The relay API seems to fit well with debugfs only, for availing relay
	 * there are 3 requirements which can be met for debugfs file only in a
	 * straightforward/clean manner :-
	 * i)   Need the associated dentry pointer of the file, while opening the
	 *      relay channel.
	 * ii)  Should be able to use 'relay_file_operations' fops for the file.
	 * iii) Set the 'i_private' field of file's inode to the pointer of
	 *	relay channel buffer.
	 */
	if (!log_dir) {
		DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
180 181
		ret = -ENODEV;
		goto out_unlock;
182 183
	}

184
	ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
185
	if (ret < 0 && ret != -EEXIST) {
186
		DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
187
		goto out_unlock;
188 189
	}

190 191
	ret = 0;

192 193 194 195 196 197 198 199 200 201
out_unlock:
	mutex_unlock(&guc->log.runtime.relay_lock);
	return ret;
}

/* True when the relay channel exists; caller must hold relay_lock. */
static bool guc_log_has_relay(struct intel_guc *guc)
{
	lockdep_assert_held(&guc->log.runtime.relay_lock);

	return guc->log.runtime.relay_chan != NULL;
}

static void guc_move_to_next_buf(struct intel_guc *guc)
{
206 207
	/*
	 * Make sure the updates made in the sub buffer are visible when
208 209 210 211
	 * Consumer sees the following update to offset inside the sub buffer.
	 */
	smp_wmb();

212 213 214
	if (!guc_log_has_relay(guc))
		return;

215
	/* All data has been written, so now move the offset of sub buffer. */
216
	relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
217 218

	/* Switch to the next sub buffer */
219
	relay_flush(guc->log.runtime.relay_chan);
220 221 222 223
}

static void *guc_get_write_buffer(struct intel_guc *guc)
{
224
	if (!guc_log_has_relay(guc))
225 226
		return NULL;

227 228
	/*
	 * Just get the base address of a new sub buffer and copy data into it
229 230 231 232 233 234 235
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. Could have used the relay_write() to indirectly
	 * copy the data, but that would have been bit convoluted, as we need to
	 * write to only certain locations inside a sub buffer which cannot be
	 * done without using relay_reserve() along with relay_write(). So its
	 * better to use relay_reserve() alone.
	 */
236
	return relay_reserve(guc->log.runtime.relay_chan, 0);
237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286
}

/*
 * Detect whether the firmware reported new log-buffer overflows since the
 * last sample and update the bookkeeping counters. Returns true when the
 * overflow count changed.
 */
static bool guc_check_log_buf_overflow(struct intel_guc *guc,
				       enum guc_log_buffer_type type,
				       unsigned int full_cnt)
{
	unsigned int last_cnt = guc->log.prev_overflow_count[type];

	if (full_cnt == last_cnt)
		return false;

	guc->log.prev_overflow_count[type] = full_cnt;
	guc->log.total_overflow_count[type] += full_cnt - last_cnt;

	/* buffer_full_cnt is a 4 bit counter */
	if (full_cnt < last_cnt)
		guc->log.total_overflow_count[type] += 16;

	DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");

	return true;
}

/* Size in bytes of one log-buffer region (its page count + 1, in pages). */
static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
	case GUC_DPC_LOG_BUFFER:
		return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
	default:
		MISSING_CASE(type);
		return 0;
	}
}

/*
 * Snapshot the shared GuC log buffer into a relay sub buffer: copy each
 * region's state struct plus only the newly written log data, update the
 * shared read pointers, and hand the filled sub buffer to relay.
 */
static void guc_read_update_log_buffer(struct intel_guc *guc)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	if (WARN_ON(!guc->log.runtime.buf_addr))
		return;

	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = guc->log.runtime.buf_addr;

	mutex_lock(&guc->log.runtime.relay_lock);

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Used rate limited to avoid deluge of messages, logs might be
		 * getting consumed by User at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		guc->log.capture_miss_count++;
		mutex_unlock(&guc->log.runtime.relay_lock);

		return;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside GuC log buffer
		 * (which is uncached mapped), on the stack to avoid reading
		 * from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);

		/* Update the state of shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by GuC firmware,
		 * after sending the flush interrupt to Host, for consistency
		 * set write pointer value to same value of sampled_write_ptr
		 * in the snapshot buffer.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			/* wrapped: copy [0, write) here, [read, end) below */
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(guc);

	mutex_unlock(&guc->log.runtime.relay_lock);
}

static void capture_logs_work(struct work_struct *work)
{
	struct intel_guc *guc =
384
		container_of(work, struct intel_guc, log.runtime.flush_work);
385 386 387 388

	guc_log_capture_logs(guc);
}

389
static bool guc_log_has_runtime(struct intel_guc *guc)
390
{
391
	return guc->log.runtime.buf_addr != NULL;
392 393
}

394
static int guc_log_runtime_create(struct intel_guc *guc)
395 396 397
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	void *vaddr;
398
	int ret;
399 400 401

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

402 403 404
	if (!guc->log.vma)
		return -ENODEV;

405
	GEM_BUG_ON(guc_log_has_runtime(guc));
406

407 408 409 410
	ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
	if (ret)
		return ret;

411 412
	/*
	 * Create a WC (Uncached for read) vmalloc mapping of log
413 414 415 416 417 418 419
	 * buffer pages, so that we can directly get the data
	 * (up-to-date) from memory.
	 */
	vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
		return PTR_ERR(vaddr);
420 421
	}

422
	guc->log.runtime.buf_addr = vaddr;
423

424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459
	return 0;
}

/* Tear down the WC mapping of the log buffer, if it was ever created. */
static void guc_log_runtime_destroy(struct intel_guc *guc)
{
	/*
	 * The runtime may never have been allocated if GuC logging was
	 * disabled at boot time, in which case there is nothing to undo.
	 */
	if (guc_log_has_runtime(guc)) {
		i915_gem_object_unpin_map(guc->log.vma->obj);
		guc->log.runtime.buf_addr = NULL;
	}
}

/* One-time early init of log runtime primitives (lock + flush worker). */
void intel_guc_log_init_early(struct intel_guc *guc)
{
	INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
	mutex_init(&guc->log.runtime.relay_lock);
}

int intel_guc_log_relay_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	if (!i915_modparams.guc_log_level)
		return 0;

	mutex_lock(&guc->log.runtime.relay_lock);

	GEM_BUG_ON(guc_log_has_relay(guc));

460
	 /* Keep the size of sub buffers same as shared log buffer */
461
	subbuf_size = GUC_LOG_SIZE;
462

463 464
	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
465 466 467 468 469
	 * boot time logs and provides enough leeway to User, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;
470

471 472
	/*
	 * Create a relay channel, so that we have buffers for storing
473 474 475 476 477 478 479
	 * the GuC firmware logs, the channel will be linked with a file
	 * later on when debugfs is registered.
	 */
	guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
					n_subbufs, &relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
480

481
		ret = -ENOMEM;
482
		goto err;
483
	}
484

485
	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
486
	guc->log.runtime.relay_chan = guc_log_relay_chan;
487

488 489
	mutex_unlock(&guc->log.runtime.relay_lock);

490
	return 0;
491

492 493 494 495
err:
	mutex_unlock(&guc->log.runtime.relay_lock);
	/* logging will be off */
	i915_modparams.guc_log_level = 0;
496 497
	return ret;
}
498

499
void intel_guc_log_relay_destroy(struct intel_guc *guc)
500
{
501 502
	mutex_lock(&guc->log.runtime.relay_lock);

503
	/*
504
	 * It's possible that the relay was never allocated because
505 506
	 * GuC log was disabled at the boot time.
	 */
507 508
	if (!guc_log_has_relay(guc))
		goto out_unlock;
509

510
	relay_close(guc->log.runtime.relay_chan);
511 512 513 514
	guc->log.runtime.relay_chan = NULL;

out_unlock:
	mutex_unlock(&guc->log.runtime.relay_lock);
515 516 517 518 519 520 521
}

static int guc_log_late_setup(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

522
	if (!guc_log_has_runtime(guc)) {
523 524 525 526
		/*
		 * If log was disabled at boot time, then setup needed to handle
		 * log buffer flush interrupts would not have been done yet, so
		 * do that now.
527
		 */
528
		ret = intel_guc_log_relay_create(guc);
529 530
		if (ret)
			goto err;
531 532 533 534 535 536 537 538 539

		mutex_lock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_get(dev_priv);
		ret = guc_log_runtime_create(guc);
		intel_runtime_pm_put(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);

		if (ret)
			goto err_relay;
540
	}
541

542
	ret = guc_log_relay_file_create(guc);
543
	if (ret)
544
		goto err_runtime;
545 546

	return 0;
547

548
err_runtime:
549
	mutex_lock(&dev_priv->drm.struct_mutex);
550
	guc_log_runtime_destroy(guc);
551 552 553
	mutex_unlock(&dev_priv->drm.struct_mutex);
err_relay:
	intel_guc_log_relay_destroy(guc);
554 555
err:
	/* logging will remain off */
556
	i915_modparams.guc_log_level = 0;
557 558 559 560 561 562 563 564 565
	return ret;
}

/* Snapshot the log buffer, then tell GuC the flush has been handled. */
static void guc_log_capture_logs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	guc_read_update_log_buffer(guc);

	/*
	 * Generally device is expected to be active only at this
	 * time, so get/put should be really quick.
	 */
	intel_runtime_pm_get(dev_priv);
	guc_log_flush_complete(guc);
	intel_runtime_pm_put(dev_priv);
}

static void guc_flush_logs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

579
	if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
580 581 582
		return;

	/* First disable the interrupts, will be renabled afterwards */
583 584
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_runtime_pm_get(dev_priv);
585
	gen9_disable_guc_interrupts(dev_priv);
586 587
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
588

589 590
	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
591 592
	 * flush to complete otherwise forceful flush may not actually happen.
	 */
593
	flush_work(&guc->log.runtime.flush_work);
594 595

	/* Ask GuC to update the log buffer state */
596
	intel_runtime_pm_get(dev_priv);
597
	guc_log_flush(guc);
598
	intel_runtime_pm_put(dev_priv);
599 600 601 602 603

	/* GuC would have updated log buffer by now, so capture it */
	guc_log_capture_logs(guc);
}

604 605 606 607
int intel_guc_log_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	unsigned long offset;
608
	u32 flags;
609 610 611 612
	int ret;

	GEM_BUG_ON(guc->log.vma);

613 614
	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
615 616 617 618 619 620 621 622
	 * it should be present on the chipsets supporting GuC based
	 * submisssions.
	 */
	if (WARN_ON(!i915_has_memcpy_from_wc())) {
		ret = -EINVAL;
		goto err;
	}

623
	vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
624 625 626 627 628 629 630
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	guc->log.vma = vma;

631
	if (i915_modparams.guc_log_level) {
632
		ret = guc_log_runtime_create(guc);
633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651
		if (ret < 0)
			goto err_vma;
	}

	/* each allocated unit is a page */
	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

	offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;

	return 0;

err_vma:
	i915_vma_unpin_and_release(&guc->log.vma);
err:
	/* logging will be off */
652
	i915_modparams.guc_log_level = 0;
653 654 655 656 657
	return ret;
}

void intel_guc_log_destroy(struct intel_guc *guc)
{
658
	guc_log_runtime_destroy(guc);
659 660 661
	i915_vma_unpin_and_release(&guc->log.vma);
}

662
int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
663
{
664
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
665 666
	bool enable_logging = control_val > 0;
	u32 verbosity;
667 668
	int ret;

669 670 671
	if (!guc->log.vma)
		return -ENODEV;

672 673
	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN);
	if (control_val > 1 + GUC_LOG_VERBOSITY_MAX)
674 675 676
		return -EINVAL;

	/* This combination doesn't make sense & won't have any effect */
677
	if (!enable_logging && !i915_modparams.guc_log_level)
678 679
		return 0;

680
	verbosity = enable_logging ? control_val - 1 : 0;
681 682 683 684 685

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
686
	ret = guc_log_control(guc, enable_logging, verbosity);
687 688 689
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

690 691 692 693 694
	if (ret < 0) {
		DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
		return ret;
	}

695 696
	if (enable_logging) {
		i915_modparams.guc_log_level = 1 + verbosity;
697

698 699 700 701
		/*
		 * If log was disabled at boot time, then the relay channel file
		 * wouldn't have been created by now and interrupts also would
		 * not have been enabled. Try again now, just in case.
702
		 */
703
		ret = guc_log_late_setup(guc);
704 705 706 707 708 709
		if (ret < 0) {
			DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
			return ret;
		}

		/* GuC logging is currently the only user of Guc2Host interrupts */
710 711
		mutex_lock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_get(dev_priv);
712
		gen9_enable_guc_interrupts(dev_priv);
713 714
		intel_runtime_pm_put(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
715
	} else {
716 717
		/*
		 * Once logging is disabled, GuC won't generate logs & send an
718 719 720 721 722 723 724
		 * interrupt. But there could be some data in the log buffer
		 * which is yet to be captured. So request GuC to update the log
		 * buffer state and then collect the left over logs.
		 */
		guc_flush_logs(guc);

		/* As logging is disabled, update log level to reflect that */
725
		i915_modparams.guc_log_level = 0;
726 727 728 729 730 731 732
	}

	return ret;
}

void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
733
	if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
734 735 736 737 738 739 740
		return;

	guc_log_late_setup(&dev_priv->guc);
}

/* Hook for driver unregistration: quiesce interrupts and tear down logging. */
void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return;

	mutex_lock(&dev_priv->drm.struct_mutex);
	/* GuC logging is currently the only user of Guc2Host interrupts */
	intel_runtime_pm_get(dev_priv);
	gen9_disable_guc_interrupts(dev_priv);
	intel_runtime_pm_put(dev_priv);

	guc_log_runtime_destroy(guc);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_guc_log_relay_destroy(guc);
}