/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __IVPU_DRV_H__
#define __IVPU_DRV_H__

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_print.h>

#include <linux/pci.h>
#include <linux/xarray.h>
#include <uapi/drm/ivpu_accel.h>

#include "ivpu_mmu_context.h"
#include "ivpu_ipc.h"

22
#define DRIVER_NAME "intel_vpu"
23
#define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)"
24 25 26
#define DRIVER_DATE "20230117"

#define PCI_DEVICE_ID_MTL   0x7d1d
27
#define PCI_DEVICE_ID_ARL   0xad1d
28
#define PCI_DEVICE_ID_LNL   0x643e
29

30
#define IVPU_HW_37XX	37
31
#define IVPU_HW_40XX	40
32

33 34 35 36 37
#define IVPU_GLOBAL_CONTEXT_MMU_SSID   0
/* SSID 1 is used by the VPU to represent reserved context */
#define IVPU_RESERVED_CONTEXT_MMU_SSID 1
#define IVPU_USER_CONTEXT_MIN_SSID     2
#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 63)
38

39 40 41
#define IVPU_MIN_DB 1
#define IVPU_MAX_DB 255

42
#define IVPU_NUM_ENGINES 2
43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61

#define IVPU_PLATFORM_SILICON 0
#define IVPU_PLATFORM_SIMICS  2
#define IVPU_PLATFORM_FPGA    3
#define IVPU_PLATFORM_INVALID 8

#define IVPU_DBG_REG	 BIT(0)
#define IVPU_DBG_IRQ	 BIT(1)
#define IVPU_DBG_MMU	 BIT(2)
#define IVPU_DBG_FILE	 BIT(3)
#define IVPU_DBG_MISC	 BIT(4)
#define IVPU_DBG_FW_BOOT BIT(5)
#define IVPU_DBG_PM	 BIT(6)
#define IVPU_DBG_IPC	 BIT(7)
#define IVPU_DBG_BO	 BIT(8)
#define IVPU_DBG_JOB	 BIT(9)
#define IVPU_DBG_JSM	 BIT(10)
#define IVPU_DBG_KREF	 BIT(11)
#define IVPU_DBG_RPM	 BIT(12)
62
#define IVPU_DBG_MMU_MAP BIT(13)
63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84

#define ivpu_err(vdev, fmt, ...) \
	drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)

#define ivpu_err_ratelimited(vdev, fmt, ...) \
	drm_err_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)

#define ivpu_warn(vdev, fmt, ...) \
	drm_warn(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)

#define ivpu_warn_ratelimited(vdev, fmt, ...) \
	drm_err_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)

#define ivpu_info(vdev, fmt, ...) drm_info(&(vdev)->drm, fmt, ##__VA_ARGS__)

#define ivpu_dbg(vdev, type, fmt, args...) do {                                \
	if (unlikely(IVPU_DBG_##type & ivpu_dbg_mask))                         \
		dev_dbg((vdev)->drm.dev, "[%s] " fmt, #type, ##args);          \
} while (0)

#define IVPU_WA(wa_name) (vdev->wa.wa_name)

85 86 87 88 89
#define IVPU_PRINT_WA(wa_name) do {					\
	if (IVPU_WA(wa_name))						\
		ivpu_dbg(vdev, MISC, "Using WA: " #wa_name "\n");	\
} while (0)

90 91 92
struct ivpu_wa_table {
	bool punit_disabled;
	bool clear_runtime_mem;
93
	bool d3hot_after_power_off;
94
	bool interrupt_clear_with_0;
95
	bool disable_clock_relinquish;
96
	bool disable_d0i3_msg;
97 98 99
};

struct ivpu_hw_info;
struct ivpu_mmu_info;
struct ivpu_fw_info;
struct ivpu_ipc_info;
struct ivpu_pm_info;

struct ivpu_device {
	struct drm_device drm;
	void __iomem *regb;
	void __iomem *regv;
	u32 platform;
	u32 irq;

	struct ivpu_wa_table wa;
	struct ivpu_hw_info *hw;
114
	struct ivpu_mmu_info *mmu;
115
	struct ivpu_fw_info *fw;
116
	struct ivpu_ipc_info *ipc;
117
	struct ivpu_pm_info *pm;
118

119
	struct ivpu_mmu_context gctx;
120
	struct ivpu_mmu_context rctx;
121
	struct mutex context_list_lock; /* Protects user context addition/removal */
122 123 124
	struct xarray context_xa;
	struct xa_limit context_xa_limit;

125 126
	struct xarray db_xa;

127 128 129
	struct mutex bo_list_lock; /* Protects bo_list */
	struct list_head bo_list;

130
	struct xarray submitted_jobs_xa;
131
	struct ivpu_ipc_consumer job_done_consumer;
132

133 134
	atomic64_t unique_id_counter;

135 136 137 138 139
	struct {
		int boot;
		int jsm;
		int tdr;
		int reschedule_suspend;
140
		int autosuspend;
141
		int d0i3_entry_msg;
142 143 144 145 146 147 148 149 150 151
	} timeout;
};

/*
 * file_priv has its own refcount (ref) that allows user space to close the fd
 * without blocking even if VPU is still processing some jobs.
 */
struct ivpu_file_priv {
	struct kref ref;
	struct ivpu_device *vdev;
152 153
	struct mutex lock; /* Protects cmdq */
	struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
154 155
	struct ivpu_mmu_context ctx;
	bool has_mmu_faults;
156
	bool bound;
157 158 159 160 161
};

extern int ivpu_dbg_mask;
extern u8 ivpu_pll_min_ratio;
extern u8 ivpu_pll_max_ratio;
extern bool ivpu_disable_mmu_cont_pages;

#define IVPU_TEST_MODE_FW_TEST            BIT(0)
#define IVPU_TEST_MODE_NULL_HW            BIT(1)
#define IVPU_TEST_MODE_NULL_SUBMISSION    BIT(2)
#define IVPU_TEST_MODE_D0I3_MSG_DISABLE   BIT(4)
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE    BIT(5)
extern int ivpu_test_mode;

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
void ivpu_file_priv_put(struct ivpu_file_priv **link);

int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
void ivpu_prepare_for_reset(struct ivpu_device *vdev);

static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
	return to_pci_dev(vdev->drm.dev)->revision;
}

static inline u16 ivpu_device_id(struct ivpu_device *vdev)
{
	return to_pci_dev(vdev->drm.dev)->device;
}

188 189 190 191
static inline int ivpu_hw_gen(struct ivpu_device *vdev)
{
	switch (ivpu_device_id(vdev)) {
	case PCI_DEVICE_ID_MTL:
192
	case PCI_DEVICE_ID_ARL:
193
		return IVPU_HW_37XX;
194 195
	case PCI_DEVICE_ID_LNL:
		return IVPU_HW_40XX;
196
	default:
197
		ivpu_err(vdev, "Unknown NPU device\n");
198 199 200 201
		return 0;
	}
}

202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235
static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
{
	return container_of(dev, struct ivpu_device, drm);
}

static inline u32 ivpu_get_context_count(struct ivpu_device *vdev)
{
	struct xa_limit ctx_limit = vdev->context_xa_limit;

	return (ctx_limit.max - ctx_limit.min + 1);
}

static inline u32 ivpu_get_platform(struct ivpu_device *vdev)
{
	WARN_ON_ONCE(vdev->platform == IVPU_PLATFORM_INVALID);
	return vdev->platform;
}

static inline bool ivpu_is_silicon(struct ivpu_device *vdev)
{
	return ivpu_get_platform(vdev) == IVPU_PLATFORM_SILICON;
}

static inline bool ivpu_is_simics(struct ivpu_device *vdev)
{
	return ivpu_get_platform(vdev) == IVPU_PLATFORM_SIMICS;
}

static inline bool ivpu_is_fpga(struct ivpu_device *vdev)
{
	return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
}

#endif /* __IVPU_DRV_H__ */