Commit 805446c8 authored by Tvrtko Ursulin

drm/i915: Introduce concept of a sub-platform

The concept of a sub-platform already exists in our code (like the ULX and
ULT platform variants and similar), implemented via macros which check a
list of device ids to determine a match.

With this patch we consolidate device id checking into a single function
called during early driver load.

A few low bits in the platform mask are reserved for sub-platform
identification and defined as a per-platform namespace.

At the same time it future-proofs the platform_mask handling by preparing
the code for easy extension, and tidies the very verbose WARN strings
generated when IS_PLATFORM macros are embedded in WARN-type statements.
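
As an illustration only (not part of the patch), here is a minimal
standalone C sketch of the bit layout described above; the 3-bit
sub-platform width and the word/bit mapping mirror the helpers added in
this patch, while the platform and sub-platform values are made up:

  #include <stdint.h>
  #include <stdio.h>

  #define SUBPLATFORM_BITS 3  /* low bits reserved per mask word */
  #define MASK_WORDS       2  /* size of the platform_mask array */
  #define PLATFORM_BITS    (32 - SUBPLATFORM_BITS)

  /* Which mask word holds platform 'p', and at which bit position. */
  static unsigned int mask_index(unsigned int p)
  {
          return p / PLATFORM_BITS;
  }

  static unsigned int mask_bit(unsigned int p)
  {
          return p % PLATFORM_BITS + SUBPLATFORM_BITS;
  }

  int main(void)
  {
          uint32_t mask[MASK_WORDS] = { 0 };
          const unsigned int platform = 14; /* hypothetical platform enum value */
          const unsigned int subplat = 1;   /* hypothetical sub-platform id */

          /* Mark the parent platform bit and one of its sub-platform bits. */
          mask[mask_index(platform)] |= 1u << mask_bit(platform);
          mask[mask_index(platform)] |= 1u << subplat;

          /* IS_PLATFORM-style check: test only the platform's own bit. */
          printf("is platform:    %d\n",
                 !!(mask[mask_index(platform)] & (1u << mask_bit(platform))));

          /* IS_SUBPLATFORM-style check: platform bit and sub-platform bit. */
          printf("is subplatform: %d\n",
                 !!(mask[mask_index(platform)] & (1u << mask_bit(platform))) &&
                 !!(mask[mask_index(platform)] & (1u << subplat)));

          return 0;
  }

With three bits reserved per u32, each mask word can describe 29 platforms,
hence the array and the BUILD_BUG_ON guarding its size.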

v2: Fixed IS_SUBPLATFORM. Updated commit msg.
v3: Chris was right, there is an ordering problem.

v4:
 * Catch-up with new sub-platforms.
 * Rebase for RUNTIME_INFO.
 * Drop subplatform mask union tricks and convert platform_mask to an
   array for extensibility.

v5:
 * Fix subplatform check.
 * Protect against forgetting to expand subplatform bits.
 * Remove platform enum tallying.
 * Add subplatform to error state. (Chris)
 * Drop macros and just use static inlines.
 * Remove redundant IRONLAKE_M. (Ville)

v6:
 * Split out Ironlake change.
 * Optimize subplatform check.
 * Use __always_inline. (Lucas)
 * Add platform_mask comment. (Paulo)
 * Pass stored runtime info in error capture. (Chris)

v7:
 * Rebased for new AML ULX device id.
 * Bump platform mask array size for EHL.
 * Stop mentioning device ids in intel_device_subplatform_init by using
   the trick of splitting macros in i915_pciids.h. (Jani)
 * AML seems to be a subplatform of either KBL or CFL, so express it
   like that.

v8:
 * Use one device id table per subplatform. (Jani)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Jose Souza <jose.souza@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190327142328.31780-1-tvrtko.ursulin@linux.intel.com
parent 4ae61358
@@ -868,6 +868,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
intel_device_info_subplatform_init(dev_priv);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
@@ -1760,10 +1762,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
if (drm_debug & DRM_UT_DRIVER) {
struct drm_printer p = drm_debug_printer("i915 device info:");
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
INTEL_DEVID(dev_priv),
INTEL_REVID(dev_priv),
intel_platform_name(INTEL_INFO(dev_priv)->platform),
intel_subplatform(RUNTIME_INFO(dev_priv),
INTEL_INFO(dev_priv)->platform),
INTEL_GEN(dev_priv));
intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
@@ -1806,8 +1810,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(device_info, match_info, sizeof(*device_info));
RUNTIME_INFO(i915)->device_id = pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
BITS_PER_TYPE(device_info->platform_mask));
BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
return i915;
@@ -2302,7 +2302,67 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
enum intel_platform p)
{
const unsigned int pbits =
BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
/* Expand the platform_mask array if this fails. */
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
pbits * ARRAY_SIZE(info->platform_mask));
return p / pbits;
}
static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
enum intel_platform p)
{
const unsigned int pbits =
BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
return p % pbits + INTEL_SUBPLATFORM_BITS;
}
static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);
return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
}
static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
const struct intel_runtime_info *info = RUNTIME_INFO(i915);
const unsigned int pi = __platform_mask_index(info, p);
const unsigned int pb = __platform_mask_bit(info, p);
BUILD_BUG_ON(!__builtin_constant_p(p));
return info->platform_mask[pi] & BIT(pb);
}
static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
enum intel_platform p, unsigned int s)
{
const struct intel_runtime_info *info = RUNTIME_INFO(i915);
const unsigned int pi = __platform_mask_index(info, p);
const unsigned int pb = __platform_mask_bit(info, p);
const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
const u32 mask = info->platform_mask[pi];
BUILD_BUG_ON(!__builtin_constant_p(p));
BUILD_BUG_ON(!__builtin_constant_p(s));
BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
/* Shift and test on the MSB position so sign flag can be used. */
return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
@@ -2341,43 +2401,32 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
(INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe))
/* ULX machines are also considered ULT. */
#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
INTEL_DEVID(dev_priv) == 0x0A1E)
#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
INTEL_DEVID(dev_priv) == 0x1913 || \
INTEL_DEVID(dev_priv) == 0x1916 || \
INTEL_DEVID(dev_priv) == 0x1921 || \
INTEL_DEVID(dev_priv) == 0x1926)
#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
INTEL_DEVID(dev_priv) == 0x1915 || \
INTEL_DEVID(dev_priv) == 0x191E)
#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
INTEL_DEVID(dev_priv) == 0x5913 || \
INTEL_DEVID(dev_priv) == 0x5916 || \
INTEL_DEVID(dev_priv) == 0x5921 || \
INTEL_DEVID(dev_priv) == 0x5926)
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
INTEL_DEVID(dev_priv) == 0x87C0 || \
INTEL_DEVID(dev_priv) == 0x87CA)
#define IS_HSW_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_AML_ULX(dev_priv) \
(IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2388,16 +2437,16 @@ static inline unsigned int i915_sg_segment_size(void)
INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
#define IS_ICL_WITH_PORT_F(dev_priv) (IS_ICELAKE(dev_priv) && \
INTEL_DEVID(dev_priv) != 0x8A51)
#define IS_CNL_WITH_PORT_F(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ICL_WITH_PORT_F(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -677,6 +677,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
err_printf(m, "Subplatform: 0x%x\n",
intel_subplatform(&error->runtime_info,
error->device_info.platform));
err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
@@ -32,7 +32,7 @@
#include "i915_globals.h"
#include "i915_selftest.h"
#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
#define PLATFORM(x) .platform = (x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
#define I845_PIPE_OFFSETS \
@@ -714,6 +714,99 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
return 0;
}
#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)
static const u16 subplatform_ult_ids[] = {
INTEL_HSW_ULT_GT1_IDS(0),
INTEL_HSW_ULT_GT2_IDS(0),
INTEL_HSW_ULT_GT3_IDS(0),
INTEL_BDW_ULT_GT1_IDS(0),
INTEL_BDW_ULT_GT2_IDS(0),
INTEL_BDW_ULT_GT3_IDS(0),
INTEL_BDW_ULT_RSVD_IDS(0),
INTEL_SKL_ULT_GT1_IDS(0),
INTEL_SKL_ULT_GT2_IDS(0),
INTEL_SKL_ULT_GT3_IDS(0),
INTEL_KBL_ULT_GT1_IDS(0),
INTEL_KBL_ULT_GT2_IDS(0),
INTEL_KBL_ULT_GT3_IDS(0),
INTEL_CFL_U_GT2_IDS(0),
INTEL_CFL_U_GT3_IDS(0),
INTEL_WHL_U_GT1_IDS(0),
INTEL_WHL_U_GT2_IDS(0),
INTEL_WHL_U_GT3_IDS(0)
};
static const u16 subplatform_ulx_ids[] = {
INTEL_HSW_ULX_GT1_IDS(0),
INTEL_HSW_ULX_GT2_IDS(0),
INTEL_BDW_ULX_GT1_IDS(0),
INTEL_BDW_ULX_GT2_IDS(0),
INTEL_BDW_ULX_GT3_IDS(0),
INTEL_BDW_ULX_RSVD_IDS(0),
INTEL_SKL_ULX_GT1_IDS(0),
INTEL_SKL_ULX_GT2_IDS(0),
INTEL_KBL_ULX_GT1_IDS(0),
INTEL_KBL_ULX_GT2_IDS(0)
};
static const u16 subplatform_aml_ids[] = {
INTEL_AML_KBL_GT2_IDS(0),
INTEL_AML_CFL_GT2_IDS(0)
};
static const u16 subplatform_portf_ids[] = {
INTEL_CNL_PORT_F_IDS(0),
INTEL_ICL_PORT_F_IDS(0)
};
static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
for (; num; num--, p++) {
if (*p == id)
return true;
}
return false;
}
void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
const struct intel_device_info *info = INTEL_INFO(i915);
const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
const unsigned int pi = __platform_mask_index(rinfo, info->platform);
const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
u16 devid = INTEL_DEVID(i915);
u32 mask = 0;
/* Make sure IS_<platform> checks are working. */
RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);
/* Find and mark subplatform bits based on the PCI device id. */
if (find_devid(devid, subplatform_ult_ids,
ARRAY_SIZE(subplatform_ult_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ULT);
} else if (find_devid(devid, subplatform_ulx_ids,
ARRAY_SIZE(subplatform_ulx_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ULX);
if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
/* ULX machines are also considered ULT. */
mask |= BIT(INTEL_SUBPLATFORM_ULT);
}
} else if (find_devid(devid, subplatform_aml_ids,
ARRAY_SIZE(subplatform_aml_ids))) {
mask = BIT(INTEL_SUBPLATFORM_AML);
} else if (find_devid(devid, subplatform_portf_ids,
ARRAY_SIZE(subplatform_portf_ids))) {
mask = BIT(INTEL_SUBPLATFORM_PORTF);
}
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);
RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}
/**
* intel_device_info_runtime_init - initialize runtime info
* @dev_priv: the i915 device
@@ -77,6 +77,21 @@ enum intel_platform {
INTEL_MAX_PLATFORMS
};
/*
* Subplatform bits share the same namespace per parent platform. In other words
* it is fine for the same bit to be used on multiple parent platforms.
*/
#define INTEL_SUBPLATFORM_BITS (3)
/* HSW/BDW/SKL/KBL/CFL */
#define INTEL_SUBPLATFORM_ULT (0)
#define INTEL_SUBPLATFORM_ULX (1)
#define INTEL_SUBPLATFORM_AML (2)
/* CNL/ICL */
#define INTEL_SUBPLATFORM_PORTF (0)
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
@@ -160,7 +175,6 @@ struct intel_device_info {
intel_engine_mask_t engine_mask; /* Engines supported by the HW */
enum intel_platform platform;
u32 platform_mask;
enum intel_ppgtt_type ppgtt_type;
unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
@@ -197,6 +211,16 @@ struct intel_device_info {
};
struct intel_runtime_info {
/*
* Platform mask is used for optimizing or-ed IS_PLATFORM calls into
* single runtime conditionals, and also to provide groundwork
* for future per platform, or per SKU build optimizations.
*
* Array can be extended when necessary if the corresponding
* BUILD_BUG_ON is hit.
*/
u32 platform_mask[2];
u16 device_id;
u8 num_sprites[I915_MAX_PIPES];
@@ -267,6 +291,7 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump_flags(const struct intel_device_info *info,
struct drm_printer *p);