Commit e6b17f5c authored by Dave Airlie

Merge tag 'drm-misc-next-2023-08-10' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v6.6:

UAPI Changes:

 * nouveau:
   * Provide several GETPARAM ioctls
   * Provide VM_BIND ioctls

Cross-subsystem Changes:

 * fbdev: Convert many drivers to fbdev I/O-memory helpers

 * media/vivid: Convert to fbdev I/O-memory helpers

 * vfio-dev/mdpy-fb: Convert to fbdev I/O-memory helpers

Core Changes:

 * Documentation fixes

 * Do not select framebuffer console for fbdev emulation, fixes Kconfig
   dependencies

 * exec:
   * Add test cases for calling drm_exec() multiple times
   * Fix memory leak in selftests
   * Build fixes

 * gem:
   * Fix lockdep checking

 * ttm:
   * Add Kunit tests
   * Cleanups

Driver Changes:

 * atmel-hlcdc:
   * Support inverted pixclock polarity, required by several SoCs

 * bridge:
   * dw-hdmi: Update EDID on HDMI detection
   * sitronix-st7789v: Support panel orientation; Support rotation
                       property; Add support for Jasonic
                       JT240MHQS-HWT-EK-E3 plus DT bindings; Minor
                       fixes

 * ivpu:
   * Support VPU4
   * Refactorings

 * loongson:
   * Fixes

 * mcde:
   * Cleanups

 * nouveau:
   * Track GPU virtual memory via DRM GPUVA manager, enables Vulkan
     sparse binding/residency

 * panfrost:
   * Fix synchronization in IRQ handling

 * tve200:
   * Cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20230810084505.GA14039@linux-uq9g
parents 3defb4fe 2799804a
...@@ -18,6 +18,7 @@ properties: ...@@ -18,6 +18,7 @@ properties:
enum: enum:
- edt,et028013dma - edt,et028013dma
- inanbo,t28cp45tn89-v17 - inanbo,t28cp45tn89-v17
- jasonic,jt240mhqs-hwt-ek-e3
- sitronix,st7789v - sitronix,st7789v
reg: true reg: true
...@@ -25,6 +26,7 @@ properties: ...@@ -25,6 +26,7 @@ properties:
power-supply: true power-supply: true
backlight: true backlight: true
port: true port: true
rotation: true
spi-cpha: true spi-cpha: true
spi-cpol: true spi-cpol: true
...@@ -58,6 +60,7 @@ examples: ...@@ -58,6 +60,7 @@ examples:
reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>; reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
backlight = <&pwm_bl>; backlight = <&pwm_bl>;
power-supply = <&power>; power-supply = <&power>;
rotation = <180>;
spi-max-frequency = <100000>; spi-max-frequency = <100000>;
spi-cpol; spi-cpol;
spi-cpha; spi-cpha;
......
...@@ -677,6 +677,8 @@ patternProperties: ...@@ -677,6 +677,8 @@ patternProperties:
description: iWave Systems Technologies Pvt. Ltd. description: iWave Systems Technologies Pvt. Ltd.
"^jadard,.*": "^jadard,.*":
description: Jadard Technology Inc. description: Jadard Technology Inc.
"^jasonic,.*":
description: Jasonic Technology Ltd.
"^jdi,.*": "^jdi,.*":
description: Japan Display Inc. description: Japan Display Inc.
"^jedec,.*": "^jedec,.*":
......
...@@ -6,3 +6,14 @@ drm/i915 uAPI ...@@ -6,3 +6,14 @@ drm/i915 uAPI
============= =============
.. kernel-doc:: include/uapi/drm/i915_drm.h .. kernel-doc:: include/uapi/drm/i915_drm.h
drm/nouveau uAPI
================
VM_BIND / EXEC uAPI
-------------------
.. kernel-doc:: drivers/gpu/drm/nouveau/nouveau_exec.c
:doc: Overview
.. kernel-doc:: include/uapi/drm/nouveau_drm.h
...@@ -520,7 +520,7 @@ DRM Cache Handling and Fast WC memcpy() ...@@ -520,7 +520,7 @@ DRM Cache Handling and Fast WC memcpy()
.. _drm_sync_objects: .. _drm_sync_objects:
DRM Sync Objects DRM Sync Objects
=========================== ================
.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c .. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
:doc: Overview :doc: Overview
......
...@@ -7,7 +7,8 @@ intel_vpu-y := \ ...@@ -7,7 +7,8 @@ intel_vpu-y := \
ivpu_fw.o \ ivpu_fw.o \
ivpu_fw_log.o \ ivpu_fw_log.o \
ivpu_gem.o \ ivpu_gem.o \
ivpu_hw_mtl.o \ ivpu_hw_37xx.o \
ivpu_hw_40xx.o \
ivpu_ipc.o \ ivpu_ipc.o \
ivpu_job.o \ ivpu_job.o \
ivpu_jsm_msg.o \ ivpu_jsm_msg.o \
......
...@@ -115,6 +115,22 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link) ...@@ -115,6 +115,22 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release); kref_put(&file_priv->ref, file_priv_release);
} }
static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
switch (args->index) {
case DRM_IVPU_CAP_METRIC_STREAMER:
args->value = 0;
break;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
args->value = 1;
break;
default:
return -EINVAL;
}
return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file) static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{ {
struct ivpu_file_priv *file_priv = file->driver_priv; struct ivpu_file_priv *file_priv = file->driver_priv;
...@@ -144,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f ...@@ -144,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = ivpu_get_context_count(vdev); args->value = ivpu_get_context_count(vdev);
break; break;
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS: case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
args->value = vdev->hw->ranges.user_low.start; args->value = vdev->hw->ranges.user.start;
break; break;
case DRM_IVPU_PARAM_CONTEXT_PRIORITY: case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
args->value = file_priv->priority; args->value = file_priv->priority;
...@@ -174,6 +190,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f ...@@ -174,6 +190,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_SKU: case DRM_IVPU_PARAM_SKU:
args->value = vdev->hw->sku; args->value = vdev->hw->sku;
break; break;
case DRM_IVPU_PARAM_CAPABILITIES:
ret = ivpu_get_capabilities(vdev, args);
break;
default: default:
ret = -EINVAL; ret = -EINVAL;
break; break;
...@@ -443,8 +462,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev) ...@@ -443,8 +462,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */ /* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f); pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
/* VPU MTL does not require PCI spec 10m D3hot delay */ /* VPU 37XX does not require 10m D3hot delay */
if (ivpu_is_mtl(vdev)) if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
pdev->d3hot_delay = 0; pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev); ret = pcim_enable_device(pdev);
...@@ -482,8 +501,13 @@ static int ivpu_dev_init(struct ivpu_device *vdev) ...@@ -482,8 +501,13 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (!vdev->pm) if (!vdev->pm)
return -ENOMEM; return -ENOMEM;
vdev->hw->ops = &ivpu_hw_mtl_ops; if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
vdev->hw->dma_bits = 38; vdev->hw->ops = &ivpu_hw_40xx_ops;
vdev->hw->dma_bits = 48;
} else {
vdev->hw->ops = &ivpu_hw_37xx_ops;
vdev->hw->dma_bits = 38;
}
vdev->platform = IVPU_PLATFORM_INVALID; vdev->platform = IVPU_PLATFORM_INVALID;
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID; vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
...@@ -610,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev) ...@@ -610,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
static struct pci_device_id ivpu_pci_ids[] = { static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ } { }
}; };
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids); MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
......
...@@ -23,6 +23,10 @@ ...@@ -23,6 +23,10 @@
#define DRIVER_DATE "20230117" #define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d #define PCI_DEVICE_ID_MTL 0x7d1d
#define PCI_DEVICE_ID_LNL 0x643e
#define IVPU_HW_37XX 37
#define IVPU_HW_40XX 40
#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0 #define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
/* SSID 1 is used by the VPU to represent invalid context */ /* SSID 1 is used by the VPU to represent invalid context */
...@@ -76,6 +80,7 @@ struct ivpu_wa_table { ...@@ -76,6 +80,7 @@ struct ivpu_wa_table {
bool clear_runtime_mem; bool clear_runtime_mem;
bool d3hot_after_power_off; bool d3hot_after_power_off;
bool interrupt_clear_with_0; bool interrupt_clear_with_0;
bool disable_clock_relinquish;
}; };
struct ivpu_hw_info; struct ivpu_hw_info;
...@@ -146,11 +151,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link); ...@@ -146,11 +151,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev); int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev); int ivpu_shutdown(struct ivpu_device *vdev);
static inline bool ivpu_is_mtl(struct ivpu_device *vdev)
{
return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL;
}
static inline u8 ivpu_revision(struct ivpu_device *vdev) static inline u8 ivpu_revision(struct ivpu_device *vdev)
{ {
return to_pci_dev(vdev->drm.dev)->revision; return to_pci_dev(vdev->drm.dev)->revision;
...@@ -161,6 +161,19 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev) ...@@ -161,6 +161,19 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev)
return to_pci_dev(vdev->drm.dev)->device; return to_pci_dev(vdev->drm.dev)->device;
} }
static inline int ivpu_hw_gen(struct ivpu_device *vdev)
{
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_MTL:
return IVPU_HW_37XX;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_40XX;
default:
ivpu_err(vdev, "Unknown VPU device\n");
return 0;
}
}
static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev) static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
{ {
return container_of(dev, struct ivpu_device, drm); return container_of(dev, struct ivpu_device, drm);
......
...@@ -43,12 +43,20 @@ static char *ivpu_firmware; ...@@ -43,12 +43,20 @@ static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644); module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/.."); MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
static struct {
int gen;
const char *name;
} fw_names[] = {
{ IVPU_HW_37XX, "vpu_37xx.bin" },
{ IVPU_HW_37XX, "mtl_vpu.bin" },
{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
{ IVPU_HW_40XX, "vpu_40xx.bin" },
{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};
static int ivpu_fw_request(struct ivpu_device *vdev) static int ivpu_fw_request(struct ivpu_device *vdev)
{ {
static const char * const fw_names[] = {
"mtl_vpu.bin",
"intel/vpu/mtl_vpu_v0.0.bin"
};
int ret = -ENOENT; int ret = -ENOENT;
int i; int i;
...@@ -60,9 +68,12 @@ static int ivpu_fw_request(struct ivpu_device *vdev) ...@@ -60,9 +68,12 @@ static int ivpu_fw_request(struct ivpu_device *vdev)
} }
for (i = 0; i < ARRAY_SIZE(fw_names); i++) { for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev); if (fw_names[i].gen != ivpu_hw_gen(vdev))
continue;
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
if (!ret) { if (!ret) {
vdev->fw->name = fw_names[i]; vdev->fw->name = fw_names[i].name;
return 0; return 0;
} }
} }
...@@ -195,7 +206,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev) ...@@ -195,7 +206,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
return -EINVAL; return -EINVAL;
} }
ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size); ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
return 0; return 0;
} }
...@@ -236,7 +247,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev) ...@@ -236,7 +247,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
} }
if (fw->shave_nn_size) { if (fw->shave_nn_size) {
fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start, fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
fw->shave_nn_size, DRM_IVPU_BO_UNCACHED); fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
if (!fw->mem_shave_nn) { if (!fw->mem_shave_nn) {
ivpu_err(vdev, "Failed to allocate shavenn buffer\n"); ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
...@@ -434,9 +445,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params ...@@ -434,9 +445,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
* Uncached region of VPU address space, covers IPC buffers, job queues * Uncached region of VPU address space, covers IPC buffers, job queues
* and log buffers, programmable to L2$ Uncached by VPU MTRR * and log buffers, programmable to L2$ Uncached by VPU MTRR
*/ */
boot_params->shared_region_base = vdev->hw->ranges.global_low.start; boot_params->shared_region_base = vdev->hw->ranges.global.start;
boot_params->shared_region_size = vdev->hw->ranges.global_low.end - boot_params->shared_region_size = vdev->hw->ranges.global.end -
vdev->hw->ranges.global_low.start; vdev->hw->ranges.global.start;
boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr; boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2; boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
...@@ -444,10 +455,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params ...@@ -444,10 +455,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2; boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2; boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
boot_params->global_aliased_pio_base = boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
vdev->hw->ranges.global_aliased_pio.start; boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
boot_params->global_aliased_pio_size =
ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
/* Allow configuration for L2C_PAGE_TABLE with boot param value */ /* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1; boot_params->autoconfig = 1;
...@@ -455,7 +464,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params ...@@ -455,7 +464,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
/* Enable L2 cache for first 2GB of high memory */ /* Enable L2 cache for first 2GB of high memory */
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1; boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start); ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);
if (vdev->fw->mem_shave_nn) if (vdev->fw->mem_shave_nn)
boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr; boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
......
...@@ -279,10 +279,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, ...@@ -279,10 +279,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
int ret; int ret;
if (!range) { if (!range) {
if (bo->flags & DRM_IVPU_BO_HIGH_MEM) if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
range = &vdev->hw->ranges.user_high; range = &vdev->hw->ranges.shave;
else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
range = &vdev->hw->ranges.dma;
else else
range = &vdev->hw->ranges.user_low; range = &vdev->hw->ranges.user;
} }
mutex_lock(&ctx->lock); mutex_lock(&ctx->lock);
...@@ -570,7 +572,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla ...@@ -570,7 +572,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
fixed_range.end = vpu_addr + size; fixed_range.end = vpu_addr + size;
range = &fixed_range; range = &fixed_range;
} else { } else {
range = &vdev->hw->ranges.global_low; range = &vdev->hw->ranges.global;
} }
bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0); bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
......
...@@ -38,11 +38,10 @@ struct ivpu_addr_range { ...@@ -38,11 +38,10 @@ struct ivpu_addr_range {
struct ivpu_hw_info { struct ivpu_hw_info {
const struct ivpu_hw_ops *ops; const struct ivpu_hw_ops *ops;
struct { struct {
struct ivpu_addr_range global_low; struct ivpu_addr_range global;
struct ivpu_addr_range global_high; struct ivpu_addr_range user;
struct ivpu_addr_range user_low; struct ivpu_addr_range shave;
struct ivpu_addr_range user_high; struct ivpu_addr_range dma;
struct ivpu_addr_range global_aliased_pio;
} ranges; } ranges;
struct { struct {
u8 min_ratio; u8 min_ratio;
...@@ -60,7 +59,8 @@ struct ivpu_hw_info { ...@@ -60,7 +59,8 @@ struct ivpu_hw_info {
int dma_bits; int dma_bits;
}; };
extern const struct ivpu_hw_ops ivpu_hw_mtl_ops; extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
static inline int ivpu_hw_info_init(struct ivpu_device *vdev) static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
{ {
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include <linux/highmem.h> #include <linux/highmem.h>
#include "ivpu_drv.h" #include "ivpu_drv.h"
#include "ivpu_hw_mtl_reg.h" #include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h" #include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h" #include "ivpu_mmu.h"
#include "ivpu_mmu_context.h" #include "ivpu_mmu_context.h"
...@@ -186,13 +186,13 @@ ...@@ -186,13 +186,13 @@
#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC) #define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC) #define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \ #define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \ (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT))) (REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
static char *ivpu_mmu_event_to_str(u32 cmd) static char *ivpu_mmu_event_to_str(u32 cmd)
{ {
...@@ -250,15 +250,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev) ...@@ -250,15 +250,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else else
val_ref = IVPU_MMU_IDR0_REF; val_ref = IVPU_MMU_IDR0_REF;
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0); val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
if (val != val_ref) if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref); ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1); val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
if (val != IVPU_MMU_IDR1_REF) if (val != IVPU_MMU_IDR1_REF)
ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF); ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3); val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
if (val != IVPU_MMU_IDR3_REF) if (val != IVPU_MMU_IDR3_REF)
ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF); ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
...@@ -269,7 +269,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev) ...@@ -269,7 +269,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else else
val_ref = IVPU_MMU_IDR5_REF; val_ref = IVPU_MMU_IDR5_REF;
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5); val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
if (val != val_ref) if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref); ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
} }
...@@ -396,18 +396,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev) ...@@ -396,18 +396,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN; u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
int ret; int ret;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0); ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
if (ret) if (ret)
return ret; return ret;
return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl); return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
} }
static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev) static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{ {
struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons), return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
IVPU_MMU_QUEUE_TIMEOUT_US); IVPU_MMU_QUEUE_TIMEOUT_US);
} }
...@@ -447,7 +447,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev) ...@@ -447,7 +447,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
return ret; return ret;
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE); clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod); REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev); ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
if (ret) if (ret)
...@@ -495,7 +495,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) ...@@ -495,7 +495,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->evtq.prod = 0; mmu->evtq.prod = 0;
mmu->evtq.cons = 0; mmu->evtq.cons = 0;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0); ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
if (ret) if (ret)
return ret; return ret;
...@@ -505,17 +505,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) ...@@ -505,17 +505,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) | FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) | FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB); FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
REGV_WR32(MTL_VPU_HOST_MMU_CR1, val); REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q); REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg); REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q); REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0); REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0); REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
val = IVPU_MMU_CR0_CMDQEN; val = IVPU_MMU_CR0_CMDQEN;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret) if (ret)
return ret; return ret;
...@@ -531,17 +531,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) ...@@ -531,17 +531,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret) if (ret)
return ret; return ret;
REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q); REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0); REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0); REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
val |= IVPU_MMU_CR0_EVTQEN; val |= IVPU_MMU_CR0_EVTQEN;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret) if (ret)
return ret; return ret;
val |= IVPU_MMU_CR0_ATSCHK; val |= IVPU_MMU_CR0_ATSCHK;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret) if (ret)
return ret; return ret;
...@@ -550,7 +550,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev) ...@@ -550,7 +550,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
return ret; return ret;
val |= IVPU_MMU_CR0_SMMUEN; val |= IVPU_MMU_CR0_SMMUEN;
return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val); return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
} }
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid) static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
...@@ -801,14 +801,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev) ...@@ -801,14 +801,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 idx = IVPU_MMU_Q_IDX(evtq->cons); u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE); u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC); evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT)) if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
return NULL; return NULL;
clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE); clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK; evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons); REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
return evt; return evt;
} }
...@@ -841,35 +841,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev) ...@@ -841,35 +841,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
ivpu_dbg(vdev, IRQ, "MMU error\n"); ivpu_dbg(vdev, IRQ, "MMU error\n");
gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR); gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN); gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
active = gerror_val ^ gerrorn_val; active = gerror_val ^ gerrorn_val;
if (!(active & IVPU_MMU_GERROR_ERR_MASK)) if (!(active & IVPU_MMU_GERROR_ERR_MASK))
return; return;
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active)) if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n"); ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active)) if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n"); ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active)) if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n"); ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active)) if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n"); ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active)) if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n"); ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active)) if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n"); ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active)) if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n"); ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val); REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
} }
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable) int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
......
...@@ -431,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3 ...@@ -431,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
return ret; return ret;
if (!context_id) { if (!context_id) {
start = vdev->hw->ranges.global_low.start; start = vdev->hw->ranges.global.start;
end = vdev->hw->ranges.global_high.end; end = vdev->hw->ranges.shave.end;
} else { } else {
start = vdev->hw->ranges.user_low.start; start = vdev->hw->ranges.user.start;
end = vdev->hw->ranges.user_high.end; end = vdev->hw->ranges.dma.end;
} }
drm_mm_init(&ctx->mm, start, end - start); drm_mm_init(&ctx->mm, start, end - start);
......
...@@ -135,7 +135,6 @@ config DRM_DEBUG_MODESET_LOCK ...@@ -135,7 +135,6 @@ config DRM_DEBUG_MODESET_LOCK
config DRM_FBDEV_EMULATION config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver" bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM depends on DRM
select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
default y default y
help help
...@@ -196,6 +195,21 @@ config DRM_TTM ...@@ -196,6 +195,21 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver GPU memory types. Will be enabled automatically if a device driver
uses it. uses it.
config DRM_TTM_KUNIT_TEST
tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
default n
depends on DRM && KUNIT
select DRM_TTM
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
default KUNIT_ALL_TESTS
help
Enables unit tests for TTM, a GPU memory manager subsystem used
to manage memory buffers. This option is mostly useful for kernel
developers.
If in doubt, say "N".
config DRM_EXEC config DRM_EXEC
tristate tristate
depends on DRM depends on DRM
......
...@@ -68,7 +68,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) ...@@ -68,7 +68,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c); struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct regmap *regmap = crtc->dc->hlcdc->regmap; struct regmap *regmap = crtc->dc->hlcdc->regmap;
struct drm_display_mode *adj = &c->state->adjusted_mode; struct drm_display_mode *adj = &c->state->adjusted_mode;
struct drm_encoder *encoder = NULL, *en_iter;
struct drm_connector *connector = NULL;
struct atmel_hlcdc_crtc_state *state; struct atmel_hlcdc_crtc_state *state;
struct drm_device *ddev = c->dev;
struct drm_connector_list_iter iter;
unsigned long mode_rate; unsigned long mode_rate;
struct videomode vm; struct videomode vm;
unsigned long prate; unsigned long prate;
...@@ -76,6 +80,23 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) ...@@ -76,6 +80,23 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
unsigned int cfg = 0; unsigned int cfg = 0;
int div, ret; int div, ret;
/* get encoder from crtc */
drm_for_each_encoder(en_iter, ddev) {
if (en_iter->crtc == c) {
encoder = en_iter;
break;
}
}
if (encoder) {
/* Get the connector from encoder */
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter)
if (connector->encoder == encoder)
break;
drm_connector_list_iter_end(&iter);
}
ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk); ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
if (ret) if (ret)
return; return;
...@@ -134,6 +155,10 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c) ...@@ -134,6 +155,10 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
cfg |= ATMEL_HLCDC_CLKDIV(div); cfg |= ATMEL_HLCDC_CLKDIV(div);
if (connector &&
connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
cfg |= ATMEL_HLCDC_CLKPOL;
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0), mask, cfg); regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0), mask, cfg);
state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state); state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
......
...@@ -2449,15 +2449,7 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi) ...@@ -2449,15 +2449,7 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
enum drm_connector_status result; enum drm_connector_status result;
result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
hdmi->last_connector_result = result;
mutex_lock(&hdmi->mutex);
if (result != hdmi->last_connector_result) {
dev_dbg(hdmi->dev, "read_hpd result: %d", result);
handle_plugged_change(hdmi,
result == connector_status_connected);
hdmi->last_connector_result = result;
}
mutex_unlock(&hdmi->mutex);
return result; return result;
} }
...@@ -2958,6 +2950,7 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge, ...@@ -2958,6 +2950,7 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
hdmi->curr_conn = NULL; hdmi->curr_conn = NULL;
dw_hdmi_update_power(hdmi); dw_hdmi_update_power(hdmi);
dw_hdmi_update_phy_mask(hdmi); dw_hdmi_update_phy_mask(hdmi);
handle_plugged_change(hdmi, false);
mutex_unlock(&hdmi->mutex); mutex_unlock(&hdmi->mutex);
} }
...@@ -2976,6 +2969,7 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge, ...@@ -2976,6 +2969,7 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
hdmi->curr_conn = connector; hdmi->curr_conn = connector;
dw_hdmi_update_power(hdmi); dw_hdmi_update_power(hdmi);
dw_hdmi_update_phy_mask(hdmi); dw_hdmi_update_phy_mask(hdmi);
handle_plugged_change(hdmi, true);
mutex_unlock(&hdmi->mutex); mutex_unlock(&hdmi->mutex);
} }
......
...@@ -496,6 +496,8 @@ struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev, ...@@ -496,6 +496,8 @@ struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
int ret; int ret;
lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL); lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
if (IS_ERR(lbo))
return ERR_CAST(lbo);
ret = lsdc_bo_reserve(lbo); ret = lsdc_bo_reserve(lbo);
if (unlikely(ret)) { if (unlikely(ret)) {
......
...@@ -1226,7 +1226,7 @@ static const struct of_device_id mcde_dsi_of_match[] = { ...@@ -1226,7 +1226,7 @@ static const struct of_device_id mcde_dsi_of_match[] = {
struct platform_driver mcde_dsi_driver = { struct platform_driver mcde_dsi_driver = {
.driver = { .driver = {
.name = "mcde-dsi", .name = "mcde-dsi",
.of_match_table = of_match_ptr(mcde_dsi_of_match), .of_match_table = mcde_dsi_of_match,
}, },
.probe = mcde_dsi_probe, .probe = mcde_dsi_probe,
.remove_new = mcde_dsi_remove, .remove_new = mcde_dsi_remove,
......
...@@ -47,6 +47,9 @@ nouveau-y += nouveau_prime.o ...@@ -47,6 +47,9 @@ nouveau-y += nouveau_prime.o
nouveau-y += nouveau_sgdma.o nouveau-y += nouveau_sgdma.o
nouveau-y += nouveau_ttm.o nouveau-y += nouveau_ttm.o
nouveau-y += nouveau_vmm.o nouveau-y += nouveau_vmm.o
nouveau-y += nouveau_exec.o
nouveau-y += nouveau_sched.o
nouveau-y += nouveau_uvmm.o
# DRM - modesetting # DRM - modesetting
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
......
...@@ -10,6 +10,8 @@ config DRM_NOUVEAU ...@@ -10,6 +10,8 @@ config DRM_NOUVEAU
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_TTM select DRM_TTM
select DRM_TTM_HELPER select DRM_TTM_HELPER
select DRM_EXEC
select DRM_SCHED
select I2C select I2C
select I2C_ALGOBIT select I2C_ALGOBIT
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
......
...@@ -1122,11 +1122,18 @@ nv04_page_flip_emit(struct nouveau_channel *chan, ...@@ -1122,11 +1122,18 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000); PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
PUSH_KICK(push); PUSH_KICK(push);
ret = nouveau_fence_new(chan, false, pfence); ret = nouveau_fence_new(pfence);
if (ret) if (ret)
goto fail; goto fail;
ret = nouveau_fence_emit(*pfence, chan);
if (ret)
goto fail_fence_unref;
return 0; return 0;
fail_fence_unref:
nouveau_fence_unref(pfence);
fail: fail:
spin_lock_irqsave(&dev->event_lock, flags); spin_lock_irqsave(&dev->event_lock, flags);
list_del(&s->head); list_del(&s->head);
......
...@@ -3,7 +3,10 @@ ...@@ -3,7 +3,10 @@
struct nvif_vmm_v0 { struct nvif_vmm_v0 {
__u8 version; __u8 version;
__u8 page_nr; __u8 page_nr;
__u8 managed; #define NVIF_VMM_V0_TYPE_UNMANAGED 0x00
#define NVIF_VMM_V0_TYPE_MANAGED 0x01
#define NVIF_VMM_V0_TYPE_RAW 0x02
__u8 type;
__u8 pad03[5]; __u8 pad03[5];
__u64 addr; __u64 addr;
__u64 size; __u64 size;
...@@ -17,6 +20,7 @@ struct nvif_vmm_v0 { ...@@ -17,6 +20,7 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_UNMAP 0x04 #define NVIF_VMM_V0_UNMAP 0x04
#define NVIF_VMM_V0_PFNMAP 0x05 #define NVIF_VMM_V0_PFNMAP 0x05
#define NVIF_VMM_V0_PFNCLR 0x06 #define NVIF_VMM_V0_PFNCLR 0x06
#define NVIF_VMM_V0_RAW 0x07
#define NVIF_VMM_V0_MTHD(i) ((i) + 0x80) #define NVIF_VMM_V0_MTHD(i) ((i) + 0x80)
struct nvif_vmm_page_v0 { struct nvif_vmm_page_v0 {
...@@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 { ...@@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 {
__u64 addr; __u64 addr;
}; };
struct nvif_vmm_raw_v0 {
__u8 version;
#define NVIF_VMM_RAW_V0_GET 0x0
#define NVIF_VMM_RAW_V0_PUT 0x1
#define NVIF_VMM_RAW_V0_MAP 0x2
#define NVIF_VMM_RAW_V0_UNMAP 0x3
#define NVIF_VMM_RAW_V0_SPARSE 0x4
__u8 op;
__u8 sparse;
__u8 ref;
__u8 shift;
__u32 argc;
__u8 pad01[7];
__u64 addr;
__u64 size;
__u64 offset;
__u64 memory;
__u64 argv;
};
struct nvif_vmm_pfnmap_v0 { struct nvif_vmm_pfnmap_v0 {
__u8 version; __u8 version;
__u8 page; __u8 page;
......
...@@ -4,6 +4,12 @@ ...@@ -4,6 +4,12 @@
struct nvif_mem; struct nvif_mem;
struct nvif_mmu; struct nvif_mmu;
enum nvif_vmm_type {
UNMANAGED,
MANAGED,
RAW,
};
enum nvif_vmm_get { enum nvif_vmm_get {
ADDR, ADDR,
PTES, PTES,
...@@ -30,8 +36,9 @@ struct nvif_vmm { ...@@ -30,8 +36,9 @@ struct nvif_vmm {
int page_nr; int page_nr;
}; };
int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed, int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass,
u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *); enum nvif_vmm_type, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_vmm *);
void nvif_vmm_dtor(struct nvif_vmm *); void nvif_vmm_dtor(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse, int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *); u8 page, u8 align, u64 size, struct nvif_vma *);
...@@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *); ...@@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc, int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_mem *, u64 offset); struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64); int nvif_vmm_unmap(struct nvif_vmm *, u64);
int nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
int nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
int nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
void *argv, u32 argc, struct nvif_mem *mem, u64 offset);
int nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
u8 shift, bool sparse);
int nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref);
#endif #endif
...@@ -17,6 +17,7 @@ struct nvkm_vma { ...@@ -17,6 +17,7 @@ struct nvkm_vma {
bool part:1; /* Region was split from an allocated region by map(). */ bool part:1; /* Region was split from an allocated region by map(). */
bool busy:1; /* Region busy (for temporarily preventing user access). */ bool busy:1; /* Region busy (for temporarily preventing user access). */
bool mapped:1; /* Region contains valid pages. */ bool mapped:1; /* Region contains valid pages. */
bool no_comp:1; /* Force no memory compression. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */ struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
struct nvkm_tags *tags; /* Compression tag reference. */ struct nvkm_tags *tags; /* Compression tag reference. */
}; };
...@@ -27,10 +28,26 @@ struct nvkm_vmm { ...@@ -27,10 +28,26 @@ struct nvkm_vmm {
const char *name; const char *name;
u32 debug; u32 debug;
struct kref kref; struct kref kref;
struct mutex mutex;
struct {
struct mutex vmm;
struct mutex ref;
struct mutex map;
} mutex;
u64 start; u64 start;
u64 limit; u64 limit;
struct {
struct {
u64 addr;
u64 size;
} p;
struct {
u64 addr;
u64 size;
} n;
bool raw;
} managed;
struct nvkm_vmm_pt *pd; struct nvkm_vmm_pt *pd;
struct list_head join; struct list_head join;
...@@ -70,6 +87,7 @@ struct nvkm_vmm_map { ...@@ -70,6 +87,7 @@ struct nvkm_vmm_map {
const struct nvkm_vmm_page *page; const struct nvkm_vmm_page *page;
bool no_comp;
struct nvkm_tags *tags; struct nvkm_tags *tags;
u64 next; u64 next;
u64 type; u64 type;
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include "nouveau_chan.h" #include "nouveau_chan.h"
#include "nouveau_abi16.h" #include "nouveau_abi16.h"
#include "nouveau_vmm.h" #include "nouveau_vmm.h"
#include "nouveau_sched.h"
static struct nouveau_abi16 * static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv) nouveau_abi16(struct drm_file *file_priv)
...@@ -125,6 +126,17 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, ...@@ -125,6 +126,17 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{ {
struct nouveau_abi16_ntfy *ntfy, *temp; struct nouveau_abi16_ntfy *ntfy, *temp;
/* When a client exits without waiting for it's queued up jobs to
* finish it might happen that we fault the channel. This is due to
* drm_file_free() calling drm_gem_release() before the postclose()
* callback. Hence, we can't tear down this scheduler entity before
* uvmm mappings are unmapped. Currently, we can't detect this case.
*
* However, this should be rare and harmless, since the channel isn't
* needed anymore.
*/
nouveau_sched_entity_fini(&chan->sched_entity);
/* wait for all activity to stop before cleaning up */ /* wait for all activity to stop before cleaning up */
if (chan->chan) if (chan->chan)
nouveau_channel_idle(chan->chan); nouveau_channel_idle(chan->chan);
...@@ -261,6 +273,13 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) ...@@ -261,6 +273,13 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (!drm->channel) if (!drm->channel)
return nouveau_abi16_put(abi16, -ENODEV); return nouveau_abi16_put(abi16, -ENODEV);
/* If uvmm wasn't initialized until now disable it completely to prevent
* userspace from mixing up UAPIs.
*
* The client lock is already acquired by nouveau_abi16_get().
*/
__nouveau_cli_disable_uvmm_noinit(cli);
device = &abi16->device; device = &abi16->device;
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR; engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
...@@ -304,6 +323,11 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) ...@@ -304,6 +323,11 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret) if (ret)
goto done; goto done;
ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
drm->sched_wq);
if (ret)
goto done;
init->channel = chan->chan->chid; init->channel = chan->chan->chid;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
......
...@@ -26,6 +26,7 @@ struct nouveau_abi16_chan { ...@@ -26,6 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy; struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma; struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap; struct nvkm_mm heap;
struct nouveau_sched_entity sched_entity;
}; };
struct nouveau_abi16 { struct nouveau_abi16 {
...@@ -43,28 +44,6 @@ int nouveau_abi16_usif(struct drm_file *, void *data, u32 size); ...@@ -43,28 +44,6 @@ int nouveau_abi16_usif(struct drm_file *, void *data, u32 size);
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1) #define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2) #define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
struct drm_nouveau_channel_alloc {
uint32_t fb_ctxdma_handle;
uint32_t tt_ctxdma_handle;
int channel;
uint32_t pushbuf_domains;
/* Notifier memory */
uint32_t notifier_handle;
/* DRM-enforced subchannel assignments */
struct {
uint32_t handle;
uint32_t grclass;
} subchan[8];
uint32_t nr_subchan;
};
struct drm_nouveau_channel_free {
int channel;
};
struct drm_nouveau_grobj_alloc { struct drm_nouveau_grobj_alloc {
int channel; int channel;
uint32_t handle; uint32_t handle;
...@@ -83,31 +62,12 @@ struct drm_nouveau_gpuobj_free { ...@@ -83,31 +62,12 @@ struct drm_nouveau_gpuobj_free {
uint32_t handle; uint32_t handle;
}; };
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
#define NOUVEAU_GETPARAM_BUS_TYPE 5
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
struct drm_nouveau_getparam {
uint64_t param;
uint64_t value;
};
struct drm_nouveau_setparam { struct drm_nouveau_setparam {
uint64_t param; uint64_t param;
uint64_t value; uint64_t value;
}; };
#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam) #define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc) #define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc) #define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free) #define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
......
...@@ -199,12 +199,12 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size) ...@@ -199,12 +199,12 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
struct nouveau_bo * struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
u32 tile_mode, u32 tile_flags) u32 tile_mode, u32 tile_flags, bool internal)
{ {
struct nouveau_drm *drm = cli->drm; struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo; struct nouveau_bo *nvbo;
struct nvif_mmu *mmu = &cli->mmu; struct nvif_mmu *mmu = &cli->mmu;
struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm; struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
int i, pi = -1; int i, pi = -1;
if (!*size) { if (!*size) {
...@@ -215,6 +215,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, ...@@ -215,6 +215,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo) if (!nvbo)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&nvbo->head); INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry); INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list); INIT_LIST_HEAD(&nvbo->vma_list);
...@@ -232,68 +233,103 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, ...@@ -232,68 +233,103 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
nvbo->force_coherent = true; nvbo->force_coherent = true;
} }
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) { nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
nvbo->kind = (tile_flags & 0x0000ff00) >> 8; if (!nouveau_cli_uvmm(cli) || internal) {
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { /* for BO noVM allocs, don't assign kinds */
kfree(nvbo); if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
return ERR_PTR(-EINVAL); nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
nvbo->kind = (tile_flags & 0x00007f00) >> 8;
nvbo->comp = (tile_flags & 0x00030000) >> 16;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
} else {
nvbo->zeta = (tile_flags & 0x00000007);
} }
nvbo->mode = tile_mode;
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
* during buffer migration, we need to determine page
* size for the buffer up-front, and pre-allocate its
* page tables.
*
* Skip page sizes that can't support needed domains.
*/
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
(domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; /* Select this page size if it's the first that supports
} else * the potential memory domains, or when it's compatible
if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { * with the requested compression settings.
nvbo->kind = (tile_flags & 0x00007f00) >> 8; */
nvbo->comp = (tile_flags & 0x00030000) >> 16; if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { pi = i;
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;
}
if (WARN_ON(pi < 0)) {
kfree(nvbo); kfree(nvbo);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
} else {
nvbo->zeta = (tile_flags & 0x00000007);
}
nvbo->mode = tile_mode;
nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
* during buffer migration, we need to determine page
* size for the buffer up-front, and pre-allocate its
* page tables.
*
* Skip page sizes that can't support needed domains.
*/
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
(domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
/* Select this page size if it's the first that supports /* Disable compression if suitable settings couldn't be found. */
* the potential memory domains, or when it's compatible if (nvbo->comp && !vmm->page[pi].comp) {
* with the requested compression settings. if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
*/ nvbo->kind = mmu->kind[nvbo->kind];
if (pi < 0 || !nvbo->comp || vmm->page[i].comp) nvbo->comp = 0;
pi = i; }
nvbo->page = vmm->page[pi].shift;
/* Stop once the buffer is larger than the current page size. */ } else {
if (*size >= 1ULL << vmm->page[i].shift) /* reject other tile flags when in VM mode. */
break; if (tile_mode)
} return ERR_PTR(-EINVAL);
if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
return ERR_PTR(-EINVAL);
if (WARN_ON(pi < 0)) { /* Determine the desirable target GPU page size for the buffer. */
kfree(nvbo); for (i = 0; i < vmm->page_nr; i++) {
return ERR_PTR(-EINVAL); /* Because we cannot currently allow VMM maps to fail
} * during buffer migration, we need to determine page
* size for the buffer up-front, and pre-allocate its
* page tables.
*
* Skip page sizes that can't support needed domains.
*/
if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
/* Disable compression if suitable settings couldn't be found. */ if (pi < 0)
if (nvbo->comp && !vmm->page[pi].comp) { pi = i;
if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100) /* Stop once the buffer is larger than the current page size. */
nvbo->kind = mmu->kind[nvbo->kind]; if (*size >= 1ULL << vmm->page[i].shift)
nvbo->comp = 0; break;
}
if (WARN_ON(pi < 0)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
nvbo->page = vmm->page[pi].shift;
} }
nvbo->page = vmm->page[pi].shift;
nouveau_bo_fixup_align(nvbo, align, size); nouveau_bo_fixup_align(nvbo, align, size);
...@@ -306,18 +342,26 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain, ...@@ -306,18 +342,26 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
{ {
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device; int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
int ret; int ret;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
.resv = robj,
};
nouveau_bo_placement_set(nvbo, domain, 0); nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru); INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type, ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
&nvbo->placement, align >> PAGE_SHIFT, false, &nvbo->placement, align >> PAGE_SHIFT, &ctx,
sg, robj, nouveau_bo_del_ttm); sg, robj, nouveau_bo_del_ttm);
if (ret) { if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */ /* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret; return ret;
} }
if (!robj)
ttm_bo_unreserve(&nvbo->bo);
return 0; return 0;
} }
...@@ -331,7 +375,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, ...@@ -331,7 +375,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
int ret; int ret;
nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags); tile_flags, true);
if (IS_ERR(nvbo)) if (IS_ERR(nvbo))
return PTR_ERR(nvbo); return PTR_ERR(nvbo);
...@@ -339,6 +383,11 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, ...@@ -339,6 +383,11 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
dma_resv_init(&nvbo->bo.base._resv); dma_resv_init(&nvbo->bo.base._resv);
drm_vma_node_reset(&nvbo->bo.base.vma_node); drm_vma_node_reset(&nvbo->bo.base.vma_node);
/* This must be called before ttm_bo_init_reserved(). Subsequent
* bo_move() callbacks might already iterate the GEMs GPUVA list.
*/
drm_gem_gpuva_init(&nvbo->bo.base);
ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj); ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret) if (ret)
return ret; return ret;
...@@ -817,29 +866,39 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, ...@@ -817,29 +866,39 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
mutex_lock(&cli->mutex); mutex_lock(&cli->mutex);
else else
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible); ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				/* TODO: figure out a better solution here
				 *
				 * wait on the fence here explicitly as going through
				 * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
				 *
				 * Without this the operation can timeout and we'll fallback to a
				 * software copy, which might take several minutes to finish.
				 */
				nouveau_fence_wait(fence, false, false);
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict, false,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}

	if (ret)
		goto out_unlock;

	ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
	if (ret)
		goto out_unlock;

	ret = nouveau_fence_new(&fence);
	if (ret)
		goto out_unlock;

	ret = nouveau_fence_emit(fence, chan);
	if (ret) {
		nouveau_fence_unref(&fence);
		goto out_unlock;
	}

	/* TODO: figure out a better solution here
	 *
	 * wait on the fence here explicitly as going through
	 * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
	 *
	 * Without this the operation can timeout and we'll fallback to a
	 * software copy, which might take several minutes to finish.
	 */
	nouveau_fence_wait(fence, false, false);
	ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
					new_reg);
	nouveau_fence_unref(&fence);
out_unlock:
mutex_unlock(&cli->mutex); mutex_unlock(&cli->mutex);
return ret; return ret;
} }
...@@ -935,6 +994,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, ...@@ -935,6 +994,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
list_for_each_entry(vma, &nvbo->vma_list, head) { list_for_each_entry(vma, &nvbo->vma_list, head) {
nouveau_vma_map(vma, mem); nouveau_vma_map(vma, mem);
} }
nouveau_uvmm_bo_map_all(nvbo, mem);
} else { } else {
list_for_each_entry(vma, &nvbo->vma_list, head) { list_for_each_entry(vma, &nvbo->vma_list, head) {
ret = dma_resv_wait_timeout(bo->base.resv, ret = dma_resv_wait_timeout(bo->base.resv,
...@@ -943,6 +1003,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, ...@@ -943,6 +1003,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
WARN_ON(ret <= 0); WARN_ON(ret <= 0);
nouveau_vma_unmap(vma); nouveau_vma_unmap(vma);
} }
nouveau_uvmm_bo_unmap_all(nvbo);
} }
if (new_reg) if (new_reg)
......
...@@ -26,6 +26,7 @@ struct nouveau_bo { ...@@ -26,6 +26,7 @@ struct nouveau_bo {
struct list_head entry; struct list_head entry;
int pbbo_index; int pbbo_index;
bool validate_mapped; bool validate_mapped;
bool no_share;
/* GPU address space is independent of CPU word size */ /* GPU address space is independent of CPU word size */
uint64_t offset; uint64_t offset;
...@@ -73,7 +74,7 @@ extern struct ttm_device_funcs nouveau_bo_driver; ...@@ -73,7 +74,7 @@ extern struct ttm_device_funcs nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *); void nouveau_bo_move_init(struct nouveau_drm *);
struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align, struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
u32 domain, u32 tile_mode, u32 tile_flags); u32 domain, u32 tile_mode, u32 tile_flags, bool internal);
int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain, int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj); struct sg_table *sg, struct dma_resv *robj);
int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain, int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
......
...@@ -40,6 +40,14 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM"); ...@@ -40,6 +40,14 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf; int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
void
nouveau_channel_kill(struct nouveau_channel *chan)
{
atomic_set(&chan->killed, 1);
if (chan->fence)
nouveau_fence_context_kill(chan->fence, -ENODEV);
}
static int static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc) nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{ {
...@@ -47,9 +55,9 @@ nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc) ...@@ -47,9 +55,9 @@ nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
struct nouveau_cli *cli = (void *)chan->user.client; struct nouveau_cli *cli = (void *)chan->user.client;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid); NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
	atomic_set(&chan->killed, 1);
	if (chan->fence)
		nouveau_fence_context_kill(chan->fence, -ENODEV);

	if (unlikely(!atomic_read(&chan->killed)))
		nouveau_channel_kill(chan);
return NVIF_EVENT_DROP; return NVIF_EVENT_DROP;
} }
...@@ -62,9 +70,11 @@ nouveau_channel_idle(struct nouveau_channel *chan) ...@@ -62,9 +70,11 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL; struct nouveau_fence *fence = NULL;
int ret; int ret;
ret = nouveau_fence_new(chan, false, &fence); ret = nouveau_fence_new(&fence);
if (!ret) { if (!ret) {
ret = nouveau_fence_wait(fence, false, false); ret = nouveau_fence_emit(fence, chan);
if (!ret)
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence); nouveau_fence_unref(&fence);
} }
...@@ -149,7 +159,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, ...@@ -149,7 +159,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
chan->device = device; chan->device = device;
chan->drm = drm; chan->drm = drm;
chan->vmm = cli->svm.cli ? &cli->svm : &cli->vmm; chan->vmm = nouveau_cli_vmm(cli);
atomic_set(&chan->killed, 0); atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */ /* allocate memory for dma push buffer */
......
...@@ -66,6 +66,7 @@ int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv, ...@@ -66,6 +66,7 @@ int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv,
u32 vram, u32 gart, struct nouveau_channel **); u32 vram, u32 gart, struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **); void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *); int nouveau_channel_idle(struct nouveau_channel *);
void nouveau_channel_kill(struct nouveau_channel *);
extern int nouveau_vram_pushbuf; extern int nouveau_vram_pushbuf;
......
...@@ -203,6 +203,44 @@ nouveau_debugfs_pstate_open(struct inode *inode, struct file *file) ...@@ -203,6 +203,44 @@ nouveau_debugfs_pstate_open(struct inode *inode, struct file *file)
return single_open(file, nouveau_debugfs_pstate_get, inode->i_private); return single_open(file, nouveau_debugfs_pstate_get, inode->i_private);
} }
static void
nouveau_debugfs_gpuva_regions(struct seq_file *m, struct nouveau_uvmm *uvmm)
{
MA_STATE(mas, &uvmm->region_mt, 0, 0);
struct nouveau_uvma_region *reg;
seq_puts (m, " VA regions | start | range | end \n");
seq_puts (m, "----------------------------------------------------------------------------\n");
mas_for_each(&mas, reg, ULONG_MAX)
seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx\n",
reg->va.addr, reg->va.range, reg->va.addr + reg->va.range);
}
static int
nouveau_debugfs_gpuva(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
struct nouveau_cli *cli;
mutex_lock(&drm->clients_lock);
list_for_each_entry(cli, &drm->clients, head) {
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
if (!uvmm)
continue;
nouveau_uvmm_lock(uvmm);
drm_debugfs_gpuva_info(m, &uvmm->umgr);
seq_puts(m, "\n");
nouveau_debugfs_gpuva_regions(m, uvmm);
nouveau_uvmm_unlock(uvmm);
}
mutex_unlock(&drm->clients_lock);
return 0;
}
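When the new GPUVA debugfs entry registered below is read, nouveau_debugfs_gpuva_regions() prints one VA-region table per client VA space in roughly the following shape. The addresses are made up for illustration and the column padding in the real format strings is wider than reproduced here:

 VA regions | start              | range              | end
----------------------------------------------------------------------------
            | 0x0000000000100000 | 0x0000000000a00000 | 0x0000000000b00000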
static const struct file_operations nouveau_pstate_fops = { static const struct file_operations nouveau_pstate_fops = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.open = nouveau_debugfs_pstate_open, .open = nouveau_debugfs_pstate_open,
...@@ -214,6 +252,7 @@ static const struct file_operations nouveau_pstate_fops = { ...@@ -214,6 +252,7 @@ static const struct file_operations nouveau_pstate_fops = {
static struct drm_info_list nouveau_debugfs_list[] = { static struct drm_info_list nouveau_debugfs_list[] = {
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "strap_peek", nouveau_debugfs_strap_peek, 0, NULL }, { "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
DRM_DEBUGFS_GPUVA_INFO(nouveau_debugfs_gpuva, NULL),
}; };
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
......
...@@ -209,7 +209,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) ...@@ -209,7 +209,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
goto done; goto done;
} }
nouveau_fence_new(dmem->migrate.chan, false, &fence); if (!nouveau_fence_new(&fence))
nouveau_fence_emit(fence, dmem->migrate.chan);
migrate_vma_pages(&args); migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence); nouveau_dmem_fence_done(&fence);
dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
...@@ -402,7 +403,8 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk) ...@@ -402,7 +403,8 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
} }
} }
nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence); if (!nouveau_fence_new(&fence))
nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages); migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence); nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages); migrate_device_finalize(src_pfns, dst_pfns, npages);
...@@ -675,7 +677,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm, ...@@ -675,7 +677,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
addr += PAGE_SIZE; addr += PAGE_SIZE;
} }
nouveau_fence_new(drm->dmem->migrate.chan, false, &fence); if (!nouveau_fence_new(&fence))
nouveau_fence_emit(fence, drm->dmem->migrate.chan);
migrate_vma_pages(args); migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence); nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i); nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
......
...@@ -68,6 +68,9 @@ ...@@ -68,6 +68,9 @@
#include "nouveau_platform.h" #include "nouveau_platform.h"
#include "nouveau_svm.h" #include "nouveau_svm.h"
#include "nouveau_dmem.h" #include "nouveau_dmem.h"
#include "nouveau_exec.h"
#include "nouveau_uvmm.h"
#include "nouveau_sched.h"
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0, DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE", "DRM_UT_CORE",
...@@ -196,6 +199,8 @@ nouveau_cli_fini(struct nouveau_cli *cli) ...@@ -196,6 +199,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker)); WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli); usif_client_fini(cli);
nouveau_uvmm_fini(&cli->uvmm);
nouveau_sched_entity_fini(&cli->sched_entity);
nouveau_vmm_fini(&cli->svm); nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm); nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu); nvif_mmu_dtor(&cli->mmu);
...@@ -301,6 +306,12 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, ...@@ -301,6 +306,12 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
} }
cli->mem = &mems[ret]; cli->mem = &mems[ret];
ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
drm->sched_wq);
if (ret)
goto done;
return 0; return 0;
done: done:
if (ret) if (ret)
...@@ -568,10 +579,14 @@ nouveau_drm_device_init(struct drm_device *dev) ...@@ -568,10 +579,14 @@ nouveau_drm_device_init(struct drm_device *dev)
nvif_parent_ctor(&nouveau_parent, &drm->parent); nvif_parent_ctor(&nouveau_parent, &drm->parent);
drm->master.base.object.parent = &drm->parent; drm->master.base.object.parent = &drm->parent;
ret = nouveau_cli_init(drm, "DRM-master", &drm->master); ret = nouveau_sched_init(drm);
if (ret) if (ret)
goto fail_alloc; goto fail_alloc;
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
goto fail_sched;
ret = nouveau_cli_init(drm, "DRM", &drm->client); ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret) if (ret)
goto fail_master; goto fail_master;
...@@ -628,7 +643,6 @@ nouveau_drm_device_init(struct drm_device *dev) ...@@ -628,7 +643,6 @@ nouveau_drm_device_init(struct drm_device *dev)
} }
return 0; return 0;
fail_dispinit: fail_dispinit:
nouveau_display_destroy(dev); nouveau_display_destroy(dev);
fail_dispctor: fail_dispctor:
...@@ -641,6 +655,8 @@ nouveau_drm_device_init(struct drm_device *dev) ...@@ -641,6 +655,8 @@ nouveau_drm_device_init(struct drm_device *dev)
nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->client);
fail_master: fail_master:
nouveau_cli_fini(&drm->master); nouveau_cli_fini(&drm->master);
fail_sched:
nouveau_sched_fini(drm);
fail_alloc: fail_alloc:
nvif_parent_dtor(&drm->parent); nvif_parent_dtor(&drm->parent);
kfree(drm); kfree(drm);
...@@ -692,6 +708,8 @@ nouveau_drm_device_fini(struct drm_device *dev) ...@@ -692,6 +708,8 @@ nouveau_drm_device_fini(struct drm_device *dev)
} }
mutex_unlock(&drm->clients_lock); mutex_unlock(&drm->clients_lock);
nouveau_sched_fini(drm);
nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master); nouveau_cli_fini(&drm->master);
nvif_parent_dtor(&drm->parent); nvif_parent_dtor(&drm->parent);
...@@ -1193,6 +1211,9 @@ nouveau_ioctls[] = { ...@@ -1193,6 +1211,9 @@ nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_VM_INIT, nouveau_uvmm_ioctl_vm_init, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_VM_BIND, nouveau_uvmm_ioctl_vm_bind, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW),
}; };
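The three new render-node ioctls registered above (VM_INIT, VM_BIND, EXEC) make up the uAPI advertised in this merge. A minimal userspace sketch of initializing the per-fd VM before any VM_BIND/EXEC use might look like the following; the struct layout (kernel_managed_addr/kernel_managed_size) and the ioctl macro name are taken from the new include/uapi/drm/nouveau_drm.h added by this series, but should be double-checked against that header rather than this sketch, and the address-space split chosen here is purely illustrative.

#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>	/* kernel uAPI header extended by this series */

/* Reserve the upper half of a 48-bit VA space for kernel-managed mappings
 * (illustrative split, not a requirement of the uAPI). */
static int example_vm_init(int drm_fd)
{
	struct drm_nouveau_vm_init init = {
		.kernel_managed_addr = 1ULL << 47,
		.kernel_managed_size = 1ULL << 47,
	};

	return ioctl(drm_fd, DRM_IOCTL_NOUVEAU_VM_INIT, &init);
}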
long long
...@@ -1240,6 +1261,8 @@ nouveau_driver_fops = { ...@@ -1240,6 +1261,8 @@ nouveau_driver_fops = {
static struct drm_driver static struct drm_driver
driver_stub = { driver_stub = {
.driver_features = DRIVER_GEM | .driver_features = DRIVER_GEM |
DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE |
DRIVER_GEM_GPUVA |
DRIVER_MODESET | DRIVER_MODESET |
DRIVER_RENDER, DRIVER_RENDER,
.open = nouveau_drm_open, .open = nouveau_drm_open,
......
...@@ -10,8 +10,8 @@ ...@@ -10,8 +10,8 @@
#define DRIVER_DATE "20120801" #define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1 #define DRIVER_MAJOR 1
#define DRIVER_MINOR 3 #define DRIVER_MINOR 4
#define DRIVER_PATCHLEVEL 1 #define DRIVER_PATCHLEVEL 0
/* /*
* 1.1.1: * 1.1.1:
...@@ -63,7 +63,9 @@ struct platform_device; ...@@ -63,7 +63,9 @@ struct platform_device;
#include "nouveau_fence.h" #include "nouveau_fence.h"
#include "nouveau_bios.h" #include "nouveau_bios.h"
#include "nouveau_sched.h"
#include "nouveau_vmm.h" #include "nouveau_vmm.h"
#include "nouveau_uvmm.h"
struct nouveau_drm_tile { struct nouveau_drm_tile {
struct nouveau_fence *fence; struct nouveau_fence *fence;
...@@ -91,6 +93,10 @@ struct nouveau_cli { ...@@ -91,6 +93,10 @@ struct nouveau_cli {
struct nvif_mmu mmu; struct nvif_mmu mmu;
struct nouveau_vmm vmm; struct nouveau_vmm vmm;
struct nouveau_vmm svm; struct nouveau_vmm svm;
struct nouveau_uvmm uvmm;
struct nouveau_sched_entity sched_entity;
const struct nvif_mclass *mem; const struct nvif_mclass *mem;
struct list_head head; struct list_head head;
...@@ -112,6 +118,59 @@ struct nouveau_cli_work { ...@@ -112,6 +118,59 @@ struct nouveau_cli_work {
struct dma_fence_cb cb; struct dma_fence_cb cb;
}; };
static inline struct nouveau_uvmm *
nouveau_cli_uvmm(struct nouveau_cli *cli)
{
if (!cli || !cli->uvmm.vmm.cli)
return NULL;
return &cli->uvmm;
}
static inline struct nouveau_uvmm *
nouveau_cli_uvmm_locked(struct nouveau_cli *cli)
{
struct nouveau_uvmm *uvmm;
mutex_lock(&cli->mutex);
uvmm = nouveau_cli_uvmm(cli);
mutex_unlock(&cli->mutex);
return uvmm;
}
static inline struct nouveau_vmm *
nouveau_cli_vmm(struct nouveau_cli *cli)
{
struct nouveau_uvmm *uvmm;
uvmm = nouveau_cli_uvmm(cli);
if (uvmm)
return &uvmm->vmm;
if (cli->svm.cli)
return &cli->svm;
return &cli->vmm;
}
static inline void
__nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
if (!uvmm)
cli->uvmm.disabled = true;
}
static inline void
nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
{
mutex_lock(&cli->mutex);
__nouveau_cli_disable_uvmm_noinit(cli);
mutex_unlock(&cli->mutex);
}
void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *, void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
struct nouveau_cli_work *); struct nouveau_cli_work *);
...@@ -121,6 +180,32 @@ nouveau_cli(struct drm_file *fpriv) ...@@ -121,6 +180,32 @@ nouveau_cli(struct drm_file *fpriv)
return fpriv ? fpriv->driver_priv : NULL; return fpriv ? fpriv->driver_priv : NULL;
} }
static inline void
u_free(void *addr)
{
kvfree(addr);
}
static inline void *
u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
{
void *mem;
void __user *userptr = (void __force __user *)(uintptr_t)user;
size *= nmemb;
mem = kvmalloc(size, GFP_KERNEL);
if (!mem)
return ERR_PTR(-ENOMEM);
if (copy_from_user(mem, userptr, size)) {
u_free(mem);
return ERR_PTR(-EFAULT);
}
return mem;
}
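u_memcpya() and u_free() above are the usual pattern for pulling a user-space array into the kernel before an ioctl handler walks it. A short sketch of how a handler might use them; the argument struct and helper name are hypothetical, only the u_memcpya()/u_free() calls and struct drm_nouveau_exec_push come from this series:

/* Hypothetical ioctl-argument shape, for illustration only. */
struct example_args {
	u64 ptr;	/* user pointer to an array of drm_nouveau_exec_push */
	u32 count;	/* number of array elements */
};

static int example_copy_pushes(struct example_args *args)
{
	struct drm_nouveau_exec_push *push;

	/* Copy the whole user array in one go; returns ERR_PTR() on failure. */
	push = u_memcpya(args->ptr, args->count, sizeof(*push));
	if (IS_ERR(push))
		return PTR_ERR(push);

	/* ... validate and queue the args->count entries here ... */

	u_free(push);
	return 0;
}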
#include <nvif/object.h> #include <nvif/object.h>
#include <nvif/parent.h> #include <nvif/parent.h>
...@@ -222,6 +307,10 @@ struct nouveau_drm { ...@@ -222,6 +307,10 @@ struct nouveau_drm {
struct mutex lock; struct mutex lock;
bool component_registered; bool component_registered;
} audio; } audio;
struct drm_gpu_scheduler sched;
struct workqueue_struct *sched_wq;
}; };
static inline struct nouveau_drm * static inline struct nouveau_drm *
......

/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_EXEC_H__
#define __NOUVEAU_EXEC_H__
#include <drm/drm_exec.h>
#include "nouveau_drv.h"
#include "nouveau_sched.h"
struct nouveau_exec_job_args {
struct drm_file *file_priv;
struct nouveau_sched_entity *sched_entity;
struct drm_exec exec;
struct nouveau_channel *chan;
struct {
struct drm_nouveau_sync *s;
u32 count;
} in_sync;
struct {
struct drm_nouveau_sync *s;
u32 count;
} out_sync;
struct {
struct drm_nouveau_exec_push *s;
u32 count;
} push;
};
struct nouveau_exec_job {
struct nouveau_job base;
struct nouveau_fence *fence;
struct nouveau_channel *chan;
struct {
struct drm_nouveau_exec_push *s;
u32 count;
} push;
};
#define to_nouveau_exec_job(job) \
container_of((job), struct nouveau_exec_job, base)
int nouveau_exec_job_init(struct nouveau_exec_job **job,
struct nouveau_exec_job_args *args);
int nouveau_exec_ioctl_exec(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#endif
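nouveau_exec_job_args above is the kernel-internal carrier for one EXEC submission. A hedged sketch of how the ioctl path might populate it from already-copied user arrays; the helper name and parameter list are illustrative, and the real wiring lives in the collapsed nouveau_exec.c:

/* Illustrative only: wire user-copied sync/push arrays into the job args.
 * 'pushes', 'in_syncs' and 'out_syncs' are assumed to have been copied
 * from user space already (e.g. with u_memcpya()). */
static void example_fill_exec_args(struct nouveau_exec_job_args *args,
				   struct drm_file *file_priv,
				   struct nouveau_channel *chan,
				   struct nouveau_sched_entity *entity,
				   struct drm_nouveau_exec_push *pushes, u32 npush,
				   struct drm_nouveau_sync *in_syncs, u32 nin,
				   struct drm_nouveau_sync *out_syncs, u32 nout)
{
	args->file_priv = file_priv;
	args->chan = chan;
	args->sched_entity = entity;

	args->push.s = pushes;
	args->push.count = npush;
	args->in_sync.s = in_syncs;
	args->in_sync.count = nin;
	args->out_sync.s = out_syncs;
	args->out_sync.count = nout;
}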
...@@ -17,8 +17,7 @@ struct nouveau_fence { ...@@ -17,8 +17,7 @@ struct nouveau_fence {
unsigned long timeout; unsigned long timeout;
}; };
int nouveau_fence_new(struct nouveau_channel *, bool sysmem, int nouveau_fence_new(struct nouveau_fence **);
struct nouveau_fence **);
void nouveau_fence_unref(struct nouveau_fence **); void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
...@@ -45,7 +44,7 @@ struct nouveau_fence_chan { ...@@ -45,7 +44,7 @@ struct nouveau_fence_chan {
char name[32]; char name[32];
struct nvif_event event; struct nvif_event event;
int notify_ref, dead; int notify_ref, dead, killed;
}; };
struct nouveau_fence_priv { struct nouveau_fence_priv {
......
...@@ -8,3 +8,4 @@ ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ ...@@ -8,3 +8,4 @@ ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm-$(CONFIG_AGP) += ttm_agp_backend.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o obj-$(CONFIG_DRM_TTM) += ttm.o
obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += tests/