Commit 9e994444 authored by Ben Skeggs, committed by Dave Airlie

drm/nouveau/disp/r535: initial support

Adds support for modesetting on RM.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230918202149.4343-38-skeggsb@gmail.com
parent 5bf02571
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
{ AD102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
...
@@ -1592,6 +1592,146 @@ nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
nv_encoder->crtc = NULL;
}
// common/inc/displayport/displayport.h
#define DP_CONFIG_WATERMARK_ADJUST 2
#define DP_CONFIG_WATERMARK_LIMIT 20
#define DP_CONFIG_INCREASED_WATERMARK_ADJUST 8
#define DP_CONFIG_INCREASED_WATERMARK_LIMIT 22
static bool
nv50_sor_dp_watermark_sst(struct nouveau_encoder *outp,
struct nv50_head *head, struct nv50_head_atom *asyh)
{
bool enhancedFraming = outp->dp.dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP;
u64 minRate = outp->dp.link_bw * 1000;
unsigned tuSize = 64;
unsigned waterMark;
unsigned hBlankSym;
unsigned vBlankSym;
unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST;
unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT;
// depth is multiplied by 16 in case of DSC enable
s32 hblank_symbols;
// number of link clocks per line.
int vblank_symbols = 0;
bool bEnableDsc = false;
unsigned surfaceWidth = asyh->mode.h.blanks - asyh->mode.h.blanke;
unsigned rasterWidth = asyh->mode.h.active;
unsigned depth = asyh->or.bpc * 3;
unsigned DSC_FACTOR = bEnableDsc ? 16 : 1;
u64 pixelClockHz = asyh->mode.clock * 1000;
u64 PrecisionFactor = 100000, ratioF, watermarkF;
u32 numLanesPerLink = outp->dp.link_nr;
u32 numSymbolsPerLine;
u32 BlankingBits;
u32 surfaceWidthPerLink;
u32 PixelSteeringBits;
u64 NumBlankingLinkClocks;
u32 MinHBlank;
if (outp->outp.info.dp.increased_wm) {
watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST;
watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT;
}
if ((pixelClockHz * depth) >= (8 * minRate * outp->dp.link_nr * DSC_FACTOR))
{
return false;
}
//
// For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with
// 0 active symbols. This may cause HW hang. Bug 200379426
//
if ((bEnableDsc) &&
((pixelClockHz * depth) < ((8 * minRate * outp->dp.link_nr * DSC_FACTOR) / 64)))
{
return false;
}
//
// Perform the SST calculation.
// For auto mode the watermark calculation does not need to track accumulated error the
// formulas for manual mode will not work. So below calculation was extracted from the DTB.
//
ratioF = ((u64)pixelClockHz * depth * PrecisionFactor) / DSC_FACTOR;
ratioF /= 8 * (u64) minRate * outp->dp.link_nr;
if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below
return false;
watermarkF = ratioF * tuSize * (PrecisionFactor - ratioF) / PrecisionFactor;
waterMark = (unsigned)(watermarkAdjust + ((2 * (depth * PrecisionFactor / (8 * numLanesPerLink * DSC_FACTOR)) + watermarkF) / PrecisionFactor));
//
// Bounds check the watermark
//
numSymbolsPerLine = (surfaceWidth * depth) / (8 * outp->dp.link_nr * DSC_FACTOR);
if (WARN_ON(waterMark > 39 || waterMark > numSymbolsPerLine))
return false;
//
// Clamp the low side
//
if (waterMark < watermarkMinimum)
waterMark = watermarkMinimum;
//Bits to send BS/BE/Extra symbols due to pixel padding
//Also accounts for enhanced framing.
BlankingBits = 3*8*numLanesPerLink + (enhancedFraming ? 3*8*numLanesPerLink : 0);
//VBID/MVID/MAUD sent 4 times all the time
BlankingBits += 3*8*4;
surfaceWidthPerLink = surfaceWidth;
//Extra bits sent due to pixel steering
PixelSteeringBits = (surfaceWidthPerLink % numLanesPerLink) ? (((numLanesPerLink - surfaceWidthPerLink % numLanesPerLink) * depth) / DSC_FACTOR) : 0;
BlankingBits += PixelSteeringBits;
NumBlankingLinkClocks = (u64)BlankingBits * PrecisionFactor / (8 * numLanesPerLink);
MinHBlank = (u32)(NumBlankingLinkClocks * pixelClockHz/ minRate / PrecisionFactor);
MinHBlank += 12;
if (WARN_ON(MinHBlank > rasterWidth - surfaceWidth))
return false;
// Bug 702290 - Active Width should be greater than 60
if (WARN_ON(surfaceWidth <= 60))
return false;
hblank_symbols = (s32)(((u64)(rasterWidth - surfaceWidth - MinHBlank) * minRate) / pixelClockHz);
//reduce HBlank Symbols to account for secondary data packet
hblank_symbols -= 1; //Stuffer latency to send BS
hblank_symbols -= 3; //SPKT latency to send data to stuffer
hblank_symbols -= numLanesPerLink == 1 ? 9 : numLanesPerLink == 2 ? 6 : 3;
hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols;
// Refer to dev_disp.ref for more information.
// # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1;
// where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39
if (surfaceWidth < 40)
{
vblank_symbols = 0;
}
else
{
vblank_symbols = (s32)(((u64)(surfaceWidth - 40) * minRate) / pixelClockHz) - 1;
vblank_symbols -= numLanesPerLink == 1 ? 39 : numLanesPerLink == 2 ? 21 : 12;
}
vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols;
return nvif_outp_dp_sst(&outp->outp, head->base.index, waterMark, hBlankSym, vBlankSym);
}
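For reference, with the PrecisionFactor fixed-point scaling stripped out, the calculation above works out to roughly:

    ratio     = (pixelClockHz * depth) / (8 * minRate * numLanesPerLink * DSC_FACTOR)
    waterMark ~= watermarkAdjust + (2 * depth) / (8 * numLanesPerLink * DSC_FACTOR) + tuSize * ratio * (1 - ratio)

where ratio is the fraction of the link bandwidth consumed by the (possibly DSC-compressed) pixel stream; the result is rejected if it exceeds 39 or the per-line symbol count numSymbolsPerLine, and is raised to at least watermarkMinimum.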
static void
nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
@@ -1679,6 +1819,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
break;
case DCB_OUTPUT_DP:
nouveau_dp_train(nv_encoder, false, mode->clock, asyh->or.bpc);
nv50_sor_dp_watermark_sst(nv_encoder, head, asyh);
depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
if (nv_encoder->outp.or.link & 1)
...
@@ -104,6 +104,7 @@
#define GV100_DISP /* if0010.h */ 0x0000c370
#define TU102_DISP /* if0010.h */ 0x0000c570
#define GA102_DISP /* if0010.h */ 0x0000c670
#define AD102_DISP /* if0010.h */ 0x0000c770
#define GV100_DISP_CAPS 0x0000c373
@@ -154,6 +155,7 @@
#define GV100_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c37d
#define TU102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c57d
#define GA102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c67d
#define AD102_DISP_CORE_CHANNEL_DMA /* if0014.h */ 0x0000c77d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* if0014.h */ 0x0000827e
...
@@ -5,11 +5,29 @@
#include <core/engine.h>
#include <core/object.h>
#include <core/event.h>
#include <subdev/gsp.h>
struct nvkm_disp {
const struct nvkm_disp_func *func;
struct nvkm_engine engine;
struct {
struct nvkm_gsp_client client;
struct nvkm_gsp_device device;
struct nvkm_gsp_object objcom;
struct nvkm_gsp_object object;
#define NVKM_DPYID_PLUG BIT(0)
#define NVKM_DPYID_UNPLUG BIT(1)
#define NVKM_DPYID_IRQ BIT(2)
struct nvkm_event event;
struct nvkm_gsp_event hpd;
struct nvkm_gsp_event irq;
u32 assigned_sors;
} rm;
struct list_head heads;
struct list_head iors;
struct list_head outps;
@@ -69,4 +87,5 @@ int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
int ad102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
#endif
@@ -23,6 +23,9 @@ void nvkm_gsp_sg_free(struct nvkm_device *, struct sg_table *);
typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
struct nvkm_gsp_event;
typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
struct nvkm_gsp {
const struct nvkm_gsp_func *func;
struct nvkm_subdev subdev;
@@ -150,6 +153,8 @@ struct nvkm_gsp {
} object;
struct nvkm_gsp *gsp;
struct list_head events;
} client;
struct nvkm_gsp_device {
@@ -191,6 +196,10 @@ struct nvkm_gsp {
int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
void (*device_dtor)(struct nvkm_gsp_device *);
int (*event_ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
nvkm_gsp_event_func, struct nvkm_gsp_event *);
void (*event_dtor)(struct nvkm_gsp_event *);
} *rm;
struct {
@@ -399,6 +408,32 @@ nvkm_gsp_client_device_ctor(struct nvkm_gsp *gsp,
return ret;
}
struct nvkm_gsp_event {
struct nvkm_gsp_device *device;
u32 id;
nvkm_gsp_event_func func;
struct nvkm_gsp_object object;
struct list_head head;
};
static inline int
nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
return device->object.client->gsp->rm->event_ctor(device, handle, id, func, event);
}
static inline void
nvkm_gsp_event_dtor(struct nvkm_gsp_event *event)
{
struct nvkm_gsp_device *device = event->device;
if (device)
device->object.client->gsp->rm->event_dtor(event);
}
int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
int nvkm_gsp_intr_nonstall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
...
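As a usage sketch (not part of the patch; the handler body and object handle are illustrative), the nvkm_gsp_device_event_ctor()/nvkm_gsp_event_dtor() helpers above pair with the NV_VGPU_MSG_EVENT_POST_EVENT dispatch added further down in this commit: a consumer registers a callback against an RM notifier index on an existing client/device pair, and the r535 display code (collapsed below) is expected to wire up its disp->rm.hpd/irq events the same way.

/* Illustrative sketch only -- not from the patch. */
static void
example_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc)
{
	/* repv/repc carry the RM notification payload; for NV2080_NOTIFIERS_HOTPLUG
	 * it is assumed to follow Nv2080HotplugNotification, but a consumer can
	 * also treat the callback purely as a "something changed" signal.
	 */
}

static int
example_hpd_ctor(struct nvkm_disp *disp)
{
	/* 0x007e0000 is an illustrative object handle; disp->rm.device must already
	 * have been constructed via nvkm_gsp_client_device_ctor().
	 */
	return nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000,
					  NV2080_NOTIFIERS_HOTPLUG, example_hpd,
					  &disp->rm.hpd);
}

/* ...and on teardown: nvkm_gsp_event_dtor(&disp->rm.hpd); */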
#ifndef __src_common_sdk_nvidia_inc_class_cl0005_h__
#define __src_common_sdk_nvidia_inc_class_cl0005_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct NV0005_ALLOC_PARAMETERS {
NvHandle hParentClient;
NvHandle hSrcResource;
NvV32 hClass;
NvV32 notifyIndex;
NV_DECLARE_ALIGNED(NvP64 data, 8);
} NV0005_ALLOC_PARAMETERS;
#endif
#ifndef __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
#define __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NV2080_NOTIFIERS_HOTPLUG (1)
#define NV2080_NOTIFIERS_DP_IRQ (7)
typedef struct {
NvU32 plugDisplayMask;
NvU32 unplugDisplayMask;
} Nv2080HotplugNotification;
typedef struct Nv2080DpIrqNotificationRec {
NvU32 displayId;
} Nv2080DpIrqNotification;
#endif
#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
NvBool bDscSupported;
NvU32 encoderColorFormatMask;
NvU32 lineBufferSizeKB;
NvU32 rateBufferSizeKB;
NvU32 bitsPerPixelPrecision;
NvU32 maxNumHztSlices;
NvU32 lineBufferBitDepth;
} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
#endif
@@ -26,6 +26,40 @@
* DEALINGS IN THE SOFTWARE.
*/
#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
NvU32 subDeviceInstance;
NvU32 flags;
NvU32 numHeads;
} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
NvU32 subDeviceInstance;
NvU32 displayMask;
NvU32 displayMaskDDC;
} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
NvU32 subDeviceInstance;
NvU32 flags;
NvU32 displayMask;
NvU32 retryTimeMs;
} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
NvU32 subDeviceInstance;
NvU32 head;
NvU32 flags;
NvU32 displayId;
} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
#endif
#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
NvU32 event;
NvU32 action;
NvBool bNotifyState;
NvU32 info32;
NvU16 info16;
} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
#endif
@@ -26,6 +26,39 @@
* DEALINGS IN THE SOFTWARE.
*/
#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
NvU32 feHwSysCap;
NvU32 windowPresentMask;
NvBool bFbRemapperEnabled;
NvU32 numHeads;
NvBool bPrimaryVga;
NvU32 i2cPort;
NvU32 internalDispActiveMask;
} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
NvU32 instMemAddrSpace;
NvU32 instMemCpuCacheAttr;
} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
NvU32 addressSpace;
NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
NV_DECLARE_ALIGNED(NvU64 limit, 8);
NvU32 cacheSnoop;
NvU32 hclass;
NvU32 channelInstance;
NvBool valid;
} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128
@@ -81,4 +114,14 @@ typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
NV_DECLARE_ALIGNED(NvU64 size, 8);
} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_ACPI_DSM_READ_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
NvU32 status;
NvU16 backLightDataSize;
NvU8 backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
#endif
@@ -78,6 +78,33 @@
#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001)
#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002)
#define NV01_EVENT_CLIENT_RM (0x04000000)
typedef struct
{
NvV32 channelInstance; // One of the n channel instances of a given channel type.
// Note that core channel has only one instance
// while all others have two (one per head).
NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer
NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
NvU32 offset; // Initial offset for put/get, usually zero.
NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
NvU32 flags;
#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1
#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000
#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001
} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
typedef struct
{
NvV32 channelInstance; // One of the n channel instances of a given channel type.
// All PIO channels have two instances (one per head).
NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
typedef struct
{
NvU32 index;
...
#ifndef __src_nvidia_generated_g_allclasses_h__
#define __src_nvidia_generated_g_allclasses_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e)
#define NV04_DISPLAY_COMMON (0x00000073)
#endif
#ifndef __src_nvidia_generated_g_mem_desc_nvoc_h__
#define __src_nvidia_generated_g_mem_desc_nvoc_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define ADDR_SYSMEM 1 // System memory (PCI)
#define ADDR_FBMEM 2 // Frame buffer memory space
#endif
@@ -91,6 +91,19 @@ typedef struct rpc_run_cpu_sequencer_v17_00
NvU32 commandBuffer[];
} rpc_run_cpu_sequencer_v17_00;
typedef struct rpc_post_event_v17_00
{
NvHandle hClient;
NvHandle hEvent;
NvU32 notifyIndex;
NvU32 data;
NvU16 info16;
NvU32 status;
NvU32 eventDataSize;
NvBool bNotifyList;
NvU8 eventData[];
} rpc_post_event_v17_00;
typedef struct rpc_os_error_log_v17_00
{
NvU32 exceptType;
...
@@ -26,6 +26,8 @@
* DEALINGS IN THE SOFTWARE.
*/
#define MC_ENGINE_IDX_DISP 2
#define MC_ENGINE_IDX_GSP 49
#endif
#ifndef __src_nvidia_inc_kernel_os_nv_memory_type_h__
#define __src_nvidia_inc_kernel_os_nv_memory_type_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NV_MEMORY_WRITECOMBINED 2
#endif
@@ -36,6 +36,7 @@ int
nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
{ AD102_DISP, 0 },
{ GA102_DISP, 0 },
{ TU102_DISP, 0 },
{ GV100_DISP, 0 },
...
@@ -2758,6 +2758,8 @@ nv192_chipset = {
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.disp = { 0x00000001, ad102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2775,6 +2777,8 @@ nv193_chipset = {
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.disp = { 0x00000001, ad102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2792,6 +2796,8 @@ nv194_chipset = {
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.disp = { 0x00000001, ad102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2809,6 +2815,8 @@ nv196_chipset = {
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.disp = { 0x00000001, ad102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
@@ -2826,6 +2834,8 @@ nv197_chipset = {
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.vfn = { 0x00000001, ga100_vfn_new },
.disp = { 0x00000001, ad102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
...
@@ -27,6 +27,9 @@ nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
nvkm-y += nvkm/engine/disp/tu102.o
nvkm-y += nvkm/engine/disp/ga102.o
nvkm-y += nvkm/engine/disp/ad102.o
nvkm-y += nvkm/engine/disp/r535.o
nvkm-y += nvkm/engine/disp/udisp.o
nvkm-y += nvkm/engine/disp/uconn.o
...
/*
* Copyright 2023 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include "chan.h"
#include <subdev/gsp.h>
#include <nvif/class.h>
static const struct nvkm_disp_func
ad102_disp = {
.uevent = &gv100_disp_chan_uevent,
.ramht_size = 0x2000,
.root = { 0, 0,AD102_DISP },
.user = {
{{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{ 0, 0,GA102_DISP_CURSOR }, nvkm_disp_chan_new, &gv100_disp_curs },
{{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
{{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA }, nvkm_disp_core_new, &gv100_disp_core },
{{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA }, nvkm_disp_wndw_new, &gv100_disp_wndw },
{}
},
};
int
ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
if (nvkm_gsp_rm(device->gsp))
return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
return -ENODEV;
}
@@ -137,7 +137,8 @@ nvkm_disp_init(struct nvkm_engine *engine)
* each output resource to 'fully enabled'.
*/
list_for_each_entry(ior, &disp->iors, head) {
-ior->func->power(ior, true, true, true, true, true);
+if (ior->func->power)
+ior->func->power(ior, true, true, true, true, true);
}
return 0;
...
@@ -22,6 +22,10 @@ struct nvkm_disp_chan {
u64 push;
u32 suspend_put;
struct {
struct nvkm_gsp_object object;
} rm;
};
int nvkm_disp_core_new(const struct nvkm_oclass *, void *, u32, struct nvkm_object **);
...
@@ -149,7 +149,7 @@ ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
if (nvkm_gsp_rm(device->gsp))
-return -ENODEV;
+return r535_disp_new(&ga102_disp, device, type, inst, pdisp);
return nvkm_disp_new_(&ga102_disp, device, type, inst, pdisp);
}
@@ -96,7 +96,7 @@ gv100_sor_dp = {
.watermark = gv100_sor_dp_watermark,
};
-static void
+void
gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
@@ -120,7 +120,7 @@ gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 siz
nvkm_mask(device, 0x6f0100 + hoff, 0x00000001, 0x00000001);
}
-static void
+void
gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
...
@@ -187,6 +187,8 @@ int gp100_sor_new(struct nvkm_disp *, int);
int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
void gv100_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
extern const struct nvkm_ior_func_hdmi gv100_sor_hdmi;
void gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *, int, void *, u32);
void gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *, int, void *, u32);
void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
...
@@ -386,7 +386,8 @@ nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
outp->disp = disp;
outp->index = index;
outp->info = *dcbE;
-outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
+if (!disp->rm.client.gsp)
+outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
"edid %x bus %d head %x",
...
@@ -8,6 +8,9 @@ struct nvkm_head;
struct nvkm_outp;
struct dcb_output;
int r535_disp_new(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_disp **);
int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_disp *);
int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
...
This diff is collapsed.
@@ -235,7 +235,7 @@ tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_disp **pdisp)
{
if (nvkm_gsp_rm(device->gsp))
-return -ENODEV;
+return r535_disp_new(&tu102_disp, device, type, inst, pdisp);
return nvkm_disp_new_(&tu102_disp, device, type, inst, pdisp);
}
@@ -30,6 +30,23 @@
#include <nvif/if0011.h>
static int
nvkm_uconn_uevent_gsp(struct nvkm_object *object, u64 token, u32 bits)
{
union nvif_conn_event_args args;
args.v0.version = 0;
args.v0.types = 0;
if (bits & NVKM_DPYID_PLUG)
args.v0.types |= NVIF_CONN_EVENT_V0_PLUG;
if (bits & NVKM_DPYID_UNPLUG)
args.v0.types |= NVIF_CONN_EVENT_V0_UNPLUG;
if (bits & NVKM_DPYID_IRQ)
args.v0.types |= NVIF_CONN_EVENT_V0_IRQ;
return object->client->event(token, &args, sizeof(args.v0));
}
static int
nvkm_uconn_uevent_aux(struct nvkm_object *object, u64 token, u32 bits)
{
@@ -78,13 +95,14 @@ static int
nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
struct nvkm_conn *conn = nvkm_uconn(object);
-struct nvkm_device *device = conn->disp->engine.subdev.device;
+struct nvkm_disp *disp = conn->disp;
+struct nvkm_device *device = disp->engine.subdev.device;
struct nvkm_outp *outp;
union nvif_conn_event_args *args = argv;
u64 bits = 0;
if (!uevent) {
-if (conn->info.hpd == DCB_GPIO_UNUSED)
+if (!disp->rm.client.gsp && conn->info.hpd == DCB_GPIO_UNUSED)
return -ENOSYS;
return 0;
}
@@ -100,6 +118,15 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
if (&outp->head == &conn->disp->outps)
return -EINVAL;
if (disp->rm.client.gsp) {
if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_DPYID_PLUG;
if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_DPYID_UNPLUG;
if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ ) bits |= NVKM_DPYID_IRQ;
return nvkm_uevent_add(uevent, &disp->rm.event, outp->index, bits,
nvkm_uconn_uevent_gsp);
}
if (outp->dp.aux && !outp->info.location) {
if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG ) bits |= NVKM_I2C_PLUG;
if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG;
...
@@ -30,16 +30,20 @@
#include <nvrm/nvtypes.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/class/cl0000.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/class/cl0005.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/class/cl0080.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/class/cl2080.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.54.03/common/sdk/nvidia/inc/nvos.h>
#include <nvrm/535.54.03/common/shared/msgq/inc/msgq/msgq_priv.h>
#include <nvrm/535.54.03/common/uproc/os/common/include/libos_init_args.h>
#include <nvrm/535.54.03/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
#include <nvrm/535.54.03/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
#include <nvrm/535.54.03/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
#include <nvrm/535.54.03/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
#include <nvrm/535.54.03/nvidia/generated/g_allclasses.h>
#include <nvrm/535.54.03/nvidia/generated/g_os_nvoc.h>
#include <nvrm/535.54.03/nvidia/generated/g_rpc-structures.h>
#include <nvrm/535.54.03/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
@@ -360,6 +364,81 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
return repv;
}
static void
r535_gsp_event_dtor(struct nvkm_gsp_event *event)
{
struct nvkm_gsp_device *device = event->device;
struct nvkm_gsp_client *client = device->object.client;
struct nvkm_gsp *gsp = client->gsp;
mutex_lock(&gsp->client_id.mutex);
if (event->func) {
list_del(&event->head);
event->func = NULL;
}
mutex_unlock(&gsp->client_id.mutex);
nvkm_gsp_rm_free(&event->object);
event->device = NULL;
}
static int
r535_gsp_device_event_get(struct nvkm_gsp_event *event)
{
struct nvkm_gsp_device *device = event->device;
NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;
ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
ctrl->event = event->id;
ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
}
static int
r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
struct nvkm_gsp_client *client = device->object.client;
struct nvkm_gsp *gsp = client->gsp;
NV0005_ALLOC_PARAMETERS *args;
int ret;
args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
&event->object);
if (IS_ERR(args))
return PTR_ERR(args);
args->hParentClient = client->object.handle;
args->hSrcResource = 0;
args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
args->data = NULL;
ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
if (ret)
return ret;
event->device = device;
event->id = id;
ret = r535_gsp_device_event_get(event);
if (ret) {
nvkm_gsp_event_dtor(event);
return ret;
}
mutex_lock(&gsp->client_id.mutex);
event->func = func;
list_add(&event->head, &client->events);
mutex_unlock(&gsp->client_id.mutex);
return 0;
}
static void
r535_gsp_device_dtor(struct nvkm_gsp_device *device)
{
@@ -428,6 +507,7 @@ r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
client->gsp = gsp;
client->object.client = client;
INIT_LIST_HEAD(&client->events);
args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
&client->object);
@@ -690,6 +770,9 @@ r535_gsp_rm = {
.device_ctor = r535_gsp_device_ctor,
.device_dtor = r535_gsp_device_dtor,
.event_ctor = r535_gsp_device_event_ctor,
.event_dtor = r535_gsp_event_dtor,
};
static void
@@ -763,6 +846,10 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
type = NVKM_SUBDEV_GSP;
inst = 0;
break;
case MC_ENGINE_IDX_DISP:
type = NVKM_ENGINE_DISP;
inst = 0;
break;
default:
continue;
}
@@ -1151,6 +1238,47 @@ r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
return 0;
}
static int
r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
{
struct nvkm_gsp *gsp = priv;
struct nvkm_gsp_client *client;
struct nvkm_subdev *subdev = &gsp->subdev;
rpc_post_event_v17_00 *msg = repv;
if (WARN_ON(repc < sizeof(*msg)))
return -EINVAL;
if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize))
return -EINVAL;
nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n",
msg->hClient, msg->hEvent, msg->notifyIndex, msg->data,
msg->status, msg->eventDataSize, msg->bNotifyList);
mutex_lock(&gsp->client_id.mutex);
client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff);
if (client) {
struct nvkm_gsp_event *event;
bool handled = false;
list_for_each_entry(event, &client->events, head) {
if (event->object.handle == msg->hEvent) {
event->func(event, msg->eventData, msg->eventDataSize);
handled = true;
}
}
if (!handled) {
nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n",
msg->hClient, msg->hEvent);
}
} else {
nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient);
}
mutex_unlock(&gsp->client_id.mutex);
return 0;
}
static int
r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
{
@@ -1872,6 +2000,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
r535_gsp_msg_run_cpu_sequencer, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
r535_gsp_msg_mmu_fault_queued, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
...