Commit f074037a authored by Joonas Lahtinen

Merge drm-next into drm-intel-next-queued

To pull in the HDCP changes, especially wait_for changes to drm/i915
that Chris wants to build on top of.
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parents 73c0fcac b8a89f53
@@ -471,13 +471,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 unsigned long timeout)
 {
 struct dma_fence *fence;
-unsigned seq, shared_count, i = 0;
+unsigned seq, shared_count;
 long ret = timeout ? timeout : 1;
+int i;
 retry:
 shared_count = 0;
 seq = read_seqcount_begin(&obj->seq);
 rcu_read_lock();
+i = -1;
 fence = rcu_dereference(obj->fence_excl);
 if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
@@ -493,14 +495,14 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 fence = NULL;
 }
-if (!fence && wait_all) {
+if (wait_all) {
 struct reservation_object_list *fobj =
 rcu_dereference(obj->fence);
 if (fobj)
 shared_count = fobj->shared_count;
-for (i = 0; i < shared_count; ++i) {
+for (i = 0; !fence && i < shared_count; ++i) {
 struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
......
@@ -644,6 +644,7 @@ static void ast_crtc_commit(struct drm_crtc *crtc)
 {
 struct ast_private *ast = crtc->dev->dev_private;
 ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+ast_crtc_load_lut(crtc);
 }
......
@@ -230,6 +230,12 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
 if (!dev->master)
 goto out_unlock;
+if (file_priv->master->lessor != NULL) {
+DRM_DEBUG_LEASE("Attempt to drop lessee %d as master\n", file_priv->master->lessee_id);
+ret = -EINVAL;
+goto out_unlock;
+}
 ret = 0;
 drm_drop_master(dev, file_priv);
 out_unlock:
......
@@ -472,7 +472,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 ret = PTR_ERR(dmabuf);
 goto out_free_gem;
 }
-obj->base.dma_buf = dmabuf;
 i915_gem_object_put(obj);
......
@@ -997,9 +997,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 struct intel_vgpu *vgpu = spt->vgpu;
+struct intel_gvt *gvt = vgpu->gvt;
+struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 struct intel_vgpu_ppgtt_spt *s;
 struct intel_gvt_gtt_entry se, ge;
-unsigned long i;
+unsigned long gfn, i;
 int ret;
 trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1007,9 +1009,10 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
 for_each_present_guest_entry(spt, &ge, i) {
-ret = gtt_entry_p2m(vgpu, &ge, &se);
-if (ret)
-goto fail;
+gfn = ops->get_pfn(&ge);
+if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
+gtt_entry_p2m(vgpu, &ge, &se))
+ops->set_pfn(&se, gvt->gtt.scratch_mfn);
 ppgtt_set_shadow_entry(spt, &se, i);
 }
 return 0;
@@ -1906,7 +1909,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
-unsigned long gma;
+unsigned long gma, gfn;
 struct intel_gvt_gtt_entry e, m;
 int ret;
@@ -1925,6 +1928,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 bytes);
 if (ops->test_present(&e)) {
+gfn = ops->get_pfn(&e);
+/* one PTE update may be issued in multiple writes and the
+ * first write may not construct a valid gfn
+ */
+if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+goto out;
+}
 ret = gtt_entry_p2m(vgpu, &e, &m);
 if (ret) {
 gvt_vgpu_err("fail to translate guest gtt entry\n");
@@ -1939,6 +1952,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 }
+out:
 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
 gtt_invalidate(gvt->dev_priv);
 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
......
@@ -2843,6 +2843,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
 MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
 MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
+MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
+MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
+MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
......
@@ -58,6 +58,7 @@ struct intel_gvt_mpt {
 int (*set_opregion)(void *vgpu);
 int (*get_vfio_device)(void *vgpu);
 void (*put_vfio_device)(void *vgpu);
+bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };
 extern struct intel_gvt_mpt xengt_mpt;
......
@@ -1570,6 +1570,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
 return PFN_DOWN(__pa(addr));
 }
+static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+{
+struct kvmgt_guest_info *info;
+struct kvm *kvm;
+if (!handle_valid(handle))
+return false;
+info = (struct kvmgt_guest_info *)handle;
+kvm = info->kvm;
+return kvm_is_visible_gfn(kvm, gfn);
+}
 struct intel_gvt_mpt kvmgt_mpt = {
 .host_init = kvmgt_host_init,
 .host_exit = kvmgt_host_exit,
@@ -1585,6 +1600,7 @@ struct intel_gvt_mpt kvmgt_mpt = {
 .set_opregion = kvmgt_set_opregion,
 .get_vfio_device = kvmgt_get_vfio_device,
 .put_vfio_device = kvmgt_put_vfio_device,
+.is_valid_gfn = kvmgt_is_valid_gfn,
 };
 EXPORT_SYMBOL_GPL(kvmgt_mpt);
......
@@ -80,7 +80,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
 {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
 {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
 {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-{ /* Terminated */ }
+{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
@@ -146,7 +146,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
 {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
-{ /* Terminated */ }
+{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 static struct {
@@ -167,7 +167,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 };
 int ring_id, i;
-for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
 offset.reg = regs[ring_id];
 for (i = 0; i < 64; i++) {
 gen9_render_mocs.control_table[ring_id][i] =
@@ -310,8 +310,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 switch_mocs(pre, next, ring_id);
-mmio = dev_priv->gvt->engine_mmio_list;
-while (i915_mmio_reg_offset((mmio++)->reg)) {
+for (mmio = dev_priv->gvt->engine_mmio_list;
+i915_mmio_reg_valid(mmio->reg); mmio++) {
 if (mmio->ring_id != ring_id)
 continue;
 // save
......
@@ -339,4 +339,21 @@ static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
 intel_gvt_host.mpt->put_vfio_device(vgpu);
 }
+/**
+ * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * true on valid gfn, false on not.
+ */
+static inline bool intel_gvt_hypervisor_is_valid_gfn(
+struct intel_vgpu *vgpu, unsigned long gfn)
+{
+if (!intel_gvt_host.mpt->is_valid_gfn)
+return true;
+return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
+}
 #endif /* _GVT_MPT_H_ */
@@ -308,8 +308,15 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
+struct intel_gvt *gvt = vgpu->gvt;
+struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
 kfree(vgpu->sched_data);
 vgpu->sched_data = NULL;
+/* this vgpu id has been removed */
+if (idr_is_empty(&gvt->vgpu_idr))
+hrtimer_cancel(&sched_data->timer);
 }
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
......
@@ -258,6 +258,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 intel_gvt_debugfs_remove_vgpu(vgpu);
 idr_remove(&gvt->vgpu_idr, vgpu->id);
+if (idr_is_empty(&gvt->vgpu_idr))
+intel_gvt_clean_irq(gvt);
 intel_vgpu_clean_sched_policy(vgpu);
 intel_vgpu_clean_submission(vgpu);
 intel_vgpu_clean_display(vgpu);
......
@@ -75,6 +75,7 @@ int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk110_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
......
@@ -60,6 +60,7 @@ int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
 int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp108_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 #endif
@@ -46,6 +46,16 @@ enum nvkm_therm_attr_type {
 NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
 };
+struct nvkm_therm_clkgate_init {
+u32 addr;
+u8 count;
+u32 data;
+};
+struct nvkm_therm_clkgate_pack {
+const struct nvkm_therm_clkgate_init *init;
+};
 struct nvkm_therm {
 const struct nvkm_therm_func *func;
 struct nvkm_subdev subdev;
@@ -85,17 +95,24 @@ struct nvkm_therm {
 int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
 int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
+bool clkgating_enabled;
 };
 int nvkm_therm_temp_get(struct nvkm_therm *);
 int nvkm_therm_fan_sense(struct nvkm_therm *);
 int nvkm_therm_cstate(struct nvkm_therm *, int, int);
+void nvkm_therm_clkgate_init(struct nvkm_therm *,
+const struct nvkm_therm_clkgate_pack *);
+void nvkm_therm_clkgate_enable(struct nvkm_therm *);
+void nvkm_therm_clkgate_fini(struct nvkm_therm *, bool);
 int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gk104_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
......
@@ -105,4 +105,32 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
 return ioptr;
 }
+static inline void
+nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
+{
+if (*pnvbo) {
+nouveau_bo_unmap(*pnvbo);
+nouveau_bo_unpin(*pnvbo);
+nouveau_bo_ref(NULL, pnvbo);
+}
+}
+static inline int
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+struct nouveau_bo **pnvbo)
+{
+int ret = nouveau_bo_new(cli, size, align, flags,
+0, 0, NULL, NULL, pnvbo);
+if (ret == 0) {
+ret = nouveau_bo_pin(*pnvbo, flags, true);
+if (ret == 0) {
+ret = nouveau_bo_map(*pnvbo);
+if (ret == 0)
+return ret;
+nouveau_bo_unpin(*pnvbo);
+}
+nouveau_bo_ref(NULL, pnvbo);
+}
+return ret;
+}
 #endif
@@ -60,7 +60,6 @@ struct nouveau_crtc {
 } cursor;
 struct {
-struct nouveau_bo *nvbo;
 int depth;
 } lut;
......
@@ -56,6 +56,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
 int nouveau_nofbaccel = 0;
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
+static int nouveau_fbcon_bpp;
+module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
 static void
 nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
@@ -488,7 +492,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 {
 struct nouveau_drm *drm = nouveau_drm(dev);
 struct nouveau_fbdev *fbcon;
-int preferred_bpp;
+int preferred_bpp = nouveau_fbcon_bpp;
 int ret;
 if (!dev->mode_config.num_crtc ||
@@ -512,13 +516,15 @@ nouveau_fbcon_init(struct drm_device *dev)
 if (ret)
 goto fini;
-if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
-preferred_bpp = 8;
-else
-if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
-preferred_bpp = 16;
-else
-preferred_bpp = 32;
+if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
+if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+preferred_bpp = 8;
+else
+if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
+preferred_bpp = 16;
+else
+preferred_bpp = 32;
+}
 /* disable all the possible outputs/crtcs before entering KMS mode */
 if (!drm_drv_uses_atomic_modeset(dev))
......
@@ -28,6 +28,7 @@
 #include <core/option.h>
 #include <subdev/bios.h>
+#include <subdev/therm.h>
 static DEFINE_MUTEX(nv_devices_mutex);
 static LIST_HEAD(nv_devices);
@@ -1682,7 +1683,7 @@ nve4_chipset = {
 .mxm = nv50_mxm_new,
 .pci = gk104_pci_new,
 .pmu = gk104_pmu_new,
-.therm = gf119_therm_new,
+.therm = gk104_therm_new,
 .timer = nv41_timer_new,
 .top = gk104_top_new,
 .volt = gk104_volt_new,
@@ -1721,7 +1722,7 @@ nve6_chipset = {
 .mxm = nv50_mxm_new,
 .pci = gk104_pci_new,
 .pmu = gk104_pmu_new,
-.therm = gf119_therm_new,
+.therm = gk104_therm_new,
 .timer = nv41_timer_new,
 .top = gk104_top_new,
 .volt = gk104_volt_new,
@@ -1760,7 +1761,7 @@ nve7_chipset = {
 .mxm = nv50_mxm_new,
 .pci = gk104_pci_new,
 .pmu = gk104_pmu_new,
-.therm = gf119_therm_new,
+.therm = gk104_therm_new,
 .timer = nv41_timer_new,
 .top = gk104_top_new,
 .volt = gk104_volt_new,
@@ -1811,7 +1812,7 @@ nvf0_chipset = {
 .bus = gf100_bus_new,
 .clk = gk104_clk_new,
 .devinit = gf100_devinit_new,
-.fb = gk104_fb_new,
+.fb = gk110_fb_new,
 .fuse = gf100_fuse_new,
 .gpio = gk104_gpio_new,
 .i2c = gk104_i2c_new,
@@ -1824,7 +1825,7 @@ nvf0_chipset = {
 .mxm = nv50_mxm_new,
 .pci = gk104_pci_new,
 .pmu = gk110_pmu_new,
-.therm = gf119_therm_new,
+.therm = gk104_therm_new,
 .timer = nv41_timer_new,
 .top = gk104_top_new,
 .volt = gk104_volt_new,
@@ -1849,7 +1850,7 @@ nvf1_chipset = {
 .bus = gf100_bus_new,
 .clk = gk104_clk_new,
 .devinit = gf100_devinit_new,
-.fb = gk104_fb_new,
+.fb = gk110_fb_new,
 .fuse = gf100_fuse_new,
 .gpio = gk104_gpio_new,
 .i2c = gk104_i2c_new,
@@ -1862,7 +1863,7 @@ nvf1_chipset = {
 .mxm = nv50_mxm_new,
 .pci = gk104_pci_new,
 .pmu = gk110_pmu_new,
-.therm = gf119_therm_new,
+.therm = gk104_therm_new,
 .timer = nv41_timer_new,
 .top = gk104_top_new,
 .volt = gk104_volt_new,
@@ -1887,7 +1888,7 @@ nv106_chipset = {
 .bus = gf100_bus_new,
 .clk = gk104_clk_new,
 .devinit = gf100_devinit_new,
-.fb = gk104_fb_new,
+.fb = gk110_fb_new,
 .fuse = gf100_fuse_new,
 .gpio = gk104_gpio_new,
 .i2c = gk104_i2c_new,
@@ -1900,7+1901,7 @@ nv106_chipset = {
 .mxm = nv50_mxm_new,
 .pci = gk104_pci_new,
 .pmu = gk208_pmu_new,
-.therm = gf119_therm_new,
+.therm = gk104_therm_new,
 .timer = nv41_timer_new,
 .top = gk104_top_new,
 .volt = gk104_volt_new,
@@ -1925,7 +1926,7 @@ nv108_chipset = {
 .bus = gf100_bus_new,
 .clk = gk104_clk_new,
 .devinit = gf100_devinit_new,
-.fb = gk104_fb_new,
+.fb = gk110_fb_new,
 .fuse = gf100_fuse_new,
 .gpio = gk104_gpio_new,
 .i2c = gk104_i2c_new,
@@ -1938,7 +1939,7 @@ nv108_chipset = {
 .mxm = nv50_mxm_new,
 .pci = gk104_pci_new,
 .pmu = gk208_pmu_new,
-.therm = gf119_therm_new,
+.therm = gk104_therm_new,
 .timer = nv41_timer_new,
 .top = gk104_top_new,
 .volt = gk104_volt_new,
@@ -2345,6 +2346,7 @@ nv138_chipset = {
 .mc = gp100_mc_new,
 .mmu = gp100_mmu_new,
 .therm = gp100_therm_new,
+.secboot = gp108_secboot_new,
 .pci = gp100_pci_new,
 .pmu = gp102_pmu_new,
 .timer = gk20a_timer_new,
@@ -2356,6 +2358,10 @@ nv138_chipset = {
 .disp = gp102_disp_new,
 .dma = gf119_dma_new,
 .fifo = gp100_fifo_new,
+.gr = gp107_gr_new,
+.nvdec = gp102_nvdec_new,
+.sec2 = gp102_sec2_new,
+.sw = gf100_sw_new,
 };
 static const struct nvkm_device_chip
@@ -2508,6 +2514,7 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
 }
 }
+nvkm_therm_clkgate_fini(device->therm, suspend);
 if (device->func->fini)
 device->func->fini(device, suspend);
@@ -2597,6 +2604,7 @@ nvkm_device_init(struct nvkm_device *device)
 }
 nvkm_acpi_init(device);
+nvkm_therm_clkgate_enable(device->therm);
 time = ktime_to_us(ktime_get()) - time;
 nvdev_trace(device, "init completed in %lldus\n", time);
......
@@ -137,6 +137,7 @@ struct gf100_gr_func {
 int (*rops)(struct gf100_gr *);
 int ppc_nr;
 const struct gf100_grctx_func *grctx;
+const struct nvkm_therm_clkgate_pack *clkgate_pack;
 struct nvkm_sclass sclass[];
 };
......
@@ -22,6 +22,7 @@
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
 #include "gf100.h"
+#include "gk104.h"
 #include "ctxgf100.h"
 #include <nvif/class.h>
@@ -173,6 +174,208 @@ gk104_gr_pack_mmio[] = {
 {}
 };
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_main_0[] = {
{ 0x4041f0, 1, 0x00004046 },
{ 0x409890, 1, 0x00000045 },
{ 0x4098b0, 1, 0x0000007f },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_rstr2d_0[] = {
{ 0x4078c0, 1, 0x00000042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_unk_0[] = {
{ 0x406000, 1, 0x00004044 },
{ 0x405860, 1, 0x00004042 },
{ 0x40590c, 1, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gcc_0[] = {
{ 0x408040, 1, 0x00004044 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_sked_0[] = {
{ 0x407000, 1, 0x00004044 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_unk_1[] = {
{ 0x405bf0, 1, 0x00004044 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_ctxctl_0[] = {
{ 0x41a890, 1, 0x00000042 },
{ 0x41a8b0, 1, 0x0000007f },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_unk_0[] = {
{ 0x418500, 1, 0x00004042 },
{ 0x418608, 1, 0x00004042 },
{ 0x418688, 1, 0x00004042 },
{ 0x418718, 1, 0x00000042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_esetup_0[] = {
{ 0x418828, 1, 0x00000044 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_tpbus_0[] = {
{ 0x418bbc, 1, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_zcull_0[] = {
{ 0x418970, 1, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_tpconf_0[] = {
{ 0x418c70, 1, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_unk_1[] = {
{ 0x418cf0, 1, 0x00004042 },
{ 0x418d70, 1, 0x00004042 },
{ 0x418f0c, 1, 0x00004042 },
{ 0x418e0c, 1, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_gcc_0[] = {
{ 0x419020, 1, 0x00004042 },
{ 0x419038, 1, 0x00000042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_ffb_0[] = {
{ 0x418898, 1, 0x00000042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_tex_0[] = {
{ 0x419a40, 9, 0x00004042 },
{ 0x419acc, 1, 0x00004047 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_poly_0[] = {
{ 0x419868, 1, 0x00000042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_l1c_0[] = {
{ 0x419ccc, 3, 0x00000042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_unk_2[] = {
{ 0x419c70, 1, 0x00004045 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_mp_0[] = {
{ 0x419fd0, 1, 0x00004043 },
{ 0x419fd8, 1, 0x00004049 },
{ 0x419fe0, 2, 0x00004042 },
{ 0x419ff0, 1, 0x00004046 },
{ 0x419ff8, 1, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_gpc_ppc_0[] = {
{ 0x41be28, 1, 0x00000042 },
{ 0x41bfe8, 1, 0x00004042 },
{ 0x41bed0, 1, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_rop_zrop_0[] = {
{ 0x408810, 2, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_rop_0[] = {
{ 0x408a80, 6, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_rop_crop_0[] = {
{ 0x4089a8, 1, 0x00004042 },
{ 0x4089b0, 1, 0x00000042 },
{ 0x4089b8, 1, 0x00004042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_clkgate_blcg_init_pxbar_0[] = {
{ 0x13c820, 1, 0x0001007f },
{ 0x13cbe0, 1, 0x00000042 },
{}
};
static const struct nvkm_therm_clkgate_pack
gk104_clkgate_pack[] = {
{ gk104_clkgate_blcg_init_main_0 },
{ gk104_clkgate_blcg_init_rstr2d_0 },
{ gk104_clkgate_blcg_init_unk_0 },
{ gk104_clkgate_blcg_init_gcc_0 },
{ gk104_clkgate_blcg_init_sked_0 },
{ gk104_clkgate_blcg_init_unk_1 },
{ gk104_clkgate_blcg_init_gpc_ctxctl_0 },
{ gk104_clkgate_blcg_init_gpc_unk_0 },
{ gk104_clkgate_blcg_init_gpc_esetup_0 },
{ gk104_clkgate_blcg_init_gpc_tpbus_0 },
{ gk104_clkgate_blcg_init_gpc_zcull_0 },
{ gk104_clkgate_blcg_init_gpc_tpconf_0 },
{ gk104_clkgate_blcg_init_gpc_unk_1 },
{ gk104_clkgate_blcg_init_gpc_gcc_0 },
{ gk104_clkgate_blcg_init_gpc_ffb_0 },
{ gk104_clkgate_blcg_init_gpc_tex_0 },
{ gk104_clkgate_blcg_init_gpc_poly_0 },
{ gk104_clkgate_blcg_init_gpc_l1c_0 },
{ gk104_clkgate_blcg_init_gpc_unk_2 },
{ gk104_clkgate_blcg_init_gpc_mp_0 },
{ gk104_clkgate_blcg_init_gpc_ppc_0 },
{ gk104_clkgate_blcg_init_rop_zrop_0 },
{ gk104_clkgate_blcg_init_rop_0 },
{ gk104_clkgate_blcg_init_rop_crop_0 },
{ gk104_clkgate_blcg_init_pxbar_0 },
{}
};
 /*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/
@@ -214,6 +417,9 @@ gk104_gr_init(struct gf100_gr *gr)
 gr->func->init_gpc_mmu(gr);
 gf100_gr_mmio(gr, gr->func->mmio);
+if (gr->func->clkgate_pack)
+nvkm_therm_clkgate_init(gr->base.engine.subdev.device->therm,
+gr->func->clkgate_pack);
 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
@@ -338,6 +544,7 @@ gk104_gr = {
 .rops = gf100_gr_rops,
 .ppc_nr = 1,
 .grctx = &gk104_grctx,
+.clkgate_pack = gk104_clkgate_pack,
 .sclass = {
 { -1, -1, FERMI_TWOD_A },
 { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
......
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Lyude Paul <lyude@redhat.com>
*/
#ifndef __GK104_GR_H__
#define __GK104_GR_H__
#include <subdev/therm.h>
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_main_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rstr2d_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gcc_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_sked_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_1[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ctxctl_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_esetup_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpbus_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_zcull_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpconf_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_1[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_gcc_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ffb_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tex_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_poly_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_l1c_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_2[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_mp_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ppc_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_zrop_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_crop_0[];
extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_pxbar_0[];
#endif
@@ -22,6 +22,7 @@
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
 #include "gf100.h"
+#include "gk104.h"
 #include "ctxgf100.h"
 #include <subdev/timer.h>
@@ -156,6 +157,159 @@ gk110_gr_pack_mmio[] = {
 {}
 };
static const struct nvkm_therm_clkgate_init
gk110_clkgate_blcg_init_sked_0[] = {
{ 0x407000, 1, 0x00004041 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_blcg_init_gpc_gcc_0[] = {
{ 0x419020, 1, 0x00000042 },
{ 0x419038, 1, 0x00000042 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_blcg_init_gpc_l1c_0[] = {
{ 0x419cd4, 2, 0x00004042 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_blcg_init_gpc_mp_0[] = {
{ 0x419fd0, 1, 0x00004043 },
{ 0x419fd8, 1, 0x00004049 },
{ 0x419fe0, 2, 0x00004042 },
{ 0x419ff0, 1, 0x00000046 },
{ 0x419ff8, 1, 0x00004042 },
{ 0x419f90, 1, 0x00004042 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_main_0[] = {
{ 0x4041f4, 1, 0x00000000 },
{ 0x409894, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_unk_0[] = {
{ 0x406004, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_sked_0[] = {
{ 0x407004, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_gpc_ctxctl_0[] = {
{ 0x41a894, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_gpc_unk_0[] = {
{ 0x418504, 1, 0x00000000 },
{ 0x41860c, 1, 0x00000000 },
{ 0x41868c, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_gpc_esetup_0[] = {
{ 0x41882c, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_gpc_zcull_0[] = {
{ 0x418974, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_gpc_l1c_0[] = {
{ 0x419cd8, 2, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_gpc_unk_1[] = {
{ 0x419c74, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_gpc_mp_0[] = {
{ 0x419fd4, 1, 0x00004a4a },
{ 0x419fdc, 1, 0x00000014 },
{ 0x419fe4, 1, 0x00000000 },
{ 0x419ff4, 1, 0x00001724 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_gpc_ppc_0[] = {
{ 0x41be2c, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_init
gk110_clkgate_slcg_init_pcounter_0[] = {
{ 0x1be018, 1, 0x000001ff },
{ 0x1bc018, 1, 0x000001ff },
{ 0x1b8018, 1, 0x000001ff },
{ 0x1b4124, 1, 0x00000000 },
{}
};
static const struct nvkm_therm_clkgate_pack
gk110_clkgate_pack[] = {
{ gk104_clkgate_blcg_init_main_0 },
{ gk104_clkgate_blcg_init_rstr2d_0 },
{ gk104_clkgate_blcg_init_unk_0 },
{ gk104_clkgate_blcg_init_gcc_0 },
{ gk110_clkgate_blcg_init_sked_0 },
{ gk104_clkgate_blcg_init_unk_1 },
{ gk104_clkgate_blcg_init_gpc_ctxctl_0 },
{ gk104_clkgate_blcg_init_gpc_unk_0 },
{ gk104_clkgate_blcg_init_gpc_esetup_0 },
{ gk104_clkgate_blcg_init_gpc_tpbus_0 },
{ gk104_clkgate_blcg_init_gpc_zcull_0 },
{ gk104_clkgate_blcg_init_gpc_tpconf_0 },
{ gk104_clkgate_blcg_init_gpc_unk_1 },
{ gk110_clkgate_blcg_init_gpc_gcc_0 },
{ gk104_clkgate_blcg_init_gpc_ffb_0 },
{ gk104_clkgate_blcg_init_gpc_tex_0 },
{ gk104_clkgate_blcg_init_gpc_poly_0 },
{ gk110_clkgate_blcg_init_gpc_l1c_0 },
{ gk104_clkgate_blcg_init_gpc_unk_2 },
{ gk110_clkgate_blcg_init_gpc_mp_0 },
{ gk104_clkgate_blcg_init_gpc_ppc_0 },
{ gk104_clkgate_blcg_init_rop_zrop_0 },
{ gk104_clkgate_blcg_init_rop_0 },
{ gk104_clkgate_blcg_init_rop_crop_0 },
{ gk104_clkgate_blcg_init_pxbar_0 },
{ gk110_clkgate_slcg_init_main_0 },
{ gk110_clkgate_slcg_init_unk_0 },
{ gk110_clkgate_slcg_init_sked_0 },
{ gk110_clkgate_slcg_init_gpc_ctxctl_0 },
{ gk110_clkgate_slcg_init_gpc_unk_0 },
{ gk110_clkgate_slcg_init_gpc_esetup_0 },
{ gk110_clkgate_slcg_init_gpc_zcull_0 },
{ gk110_clkgate_slcg_init_gpc_l1c_0 },
{ gk110_clkgate_slcg_init_gpc_unk_1 },
{ gk110_clkgate_slcg_init_gpc_mp_0 },
{ gk110_clkgate_slcg_init_gpc_ppc_0 },
{ gk110_clkgate_slcg_init_pcounter_0 },
{}
};
 /*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/
@@ -192,6 +346,7 @@ gk110_gr = {
 .rops = gf100_gr_rops,
 .ppc_nr = 2,
 .grctx = &gk110_grctx,
+.clkgate_pack = gk110_clkgate_pack,
 .sclass = {
 { -1, -1, FERMI_TWOD_A },
 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
......
@@ -462,7 +462,7 @@ nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
 args->v0.id = di;
 args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
-strncpy(args->v0.name, dom->name, sizeof(args->v0.name));
+strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);
 /* Currently only global counters (PCOUNTER) are implemented
 * but this will be different for local counters (MP). */
@@ -514,7 +514,7 @@ nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
 "/%s/%02x", dom->name, si);
 } else {
 strncpy(args->v0.name, sig->name,
-sizeof(args->v0.name));
+sizeof(args->v0.name) - 1);
 }
 args->v0.signal = si;
@@ -572,7 +572,7 @@ nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
 args->v0.source = sig->source[si];
 args->v0.mask = src->mask;
-strncpy(args->v0.name, src->name, sizeof(args->v0.name));
+strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
 }
 if (++si < source_nr) {
......
@@ -505,6 +505,7 @@ nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
 ret = msgqueue_0137bca5_new(falcon, sb, queue);
 break;
 case 0x0148cdec:
+case 0x015ccf3e:
 ret = msgqueue_0148cdec_new(falcon, sb, queue);
 break;
 default:
......
@@ -110,6 +110,7 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
 struct nvkm_device *device = clk->base.subdev.device;
 u32 ctrl = nvkm_rd32(device, pll + 0);
 u32 sclk = 0, P = 1, N = 1, M = 1;
+u32 MP;
 if (!(ctrl & 0x00000008)) {
 if (ctrl & 0x00000001) {
@@ -130,10 +131,12 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
 sclk = read_clk(clk, 0x10 + idx, false);
 }
-if (M * P)
-return sclk * N / (M * P);
-return 0;
+MP = M * P;
+if (!MP)
+return 0;
+return sclk * N / MP;
 }
static int static int
......
@@ -22,6 +22,7 @@ nvkm-y += nvkm/subdev/fb/mcp89.o
 nvkm-y += nvkm/subdev/fb/gf100.o
 nvkm-y += nvkm/subdev/fb/gf108.o
 nvkm-y += nvkm/subdev/fb/gk104.o
+nvkm-y += nvkm/subdev/fb/gk110.o
 nvkm-y += nvkm/subdev/fb/gk20a.o
 nvkm-y += nvkm/subdev/fb/gm107.o
 nvkm-y += nvkm/subdev/fb/gm200.o
......
@@ -26,6 +26,7 @@
 #include <core/memory.h>
 #include <core/option.h>
+#include <subdev/therm.h>
 void
 gf100_fb_intr(struct nvkm_fb *base)
@@ -92,6 +93,11 @@ gf100_fb_init(struct nvkm_fb *base)
 if (fb->r100c10_page)
 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+if (base->func->clkgate_pack) {
+nvkm_therm_clkgate_init(device->therm,
+base->func->clkgate_pack);
+}
 }
 void *
......
@@ -20,10 +20,56 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
+* Lyude Paul
 */
+#include "gk104.h"
 #include "gf100.h"
 #include "ram.h"
/*
*******************************************************************************
* PGRAPH registers for clockgating
*******************************************************************************
*/
const struct nvkm_therm_clkgate_init
gk104_fb_clkgate_blcg_init_unk_0[] = {
{ 0x100d10, 1, 0x0000c244 },
{ 0x100d30, 1, 0x0000c242 },
{ 0x100d3c, 1, 0x00000242 },
{ 0x100d48, 1, 0x00000242 },
{ 0x100d1c, 1, 0x00000042 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_fb_clkgate_blcg_init_vm_0[] = {
{ 0x100c98, 1, 0x00000242 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_fb_clkgate_blcg_init_main_0[] = {
{ 0x10f000, 1, 0x00000042 },
{ 0x17e030, 1, 0x00000044 },
{ 0x17e040, 1, 0x00000044 },
{}
};
const struct nvkm_therm_clkgate_init
gk104_fb_clkgate_blcg_init_bcast_0[] = {
{ 0x17ea60, 4, 0x00000044 },
{}
};
static const struct nvkm_therm_clkgate_pack
gk104_fb_clkgate_pack[] = {
{ gk104_fb_clkgate_blcg_init_unk_0 },
{ gk104_fb_clkgate_blcg_init_vm_0 },
{ gk104_fb_clkgate_blcg_init_main_0 },
{ gk104_fb_clkgate_blcg_init_bcast_0 },
{}
};
 static const struct nvkm_fb_func
 gk104_fb = {
 .dtor = gf100_fb_dtor,
@@ -33,6 +79,7 @@ gk104_fb = {
 .intr = gf100_fb_intr,
 .ram_new = gk104_ram_new,
 .default_bigpage = 17,
+.clkgate_pack = gk104_fb_clkgate_pack,
 };
 int
......
/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Lyude Paul
*/
#ifndef __GK104_FB_H__
#define __GK104_FB_H__
#include <subdev/therm.h>
extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_unk_0[];
extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_vm_0[];
extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_main_0[];
extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_bcast_0[];
#endif
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Lyude Paul
*/
#include "gf100.h"
#include "gk104.h"
#include "ram.h"
#include <subdev/therm.h>
#include <subdev/fb.h>
/*
*******************************************************************************
* PGRAPH registers for clockgating
*******************************************************************************
*/
static const struct nvkm_therm_clkgate_init
gk110_fb_clkgate_blcg_init_unk_0[] = {
{ 0x100d10, 1, 0x0000c242 },
{ 0x100d30, 1, 0x0000c242 },
{ 0x100d3c, 1, 0x00000242 },
{ 0x100d48, 1, 0x0000c242 },
{ 0x100d1c, 1, 0x00000042 },
{}
};
static const struct nvkm_therm_clkgate_pack
gk110_fb_clkgate_pack[] = {
{ gk110_fb_clkgate_blcg_init_unk_0 },
{ gk104_fb_clkgate_blcg_init_vm_0 },
{ gk104_fb_clkgate_blcg_init_main_0 },
{ gk104_fb_clkgate_blcg_init_bcast_0 },
{}
};
static const struct nvkm_fb_func
gk110_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
.default_bigpage = 17,
.clkgate_pack = gk110_fb_clkgate_pack,
};
int
gk110_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
{
return gf100_fb_new_(&gk110_fb, device, index, pfb);
}
@@ -3,6 +3,7 @@
 #define __NVKM_FB_PRIV_H__
 #define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
 #include <subdev/fb.h>
+#include <subdev/therm.h>
 struct nvkm_bios;
 struct nvkm_fb_func {
@@ -27,6 +28,7 @@ struct nvkm_fb_func {
 int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 u8 default_bigpage;
+const struct nvkm_therm_clkgate_pack *clkgate_pack;
 };
 void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
......
@@ -106,7 +106,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 } else
 return ret;
-if (IS_ERR((memory = nvkm_umem_search(client, handle)))) {
+memory = nvkm_umem_search(client, handle);
+if (IS_ERR(memory)) {
 VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
 return PTR_ERR(memory);
 }
......
@@ -642,7 +642,7 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
 else
 block = (size >> page[i].shift) << page[i].shift;
 } else {
-block = (size >> page[i].shift) << page[i].shift;;
+block = (size >> page[i].shift) << page[i].shift;
 }
 /* Perform operation. */
......
@@ -82,15 +82,15 @@ memx_train_tail:
 // $r0 - zero
 memx_func_enter:
 #if NVKM_PPWR_CHIPSET == GT215
-movw $r8 0x1610
+mov $r8 0x1610
 nv_rd32($r7, $r8)
 imm32($r6, 0xfffffffc)
 and $r7 $r6
-movw $r6 0x2
+mov $r6 0x2
 or $r7 $r6
 nv_wr32($r8, $r7)
 #else
-movw $r6 0x001620
+mov $r6 0x001620
 imm32($r7, ~0x00000aa2);
 nv_rd32($r8, $r6)
 and $r8 $r7
@@ -101,7 +101,7 @@ memx_func_enter:
 and $r8 $r7
 nv_wr32($r6, $r8)
-movw $r6 0x0026f0
+mov $r6 0x0026f0
 nv_rd32($r8, $r6)
 and $r8 $r7
 nv_wr32($r6, $r8)
@@ -136,19 +136,19 @@ memx_func_leave:
 bra nz #memx_func_leave_wait
 #if NVKM_PPWR_CHIPSET == GT215
-movw $r8 0x1610
+mov $r8 0x1610
 nv_rd32($r7, $r8)
 imm32($r6, 0xffffffcc)
 and $r7 $r6
 nv_wr32($r8, $r7)
 #else
-movw $r6 0x0026f0
+mov $r6 0x0026f0
 imm32($r7, 0x00000001)
 nv_rd32($r8, $r6)
 or $r8 $r7
 nv_wr32($r6, $r8)
-movw $r6 0x001620
+mov $r6 0x001620
 nv_rd32($r8, $r6)
 or $r8 $r7
 nv_wr32($r6, $r8)
@@ -177,11 +177,11 @@ memx_func_wait_vblank:
 bra #memx_func_wait_vblank_fini
 memx_func_wait_vblank_head1:
-movw $r7 0x20
+mov $r7 0x20
 bra #memx_func_wait_vblank_0
 memx_func_wait_vblank_head0:
-movw $r7 0x8
+mov $r7 0x8
 memx_func_wait_vblank_0:
 nv_iord($r6, NV_PPWR_INPUT)
@@ -273,13 +273,13 @@ memx_func_train:
 // $r5 - outer loop counter
 // $r6 - inner loop counter
 // $r7 - entry counter (#memx_train_head + $r7)
-movw $r5 0x3
-movw $r7 0x0
+mov $r5 0x3
+mov $r7 0x0
 // Read random memory to wake up... things
 imm32($r9, 0x700000)
 nv_rd32($r8,$r9)
-movw $r14 0x2710
+mov $r14 0x2710
 call(nsec)
 memx_func_train_loop_outer:
@@ -289,9 +289,9 @@ memx_func_train:
 nv_wr32($r9, $r8)
 push $r5
-movw $r6 0x0
+mov $r6 0x0
 memx_func_train_loop_inner:
-movw $r8 0x1111
+mov $r8 0x1111
 mulu $r9 $r6 $r8
 shl b32 $r8 $r9 0x10
 or $r8 $r9
@@ -315,7 +315,7 @@ memx_func_train:
 // $r5 - inner inner loop counter
 // $r9 - result
-movw $r5 0
+mov $r5 0
 imm32($r9, 0x8300ffff)
 memx_func_train_loop_4x:
 imm32($r10, 0x100080)
......
...@@ -7,8 +7,10 @@ nvkm-y += nvkm/subdev/secboot/acr_r352.o ...@@ -7,8 +7,10 @@ nvkm-y += nvkm/subdev/secboot/acr_r352.o
nvkm-y += nvkm/subdev/secboot/acr_r361.o nvkm-y += nvkm/subdev/secboot/acr_r361.o
nvkm-y += nvkm/subdev/secboot/acr_r364.o nvkm-y += nvkm/subdev/secboot/acr_r364.o
nvkm-y += nvkm/subdev/secboot/acr_r367.o nvkm-y += nvkm/subdev/secboot/acr_r367.o
nvkm-y += nvkm/subdev/secboot/acr_r370.o
nvkm-y += nvkm/subdev/secboot/acr_r375.o nvkm-y += nvkm/subdev/secboot/acr_r375.o
nvkm-y += nvkm/subdev/secboot/gm200.o nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o nvkm-y += nvkm/subdev/secboot/gm20b.o
nvkm-y += nvkm/subdev/secboot/gp102.o nvkm-y += nvkm/subdev/secboot/gp102.o
nvkm-y += nvkm/subdev/secboot/gp108.o
nvkm-y += nvkm/subdev/secboot/gp10b.o nvkm-y += nvkm/subdev/secboot/gp10b.o
...@@ -64,6 +64,7 @@ struct nvkm_acr *acr_r352_new(unsigned long); ...@@ -64,6 +64,7 @@ struct nvkm_acr *acr_r352_new(unsigned long);
struct nvkm_acr *acr_r361_new(unsigned long); struct nvkm_acr *acr_r361_new(unsigned long);
struct nvkm_acr *acr_r364_new(unsigned long); struct nvkm_acr *acr_r364_new(unsigned long);
struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long); struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long); struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
#endif #endif
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "acr_r370.h"
#include "acr_r367.h"
#include <core/msgqueue.h>
#include <engine/falcon.h>
#include <engine/sec2.h>
static void
acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
struct acr_r370_flcn_bl_desc *desc = _desc;
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
u64 base, addr_code, addr_data;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
desc->ctx_dma = FALCON_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->non_sec_code_off = pdesc->app_resident_code_offset;
desc->non_sec_code_size = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
}
const struct acr_r352_ls_func
acr_r370_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
};
const struct acr_r352_ls_func
acr_r370_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
/* GPCCS will be loaded using PRI */
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
static void
acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
struct acr_r370_flcn_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
/* For some reason we should not add app_resident_code_offset here */
addr_code = base;
addr_data = base + pdesc->app_resident_data_offset;
addr_args = sec->falcon->data.limit;
addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->non_sec_code_off = pdesc->app_resident_code_offset;
desc->non_sec_code_size = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
desc->argc = 1;
/* args are stored at the beginning of EMEM */
desc->argv = 0x01000000;
}
const struct acr_r352_ls_func
acr_r370_ls_sec2_func = {
.load = acr_ls_ucode_load_sec2,
.generate_bl_desc = acr_r370_generate_sec2_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.post_run = acr_ls_sec2_post_run,
};
void
acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
u64 offset)
{
struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;
bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
bl_desc->code_entry_point = 0;
bl_desc->code_dma_base = u64_to_flcn64(offset);
bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
bl_desc->data_size = hdr->data_size;
}
const struct acr_r352_func
acr_r370_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
.generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.shadow_blob = true,
.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
.ls_fill_headers = acr_r367_ls_fill_headers,
.ls_write_wpr = acr_r367_ls_write_wpr,
.ls_func = {
[NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
[NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
},
};
struct nvkm_acr *
acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
unsigned long managed_falcons)
{
return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
}
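For readers less familiar with the WPR layout, the arithmetic in acr_r370_generate_flcn_bl_desc() above resolves the falcon's resident code and data to absolute addresses inside the write-protected region before they are packed into the bootloader descriptor. The following standalone illustration of that math uses hypothetical numbers; none of these values come from the commit.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs -- purely illustrative, not taken from the diff. */
	uint64_t wpr_addr     = 0x00000001f0000000ULL; /* assumed WPR base address     */
	uint64_t ucode_off    = 0x00020000;            /* assumed img->ucode_off       */
	uint64_t app_start    = 0x00000100;            /* pdesc->app_start_offset      */
	uint64_t res_code_off = 0x00000000;            /* app_resident_code_offset     */
	uint64_t res_data_off = 0x00004000;            /* app_resident_data_offset     */

	uint64_t base      = wpr_addr + ucode_off + app_start;
	uint64_t addr_code = base + res_code_off;      /* feeds desc->code_dma_base */
	uint64_t addr_data = base + res_data_off;      /* feeds desc->data_dma_base */

	printf("code @ %#" PRIx64 ", data @ %#" PRIx64 "\n", addr_code, addr_data);
	return 0;
}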
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVKM_SECBOOT_ACR_R370_H__
#define __NVKM_SECBOOT_ACR_R370_H__
#include "priv.h"
struct hsf_load_header;
/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
struct acr_r370_flcn_bl_desc {
u32 reserved[4];
u32 signature[4];
u32 ctx_dma;
struct flcn_u64 code_dma_base;
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 sec_code_off;
u32 sec_code_size;
u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size;
u32 argc;
u32 argv;
};
void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
#endif
...@@ -20,90 +20,12 @@ ...@@ -20,90 +20,12 @@
* DEALINGS IN THE SOFTWARE. * DEALINGS IN THE SOFTWARE.
*/ */
#include "acr_r370.h"
#include "acr_r367.h" #include "acr_r367.h"
#include <engine/falcon.h>
#include <core/msgqueue.h> #include <core/msgqueue.h>
#include <subdev/pmu.h> #include <subdev/pmu.h>
/*
* r375 ACR: similar to r367, but with a unified bootloader descriptor
* structure for GR and PMU falcons.
*/
/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
struct acr_r375_flcn_bl_desc {
u32 reserved[4];
u32 signature[4];
u32 ctx_dma;
struct flcn_u64 code_dma_base;
u32 non_sec_code_off;
u32 non_sec_code_size;
u32 sec_code_off;
u32 sec_code_size;
u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size;
u32 argc;
u32 argv;
};
static void
acr_r375_generate_flcn_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
struct acr_r375_flcn_bl_desc *desc = _desc;
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
u64 base, addr_code, addr_data;
base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
desc->ctx_dma = FALCON_DMAIDX_UCODE;
desc->code_dma_base = u64_to_flcn64(addr_code);
desc->non_sec_code_off = pdesc->app_resident_code_offset;
desc->non_sec_code_size = pdesc->app_resident_code_size;
desc->code_entry_point = pdesc->app_imem_entry;
desc->data_dma_base = u64_to_flcn64(addr_data);
desc->data_size = pdesc->app_resident_data_size;
}
static void
acr_r375_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
u64 offset)
{
struct acr_r375_flcn_bl_desc *bl_desc = _bl_desc;
bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
bl_desc->code_entry_point = 0;
bl_desc->code_dma_base = u64_to_flcn64(offset);
bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
bl_desc->data_size = hdr->data_size;
}
const struct acr_r352_ls_func
acr_r375_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
.generate_bl_desc = acr_r375_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
};
const struct acr_r352_ls_func
acr_r375_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
.generate_bl_desc = acr_r375_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
/* GPCCS will be loaded using PRI */
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
static void static void
acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr, acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr, const struct ls_ucode_img *img, u64 wpr_addr,
...@@ -111,7 +33,7 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr, ...@@ -111,7 +33,7 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
{ {
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_pmu *pmu = acr->subdev->device->pmu; const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
struct acr_r375_flcn_bl_desc *desc = _desc; struct acr_r370_flcn_bl_desc *desc = _desc;
u64 base, addr_code, addr_data; u64 base, addr_code, addr_data;
u32 addr_args; u32 addr_args;
...@@ -136,23 +58,22 @@ const struct acr_r352_ls_func ...@@ -136,23 +58,22 @@ const struct acr_r352_ls_func
acr_r375_ls_pmu_func = { acr_r375_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu, .load = acr_ls_ucode_load_pmu,
.generate_bl_desc = acr_r375_generate_pmu_bl_desc, .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
.bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc), .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.post_run = acr_ls_pmu_post_run, .post_run = acr_ls_pmu_post_run,
}; };
const struct acr_r352_func const struct acr_r352_func
acr_r375_func = { acr_r375_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc, .fixup_hs_desc = acr_r367_fixup_hs_desc,
.generate_hs_bl_desc = acr_r375_generate_hs_bl_desc, .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc), .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.shadow_blob = true, .shadow_blob = true,
.ls_ucode_img_load = acr_r367_ls_ucode_img_load, .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
.ls_fill_headers = acr_r367_ls_fill_headers, .ls_fill_headers = acr_r367_ls_fill_headers,
.ls_write_wpr = acr_r367_ls_write_wpr, .ls_write_wpr = acr_r367_ls_write_wpr,
.ls_func = { .ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r375_ls_fecs_func, [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r375_ls_gpccs_func, [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func, [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
}, },
}; };
......
...@@ -133,7 +133,7 @@ gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob, ...@@ -133,7 +133,7 @@ gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
return gm200_secboot_run_blob(sb, blob, falcon); return gm200_secboot_run_blob(sb, blob, falcon);
} }
static const struct nvkm_secboot_func const struct nvkm_secboot_func
gp102_secboot = { gp102_secboot = {
.dtor = gm200_secboot_dtor, .dtor = gm200_secboot_dtor,
.oneinit = gm200_secboot_oneinit, .oneinit = gm200_secboot_oneinit,
......
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "gm200.h"
#include "acr.h"
int
gp108_secboot_new(struct nvkm_device *device, int index,
struct nvkm_secboot **psb)
{
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
BIT(NVKM_SECBOOT_FALCON_FECS) |
BIT(NVKM_SECBOOT_FALCON_GPCCS) |
BIT(NVKM_SECBOOT_FALCON_SEC2));
if (IS_ERR(acr))
return PTR_ERR(acr);
if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
acr->func->dtor(acr);
return -ENOMEM;
}
*psb = &gsb->base;
return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
}
MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
...@@ -40,6 +40,8 @@ int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *, ...@@ -40,6 +40,8 @@ int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
int nvkm_secboot_falcon_reset(struct nvkm_secboot *); int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
int nvkm_secboot_falcon_run(struct nvkm_secboot *); int nvkm_secboot_falcon_run(struct nvkm_secboot *);
extern const struct nvkm_secboot_func gp102_secboot;
struct flcn_u64 { struct flcn_u64 {
u32 lo; u32 lo;
u32 hi; u32 hi;
......
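struct flcn_u64 above carries a 64-bit DMA address as two 32-bit halves, which is why the acr_r370 descriptor code goes through u64_to_flcn64() when filling code_dma_base and data_dma_base. The helper's body is not part of this diff, so the following is only a sketch of what it plausibly looks like, using the standard upper_32_bits()/lower_32_bits() kernel helpers:

#include <linux/kernel.h>	/* upper_32_bits(), lower_32_bits() */

/* Sketch only -- mirrors how a lo/hi address pair is typically filled in. */
static inline struct flcn_u64 u64_to_flcn64(u64 u)
{
	struct flcn_u64 ret = {
		.lo = lower_32_bits(u),
		.hi = upper_32_bits(u),
	};

	return ret;
}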