Commit a9d90860 authored by Ben Skeggs

drm/nouveau/pmu/gm20b,gp10b: boot RTOS from PMU init

Cleanup before falcon changes.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent ccdc0431
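
In effect, booting the PMU RTOS moves out of the ACR "boot" hook (gm20b_pmu_acr_boot is deleted below) and into the PMU subdev's own init/fini path, with ACR taking a reference on the RTOS falcon's owning subdev for as long as the WPR image is loaded. A condensed sketch of the new gm20b init path, assembled from the hunks below rather than from a complete file:

	/* Sketch only: condensed from the gm20b.c hunks in this commit. */
	static int
	gm20b_pmu_init(struct nvkm_pmu *pmu)
	{
		struct nvkm_falcon *falcon = &pmu->falcon;
		struct nv_pmu_args args = { .secure_mode = true };
		u32 addr_args = falcon->data.limit - sizeof(args);
		int ret;

		/* Acquire the falcon before programming it. */
		ret = nvkm_falcon_get(&pmu->falcon, &pmu->subdev);
		if (ret)
			return ret;

		pmu->initmsg_received = false;

		/* Hand the RTOS its boot arguments at the top of DMEM, then start it. */
		nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
		nvkm_falcon_start(falcon);
		return 0;
	}
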
@@ -16,7 +16,7 @@ enum nvkm_falcon_dmaidx {
 struct nvkm_falcon {
 	const struct nvkm_falcon_func *func;
-	const struct nvkm_subdev *owner;
+	struct nvkm_subdev *owner;
 	const char *name;
 	u32 addr;
@@ -24,7 +24,7 @@ struct nvkm_falcon {
 	struct mutex dmem_mutex;
 	bool oneinit;
-	const struct nvkm_subdev *user;
+	struct nvkm_subdev *user;
 	u8 version;
 	u8 secret;
@@ -50,8 +50,8 @@ struct nvkm_falcon {
 	struct nvkm_engine engine;
 };

-int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
-void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
+int nvkm_falcon_get(struct nvkm_falcon *, struct nvkm_subdev *);
+void nvkm_falcon_put(struct nvkm_falcon *, struct nvkm_subdev *);
 int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
 		     enum nvkm_subdev_type, int inst, bool enable, u32 addr, struct nvkm_engine **);
...
@@ -50,6 +50,7 @@ struct nvkm_acr {
 	struct nvkm_vmm *vmm;
 	bool done;
+	struct nvkm_acr_lsf *rtos;
 	const struct firmware *wpr_fw;
 	bool wpr_comp;
...
@@ -169,7 +169,7 @@ nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
 }

 void
-nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
+nvkm_falcon_put(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
 {
 	if (unlikely(!falcon))
 		return;
@@ -183,7 +183,7 @@ nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
 }

 int
-nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
+nvkm_falcon_get(struct nvkm_falcon *falcon, struct nvkm_subdev *user)
 {
 	int ret = 0;
...
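
Dropping const from the owner/user subdev pointers is what allows ACR to pin the RTOS falcon's owning subdev with the refcounting helpers used in the acr/base.c hunks below, roughly (error handling elided):

	/* Load: pin the subdev that owns the RTOS falcon. */
	ret = nvkm_subdev_ref(rtos->falcon->owner);
	/* ... */
	/* Unload: release it again. */
	nvkm_subdev_unref(acr->rtos->falcon->owner);
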
@@ -63,10 +63,30 @@ nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name)
 	return 0;
 }

+static struct nvkm_acr_lsf *
+nvkm_acr_rtos(struct nvkm_acr *acr)
+{
+	struct nvkm_acr_lsf *lsf;
+
+	if (acr) {
+		list_for_each_entry(lsf, &acr->lsf, head) {
+			if (lsf->func->bootstrap_falcon)
+				return lsf;
+		}
+	}
+
+	return NULL;
+}
+
 static void
 nvkm_acr_unload(struct nvkm_acr *acr)
 {
 	if (acr->done) {
+		if (acr->rtos) {
+			nvkm_subdev_unref(acr->rtos->falcon->owner);
+			acr->rtos = NULL;
+		}
+
 		nvkm_acr_hsf_boot(acr, "unload");
 		acr->done = false;
 	}
@@ -76,6 +96,7 @@ static int
 nvkm_acr_load(struct nvkm_acr *acr)
 {
 	struct nvkm_subdev *subdev = &acr->subdev;
+	struct nvkm_acr_lsf *rtos = nvkm_acr_rtos(acr);
 	struct nvkm_acr_lsf *lsf;
 	u64 start, limit;
 	int ret;
@@ -100,6 +121,14 @@ nvkm_acr_load(struct nvkm_acr *acr)
 	acr->done = true;

+	if (rtos) {
+		ret = nvkm_subdev_ref(rtos->falcon->owner);
+		if (ret)
+			return ret;
+
+		acr->rtos = rtos;
+	}
+
 	list_for_each_entry(lsf, &acr->lsf, head) {
 		if (lsf->func->boot) {
 			ret = lsf->func->boot(lsf->falcon);
@@ -118,33 +147,17 @@ nvkm_acr_reload(struct nvkm_acr *acr)
 	return nvkm_acr_load(acr);
 }

-static struct nvkm_acr_lsf *
-nvkm_acr_falcon(struct nvkm_device *device)
-{
-	struct nvkm_acr *acr = device->acr;
-	struct nvkm_acr_lsf *lsf;
-
-	if (acr) {
-		list_for_each_entry(lsf, &acr->lsf, head) {
-			if (lsf->func->bootstrap_falcon)
-				return lsf;
-		}
-	}
-
-	return NULL;
-}
-
 int
 nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
 {
-	struct nvkm_acr_lsf *acrflcn = nvkm_acr_falcon(device);
 	struct nvkm_acr *acr = device->acr;
+	struct nvkm_acr_lsf *rtos = nvkm_acr_rtos(acr);
 	unsigned long id;

 	/* If there's no LS FW managing bootstrapping of other LS falcons,
 	 * we depend on the HS firmware being able to do it instead.
 	 */
-	if (!acrflcn) {
+	if (!rtos) {
 		/* Which isn't possible everywhere... */
 		if ((mask & acr->func->bootstrap_falcons) == mask) {
 			int ret = nvkm_acr_reload(acr);
@@ -156,16 +169,14 @@ nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
 		return -ENOSYS;
 	}

-	if ((mask & acrflcn->func->bootstrap_falcons) != mask)
+	if ((mask & rtos->func->bootstrap_falcons) != mask)
 		return -ENOSYS;

-	if (acrflcn->func->bootstrap_multiple_falcons) {
-		return acrflcn->func->
-			bootstrap_multiple_falcons(acrflcn->falcon, mask);
-	}
+	if (rtos->func->bootstrap_multiple_falcons)
+		return rtos->func->bootstrap_multiple_falcons(rtos->falcon, mask);

 	for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) {
-		int ret = acrflcn->func->bootstrap_falcon(acrflcn->falcon, id);
+		int ret = rtos->func->bootstrap_falcon(rtos->falcon, id);
 		if (ret)
 			return ret;
 	}
@@ -189,6 +200,9 @@ nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
 static int
 nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
 {
+	if (!subdev->use.enabled)
+		return 0;
+
 	nvkm_acr_unload(nvkm_acr(subdev));
 	return 0;
 }
@@ -196,10 +210,12 @@ nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
 static int
 nvkm_acr_init(struct nvkm_subdev *subdev)
 {
-	if (!nvkm_acr_falcon(subdev->device))
+	struct nvkm_acr *acr = nvkm_acr(subdev);
+
+	if (!nvkm_acr_rtos(acr))
 		return 0;

-	return nvkm_acr_load(nvkm_acr(subdev));
+	return nvkm_acr_load(acr);
 }

 static void
@@ -218,7 +234,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
 	struct nvkm_acr *acr = nvkm_acr(subdev);
 	struct nvkm_acr_hsfw *hsfw;
 	struct nvkm_acr_lsfw *lsfw, *lsft;
-	struct nvkm_acr_lsf *lsf;
+	struct nvkm_acr_lsf *lsf, *rtos;
 	u32 wpr_size = 0;
 	u64 falcons;
 	int ret, i;
@@ -260,10 +276,10 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
 	}

 	/* Ensure the falcon that'll provide ACR functions is booted first. */
-	lsf = nvkm_acr_falcon(device);
-	if (lsf) {
-		falcons = lsf->func->bootstrap_falcons;
-		list_move(&lsf->head, &acr->lsf);
+	rtos = nvkm_acr_rtos(acr);
+	if (rtos) {
+		falcons = rtos->func->bootstrap_falcons;
+		list_move(&rtos->head, &acr->lsf);
 	} else {
 		falcons = acr->func->bootstrap_falcons;
 	}
@@ -301,7 +317,7 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
 		nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size);

 	if (!acr->wpr_fw || acr->wpr_comp)
-		acr->func->wpr_build(acr, nvkm_acr_falcon(device));
+		acr->func->wpr_build(acr, rtos);
 	acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev);

 	if (acr->wpr_fw && acr->wpr_comp) {
...
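
With the RTOS lookup centralised in nvkm_acr_rtos(), callers keep using the same entry point as before. A hypothetical caller (the helper and mask values are exactly the ones already appearing in these hunks) might look like:

	/* Hypothetical usage: ask ACR to (re)bootstrap the FECS and GPCCS falcons. */
	int ret = nvkm_acr_bootstrap_falcons(device,
					     BIT_ULL(NVKM_ACR_LSF_FECS) |
					     BIT_ULL(NVKM_ACR_LSF_GPCCS));
	if (ret)
		return ret;
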
@@ -87,13 +87,6 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
 	if (pmu->func->fini)
 		pmu->func->fini(pmu);

-	flush_work(&pmu->recv.work);
-	reinit_completion(&pmu->wpr_ready);
-
-	nvkm_falcon_cmdq_fini(pmu->lpq);
-	nvkm_falcon_cmdq_fini(pmu->hpq);
-	pmu->initmsg_received = false;
 	return 0;
 }
...
@@ -62,16 +62,6 @@ gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
 	return ret;
 }

-int
-gm20b_pmu_acr_boot(struct nvkm_falcon *falcon)
-{
-	struct nv_pmu_args args = { .secure_mode = true };
-	const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args);
-
-	nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
-	nvkm_falcon_start(falcon);
-	return 0;
-}
-
 void
 gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
 {
@@ -125,7 +115,6 @@ gm20b_pmu_acr = {
 	.bld_size = sizeof(struct loader_config),
 	.bld_write = gm20b_pmu_acr_bld_write,
 	.bld_patch = gm20b_pmu_acr_bld_patch,
-	.boot = gm20b_pmu_acr_boot,
 	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
 			     BIT_ULL(NVKM_ACR_LSF_FECS) |
 			     BIT_ULL(NVKM_ACR_LSF_GPCCS),
@@ -198,8 +187,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
 	if (!pmu->initmsg_received) {
 		int ret = pmu->func->initmsg(pmu);
 		if (ret) {
-			nvkm_error(&pmu->subdev,
-				   "error parsing init message: %d\n", ret);
+			nvkm_error(&pmu->subdev, "error parsing init message: %d\n", ret);
 			return;
 		}
@@ -209,9 +197,44 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
 	nvkm_falcon_msgq_recv(pmu->msgq);
 }

+static void
+gm20b_pmu_fini(struct nvkm_pmu *pmu)
+{
+	/*TODO: shutdown RTOS. */
+
+	flush_work(&pmu->recv.work);
+	nvkm_falcon_cmdq_fini(pmu->lpq);
+	nvkm_falcon_cmdq_fini(pmu->hpq);
+
+	reinit_completion(&pmu->wpr_ready);
+
+	nvkm_falcon_put(&pmu->falcon, &pmu->subdev);
+}
+
+static int
+gm20b_pmu_init(struct nvkm_pmu *pmu)
+{
+	struct nvkm_falcon *falcon = &pmu->falcon;
+	struct nv_pmu_args args = { .secure_mode = true };
+	u32 addr_args = falcon->data.limit - sizeof(args);
+	int ret;
+
+	ret = nvkm_falcon_get(&pmu->falcon, &pmu->subdev);
+	if (ret)
+		return ret;
+
+	pmu->initmsg_received = false;
+
+	nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+	nvkm_falcon_start(falcon);
+	return 0;
+}
+
 const struct nvkm_pmu_func
 gm20b_pmu = {
 	.flcn = &gm200_pmu_flcn,
+	.init = gm20b_pmu_init,
+	.fini = gm20b_pmu_fini,
 	.intr = gt215_pmu_intr,
 	.recv = gm20b_pmu_recv,
 	.initmsg = gm20b_pmu_initmsg,
...
@@ -68,7 +68,6 @@ gp10b_pmu_acr = {
 	.bld_size = sizeof(struct loader_config),
 	.bld_write = gm20b_pmu_acr_bld_write,
 	.bld_patch = gm20b_pmu_acr_bld_patch,
-	.boot = gm20b_pmu_acr_boot,
 	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
 			     BIT_ULL(NVKM_ACR_LSF_FECS) |
 			     BIT_ULL(NVKM_ACR_LSF_GPCCS),
...
@@ -178,6 +178,7 @@ void
 gt215_pmu_fini(struct nvkm_pmu *pmu)
 {
 	nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
+	flush_work(&pmu->recv.work);
 }

 static void
...
@@ -50,7 +50,6 @@ extern const struct nvkm_falcon_func gm200_pmu_flcn;
 extern const struct nvkm_pmu_func gm20b_pmu;
 void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
 void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
-int gm20b_pmu_acr_boot(struct nvkm_falcon *);
 int gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *, enum nvkm_acr_lsf_id);

 struct nvkm_pmu_fwif {
...