Commit 8f491c89 authored by Alexandre Courbot, committed by Ben Skeggs

drm/nouveau/secboot: generate HS BL descriptor in hook

Use the HS hook to completely generate the HS BL descriptor, similarly
to what is done in the LS hook, instead of (arbitrarily) using the
acr_v1 format as an intermediate.

This allows us to make the bootloader descriptor structures private to
each implementation, resulting in a cleaner and more consistent design.
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent e781ff91
...@@ -175,21 +175,45 @@ struct hsf_fw_header { ...@@ -175,21 +175,45 @@ struct hsf_fw_header {
u32 hdr_size; u32 hdr_size;
}; };
/** /**
* struct hsf_load_header - HS firmware load header * struct gm200_flcn_bl_desc - DMEM bootloader descriptor
* @signature: 16B signature for secure code. 0s if no secure code
* @ctx_dma: DMA context to be used by BL while loading code/data
* @code_dma_base: 256B-aligned Physical FB Address where code is located
* (falcon's $xcbase register)
* @non_sec_code_off: offset from code_dma_base where the non-secure code is
* located. The offset must be multiple of 256 to help perf
* @non_sec_code_size: the size of the nonSecure code part.
* @sec_code_off: offset from code_dma_base where the secure code is
* located. The offset must be multiple of 256 to help perf
* @sec_code_size: the size of the secure code part.
* @code_entry_point: code entry point which will be invoked by BL after
* code is loaded.
* @data_dma_base: 256B aligned Physical FB Address where data is located.
* (falcon's $xdbase register)
* @data_size: size of data block. Should be multiple of 256B
*
* Structure used by the bootloader to load the rest of the code. This has
* to be filled by host and copied into DMEM at offset provided in the
* hsflcn_bl_desc.bl_desc_dmem_load_off.
*/ */
struct hsf_load_header { struct gm200_flcn_bl_desc {
u32 reserved[4];
u32 signature[4];
u32 ctx_dma;
struct flcn_u64 code_dma_base;
u32 non_sec_code_off; u32 non_sec_code_off;
u32 non_sec_code_size; u32 non_sec_code_size;
u32 data_dma_base;
u32 data_size;
u32 num_apps;
struct {
u32 sec_code_off; u32 sec_code_off;
u32 sec_code_size; u32 sec_code_size;
} app[0]; u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size;
}; };
/** /**
* Convenience function to duplicate a firmware file in memory and check that * Convenience function to duplicate a firmware file in memory and check that
* it has the required minimum size. * it has the required minimum size.
...@@ -739,39 +763,6 @@ gm200_secboot_hsf_patch_signature(struct gm200_secboot *gsb, void *acr_image) ...@@ -739,39 +763,6 @@ gm200_secboot_hsf_patch_signature(struct gm200_secboot *gsb, void *acr_image)
memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size); memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
} }
/**
* gm200_secboot_populate_hsf_bl_desc() - populate BL descriptor for HS image
*/
static void
gm200_secboot_populate_hsf_bl_desc(void *acr_image,
struct gm200_flcn_bl_desc *bl_desc)
{
struct fw_bin_header *hsbin_hdr = acr_image;
struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
struct hsf_load_header *load_hdr = acr_image + fw_hdr->hdr_offset;
/*
* Descriptor for the bootloader that will load the ACR image into
* IMEM/DMEM memory.
*/
fw_hdr = acr_image + hsbin_hdr->header_offset;
load_hdr = acr_image + fw_hdr->hdr_offset;
memset(bl_desc, 0, sizeof(*bl_desc));
bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
bl_desc->non_sec_code_off = load_hdr->non_sec_code_off;
bl_desc->non_sec_code_size = load_hdr->non_sec_code_size;
bl_desc->sec_code_off = load_hdr->app[0].sec_code_off;
bl_desc->sec_code_size = load_hdr->app[0].sec_code_size;
bl_desc->code_entry_point = 0;
/*
* We need to set code_dma_base to the virtual address of the acr_blob,
* and add this address to data_dma_base before writing it into DMEM
*/
bl_desc->code_dma_base.lo = 0;
bl_desc->data_dma_base.lo = load_hdr->data_dma_base;
bl_desc->data_size = load_hdr->data_size;
}
/** /**
* struct hsflcn_acr_desc - data section of the HS firmware * struct hsflcn_acr_desc - data section of the HS firmware
* *
...@@ -847,37 +838,44 @@ gm200_secboot_fixup_hs_desc(struct gm200_secboot *gsb, ...@@ -847,37 +838,44 @@ gm200_secboot_fixup_hs_desc(struct gm200_secboot *gsb,
static int static int
gm200_secboot_prepare_hs_blob(struct gm200_secboot *gsb, const char *fw, gm200_secboot_prepare_hs_blob(struct gm200_secboot *gsb, const char *fw,
struct nvkm_gpuobj **blob, struct nvkm_gpuobj **blob,
struct gm200_flcn_bl_desc *bl_desc, bool patch) struct hsf_load_header *load_header, bool patch)
{ {
struct nvkm_subdev *subdev = &gsb->base.subdev; struct nvkm_subdev *subdev = &gsb->base.subdev;
void *acr_image; void *acr_image;
struct fw_bin_header *hsbin_hdr; struct fw_bin_header *hsbin_hdr;
struct hsf_fw_header *fw_hdr; struct hsf_fw_header *fw_hdr;
void *acr_data;
struct hsf_load_header *load_hdr; struct hsf_load_header *load_hdr;
struct hsflcn_acr_desc *desc; void *acr_data;
int ret; int ret;
acr_image = gm200_secboot_load_firmware(subdev, fw, 0); acr_image = gm200_secboot_load_firmware(subdev, fw, 0);
if (IS_ERR(acr_image)) if (IS_ERR(acr_image))
return PTR_ERR(acr_image); return PTR_ERR(acr_image);
hsbin_hdr = acr_image; hsbin_hdr = acr_image;
fw_hdr = acr_image + hsbin_hdr->header_offset;
load_hdr = acr_image + fw_hdr->hdr_offset;
acr_data = acr_image + hsbin_hdr->data_offset;
/* Patch signature */ /* Patch signature */
gm200_secboot_hsf_patch_signature(gsb, acr_image); gm200_secboot_hsf_patch_signature(gsb, acr_image);
acr_data = acr_image + hsbin_hdr->data_offset;
/* Patch descriptor with WPR information? */ /* Patch descriptor with WPR information? */
if (patch) { if (patch) {
fw_hdr = acr_image + hsbin_hdr->header_offset; struct hsflcn_acr_desc *desc;
load_hdr = acr_image + fw_hdr->hdr_offset;
desc = acr_data + load_hdr->data_dma_base; desc = acr_data + load_hdr->data_dma_base;
gm200_secboot_fixup_hs_desc(gsb, desc); gm200_secboot_fixup_hs_desc(gsb, desc);
} }
/* Generate HS BL descriptor */ if (load_hdr->num_apps > GM200_ACR_MAX_APPS) {
gm200_secboot_populate_hsf_bl_desc(acr_image, bl_desc); nvkm_error(subdev, "more apps (%d) than supported (%d)!",
load_hdr->num_apps, GM200_ACR_MAX_APPS);
ret = -EINVAL;
goto cleanup;
}
memcpy(load_header, load_hdr, sizeof(*load_header) +
(sizeof(load_hdr->app[0]) * load_hdr->num_apps));
/* Create ACR blob and copy HS data to it */ /* Create ACR blob and copy HS data to it */
ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256), ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
...@@ -938,7 +936,7 @@ gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb) ...@@ -938,7 +936,7 @@ gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb)
if (!gsb->acr_load_blob) { if (!gsb->acr_load_blob) {
ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load", ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
&gsb->acr_load_blob, &gsb->acr_load_blob,
&gsb->acr_load_bl_desc, true); &gsb->load_bl_header, true);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -966,7 +964,7 @@ gm200_secboot_prepare_blobs(struct gm200_secboot *gsb) ...@@ -966,7 +964,7 @@ gm200_secboot_prepare_blobs(struct gm200_secboot *gsb)
if (!gsb->acr_unload_blob) { if (!gsb->acr_unload_blob) {
ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload", ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
&gsb->acr_unload_blob, &gsb->acr_unload_blob,
&gsb->acr_unload_bl_desc, false); &gsb->unload_bl_header, false);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -1037,8 +1035,7 @@ gm200_secboot_load_hs_bl(struct gm200_secboot *gsb, struct nvkm_falcon *falcon, ...@@ -1037,8 +1035,7 @@ gm200_secboot_load_hs_bl(struct gm200_secboot *gsb, struct nvkm_falcon *falcon,
* gm200_secboot_run_hs_blob() - run the given high-secure blob * gm200_secboot_run_hs_blob() - run the given high-secure blob
*/ */
static int static int
gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob, gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob)
struct gm200_flcn_bl_desc *desc)
{ {
struct nvkm_subdev *subdev = &gsb->base.subdev; struct nvkm_subdev *subdev = &gsb->base.subdev;
struct fw_bin_header *hdr = gsb->hsbl_blob; struct fw_bin_header *hdr = gsb->hsbl_blob;
...@@ -1046,11 +1043,22 @@ gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob, ...@@ -1046,11 +1043,22 @@ gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob,
struct nvkm_falcon *falcon = gsb->base.boot_falcon; struct nvkm_falcon *falcon = gsb->base.boot_falcon;
const u32 virt_addr = hsbl_desc->start_tag << 8; const u32 virt_addr = hsbl_desc->start_tag << 8;
const u32 bl_desc_size = gsb->func->bl_desc_size; const u32 bl_desc_size = gsb->func->bl_desc_size;
const struct hsf_load_header *load_hdr;
u8 bl_desc[bl_desc_size]; u8 bl_desc[bl_desc_size];
struct nvkm_vma vma; struct nvkm_vma vma;
u64 vma_addr;
int ret; int ret;
/* Find the bootloader descriptor for our blob and copy it */
if (blob == gsb->acr_load_blob) {
load_hdr = &gsb->load_bl_header;
} else if (blob == gsb->acr_unload_blob) {
load_hdr = &gsb->unload_bl_header;
} else {
nvkm_error(&gsb->base.subdev, "invalid secure boot blob!\n");
return -EINVAL;
}
ret = nvkm_falcon_get(falcon, subdev); ret = nvkm_falcon_get(falcon, subdev);
if (ret) if (ret)
return ret; return ret;
...@@ -1062,21 +1070,13 @@ gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob, ...@@ -1062,21 +1070,13 @@ gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob,
return ret; return ret;
} }
/* Add the mapping address to the DMA bases */ /* Generate the BL header */
vma_addr = flcn64_to_u64(desc->code_dma_base) + vma.offset; gsb->func->generate_bl_desc(load_hdr, bl_desc, vma.offset);
desc->code_dma_base.lo = lower_32_bits(vma_addr);
desc->code_dma_base.hi = upper_32_bits(vma_addr);
vma_addr = flcn64_to_u64(desc->data_dma_base) + vma.offset;
desc->data_dma_base.lo = lower_32_bits(vma_addr);
desc->data_dma_base.hi = upper_32_bits(vma_addr);
/* Fixup the BL header */
gsb->func->fixup_bl_desc(desc, &bl_desc);
/* Reset and set the falcon up */ /* Reset and set the falcon up */
ret = nvkm_falcon_reset(falcon); ret = nvkm_falcon_reset(falcon);
if (ret) if (ret)
goto done; goto end;
nvkm_falcon_bind_context(falcon, gsb->inst); nvkm_falcon_bind_context(falcon, gsb->inst);
/* Load the HS bootloader into the falcon's IMEM/DMEM */ /* Load the HS bootloader into the falcon's IMEM/DMEM */
...@@ -1087,25 +1087,17 @@ gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob, ...@@ -1087,25 +1087,17 @@ gm200_secboot_run_hs_blob(struct gm200_secboot *gsb, struct nvkm_gpuobj *blob,
nvkm_falcon_start(falcon); nvkm_falcon_start(falcon);
ret = nvkm_falcon_wait_for_halt(falcon, 100); ret = nvkm_falcon_wait_for_halt(falcon, 100);
if (ret) if (ret)
goto done; goto end;
/* If mailbox register contains an error code, then ACR has failed */ /* If mailbox register contains an error code, then ACR has failed */
ret = nvkm_falcon_rd32(falcon, 0x040); ret = nvkm_falcon_rd32(falcon, 0x040);
if (ret) { if (ret) {
nvkm_error(subdev, "ACR boot failed, ret 0x%08x", ret); nvkm_error(subdev, "ACR boot failed, ret 0x%08x", ret);
ret = -EINVAL; ret = -EINVAL;
goto done; goto end;
} }
done: end:
/* Restore the original DMA addresses */
vma_addr = flcn64_to_u64(desc->code_dma_base) - vma.offset;
desc->code_dma_base.lo = lower_32_bits(vma_addr);
desc->code_dma_base.hi = upper_32_bits(vma_addr);
vma_addr = flcn64_to_u64(desc->data_dma_base) - vma.offset;
desc->data_dma_base.lo = lower_32_bits(vma_addr);
desc->data_dma_base.hi = upper_32_bits(vma_addr);
/* We don't need the ACR firmware anymore */ /* We don't need the ACR firmware anymore */
nvkm_gpuobj_unmap(&vma); nvkm_gpuobj_unmap(&vma);
nvkm_falcon_put(falcon, subdev); nvkm_falcon_put(falcon, subdev);
...@@ -1145,15 +1137,13 @@ gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon) ...@@ -1145,15 +1137,13 @@ gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
/* If WPR is set and we have an unload blob, run it to unlock WPR */ /* If WPR is set and we have an unload blob, run it to unlock WPR */
if (gsb->acr_unload_blob && if (gsb->acr_unload_blob &&
gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) { gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) {
ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob, ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob);
&gsb->acr_unload_bl_desc);
if (ret) if (ret)
return ret; return ret;
} }
/* Reload all managed falcons */ /* Reload all managed falcons */
ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_load_blob, ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_load_blob);
&gsb->acr_load_bl_desc);
if (ret) if (ret)
return ret; return ret;
...@@ -1211,8 +1201,7 @@ gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend) ...@@ -1211,8 +1201,7 @@ gm200_secboot_fini(struct nvkm_secboot *sb, bool suspend)
/* Run the unload blob to unprotect the WPR region */ /* Run the unload blob to unprotect the WPR region */
if (gsb->acr_unload_blob && if (gsb->acr_unload_blob &&
gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE) gsb->falcon_state[NVKM_SECBOOT_FALCON_FECS] != NON_SECURE)
ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob, ret = gm200_secboot_run_hs_blob(gsb, gsb->acr_unload_blob);
&gsb->acr_unload_bl_desc);
for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++) for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
gsb->falcon_state[i] = NON_SECURE; gsb->falcon_state[i] = NON_SECURE;
...@@ -1250,21 +1239,29 @@ gm200_secboot = { ...@@ -1250,21 +1239,29 @@ gm200_secboot = {
.boot_falcon = NVKM_SECBOOT_FALCON_PMU, .boot_falcon = NVKM_SECBOOT_FALCON_PMU,
}; };
/**
* gm200_fixup_bl_desc - just copy the BL descriptor
*
* Use the GM200 descriptor format by default.
*/
static void static void
gm200_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret) gm200_secboot_generate_bl_desc(const struct hsf_load_header *hdr,
void *_bl_desc, u64 offset)
{ {
memcpy(ret, desc, sizeof(*desc)); struct gm200_flcn_bl_desc *bl_desc = _bl_desc;
memset(bl_desc, 0, sizeof(*bl_desc));
bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
bl_desc->sec_code_off = hdr->app[0].sec_code_off;
bl_desc->sec_code_size = hdr->app[0].sec_code_size;
bl_desc->code_entry_point = 0;
bl_desc->code_dma_base = u64_to_flcn64(offset);
bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
bl_desc->data_size = hdr->data_size;
} }
static const struct gm200_secboot_func static const struct gm200_secboot_func
gm200_secboot_func = { gm200_secboot_func = {
.bl_desc_size = sizeof(struct gm200_flcn_bl_desc), .bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
.fixup_bl_desc = gm200_secboot_fixup_bl_desc, .generate_bl_desc = gm200_secboot_generate_bl_desc,
.prepare_blobs = gm200_secboot_prepare_blobs, .prepare_blobs = gm200_secboot_prepare_blobs,
}; };
......
...@@ -88,41 +88,28 @@ gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb) ...@@ -88,41 +88,28 @@ gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
return 0; return 0;
} }
/**
* gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
*
* There is only a slight format difference (DMA addresses being 32-bits and
* 256B-aligned) to address.
*/
static void static void
gm20b_secboot_fixup_bl_desc(const struct gm200_flcn_bl_desc *desc, void *ret) gm20b_secboot_generate_bl_desc(const struct hsf_load_header *load_hdr,
void *_bl_desc, u64 offset)
{ {
struct gm20b_flcn_bl_desc *gdesc = ret; struct gm20b_flcn_bl_desc *bl_desc = _bl_desc;
u64 addr;
memset(bl_desc, 0, sizeof(*bl_desc));
memcpy(gdesc->reserved, desc->reserved, sizeof(gdesc->reserved)); bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
memcpy(gdesc->signature, desc->signature, sizeof(gdesc->signature)); bl_desc->non_sec_code_off = load_hdr->non_sec_code_off;
gdesc->ctx_dma = desc->ctx_dma; bl_desc->non_sec_code_size = load_hdr->non_sec_code_size;
addr = desc->code_dma_base.hi; bl_desc->sec_code_off = load_hdr->app[0].sec_code_off;
addr <<= 32; bl_desc->sec_code_size = load_hdr->app[0].sec_code_size;
addr |= desc->code_dma_base.lo; bl_desc->code_entry_point = 0;
gdesc->code_dma_base = lower_32_bits(addr >> 8); bl_desc->code_dma_base = offset >> 8;
gdesc->non_sec_code_off = desc->non_sec_code_off; bl_desc->data_dma_base = (offset + load_hdr->data_dma_base) >> 8;
gdesc->non_sec_code_size = desc->non_sec_code_size; bl_desc->data_size = load_hdr->data_size;
gdesc->sec_code_off = desc->sec_code_off;
gdesc->sec_code_size = desc->sec_code_size;
gdesc->code_entry_point = desc->code_entry_point;
addr = desc->data_dma_base.hi;
addr <<= 32;
addr |= desc->data_dma_base.lo;
gdesc->data_dma_base = lower_32_bits(addr >> 8);
gdesc->data_size = desc->data_size;
} }
static const struct gm200_secboot_func static const struct gm200_secboot_func
gm20b_secboot_func = { gm20b_secboot_func = {
.bl_desc_size = sizeof(struct gm20b_flcn_bl_desc), .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
.fixup_bl_desc = gm20b_secboot_fixup_bl_desc, .generate_bl_desc = gm20b_secboot_generate_bl_desc,
.prepare_blobs = gm20b_secboot_prepare_blobs, .prepare_blobs = gm20b_secboot_prepare_blobs,
}; };
......
...@@ -214,46 +214,39 @@ struct flcn_u64 { ...@@ -214,46 +214,39 @@ struct flcn_u64 {
u32 lo; u32 lo;
u32 hi; u32 hi;
}; };
static inline u64 flcn64_to_u64(const struct flcn_u64 f) static inline u64 flcn64_to_u64(const struct flcn_u64 f)
{ {
return ((u64)f.hi) << 32 | f.lo; return ((u64)f.hi) << 32 | f.lo;
} }
static inline struct flcn_u64 u64_to_flcn64(u64 u)
{
struct flcn_u64 ret;
ret.hi = upper_32_bits(u);
ret.lo = lower_32_bits(u);
return ret;
}
#define GM200_ACR_MAX_APPS 8
struct hsf_load_header_app {
u32 sec_code_off;
u32 sec_code_size;
};
/** /**
* struct gm200_flcn_bl_desc - DMEM bootloader descriptor * struct hsf_load_header - HS firmware load header
* @signature: 16B signature for secure code. 0s if no secure code
* @ctx_dma: DMA context to be used by BL while loading code/data
* @code_dma_base: 256B-aligned Physical FB Address where code is located
* (falcon's $xcbase register)
* @non_sec_code_off: offset from code_dma_base where the non-secure code is
* located. The offset must be multiple of 256 to help perf
* @non_sec_code_size: the size of the nonSecure code part.
* @sec_code_off: offset from code_dma_base where the secure code is
* located. The offset must be multiple of 256 to help perf
* @sec_code_size: the size of the secure code part.
* @code_entry_point: code entry point which will be invoked by BL after
* code is loaded.
* @data_dma_base: 256B aligned Physical FB Address where data is located.
* (falcon's $xdbase register)
* @data_size: size of data block. Should be multiple of 256B
*
* Structure used by the bootloader to load the rest of the code. This has
* to be filled by host and copied into DMEM at offset provided in the
* hsflcn_bl_desc.bl_desc_dmem_load_off.
*/ */
struct gm200_flcn_bl_desc { struct hsf_load_header {
u32 reserved[4];
u32 signature[4];
u32 ctx_dma;
struct flcn_u64 code_dma_base;
u32 non_sec_code_off; u32 non_sec_code_off;
u32 non_sec_code_size; u32 non_sec_code_size;
u32 sec_code_off; u32 data_dma_base;
u32 sec_code_size;
u32 code_entry_point;
struct flcn_u64 data_dma_base;
u32 data_size; u32 data_size;
u32 num_apps;
struct hsf_load_header_app app[0];
}; };
/** /**
...@@ -319,11 +312,17 @@ struct gm200_secboot { ...@@ -319,11 +312,17 @@ struct gm200_secboot {
* on Tegra the HS FW copies the LS blob into the fixed WPR instead * on Tegra the HS FW copies the LS blob into the fixed WPR instead
*/ */
struct nvkm_gpuobj *acr_load_blob; struct nvkm_gpuobj *acr_load_blob;
struct gm200_flcn_bl_desc acr_load_bl_desc; struct {
struct hsf_load_header load_bl_header;
struct hsf_load_header_app __load_apps[GM200_ACR_MAX_APPS];
};
/* HS FW - unlock WPR region (dGPU only) */ /* HS FW - unlock WPR region (dGPU only) */
struct nvkm_gpuobj *acr_unload_blob; struct nvkm_gpuobj *acr_unload_blob;
struct gm200_flcn_bl_desc acr_unload_bl_desc; struct {
struct hsf_load_header unload_bl_header;
struct hsf_load_header_app __unload_apps[GM200_ACR_MAX_APPS];
};
/* HS bootloader */ /* HS bootloader */
void *hsbl_blob; void *hsbl_blob;
...@@ -353,9 +352,9 @@ struct gm200_secboot { ...@@ -353,9 +352,9 @@ struct gm200_secboot {
/** /**
* Contains functions we wish to abstract between GM200-like implementations * Contains functions we wish to abstract between GM200-like implementations
* @bl_desc_size: size of the BL descriptor used by this chip. * @bl_desc_size: size of the BL descriptor used by this chip.
* @fixup_bl_desc: hook that generates the proper BL descriptor format from * @generate_bl_desc: hook that generates the proper BL descriptor format from
* the generic GM200 format into a data array of size * the hsf_load_header format into a preallocated array of
* bl_desc_size * size bl_desc_size
* @prepare_blobs: prepares the various blobs needed for secure booting * @prepare_blobs: prepares the various blobs needed for secure booting
*/ */
struct gm200_secboot_func { struct gm200_secboot_func {
...@@ -365,7 +364,7 @@ struct gm200_secboot_func { ...@@ -365,7 +364,7 @@ struct gm200_secboot_func {
* callback is called on it * callback is called on it
*/ */
u32 bl_desc_size; u32 bl_desc_size;
void (*fixup_bl_desc)(const struct gm200_flcn_bl_desc *, void *); void (*generate_bl_desc)(const struct hsf_load_header *, void *, u64);
int (*prepare_blobs)(struct gm200_secboot *); int (*prepare_blobs)(struct gm200_secboot *);
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment