Commit f0831acc authored by Arnd Bergmann's avatar Arnd Bergmann Committed by Paul Mackerras

[PATCH] spufs: abstract priv1 register access.

In a hypervisor based setup, direct access to the first
privileged register space can typically not be allowed
to the kernel and has to be implemented through hypervisor
calls.

As suggested by Masato Noguchi, let's abstract the register
access through a number of function calls. Since there is
currently no public specification of actual hypervisor
calls to implement this, I only provide a place that
makes it easier to hook into.

Cc: Masato Noguchi <Masato.Noguchi@jp.sony.com>
Cc: Geoff Levand <geoff.levand@am.sony.com>
Signed-off-by: default avatarArnd Bergmann <arndb@de.ibm.com>
Signed-off-by: default avatarPaul Mackerras <paulus@samba.org>
parent ce8ab854
...@@ -2,6 +2,9 @@ obj-y += interrupt.o iommu.o setup.o spider-pic.o ...@@ -2,6 +2,9 @@ obj-y += interrupt.o iommu.o setup.o spider-pic.o
obj-y += pervasive.o obj-y += pervasive.o
obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SPU_FS) += spufs/ spu_base.o obj-$(CONFIG_SPU_FS) += spufs/ spu-base.o
spu-base-y += spu_base.o spu_priv1.o
builtin-spufs-$(CONFIG_SPU_FS) += spu_syscalls.o builtin-spufs-$(CONFIG_SPU_FS) += spu_syscalls.o
obj-y += $(builtin-spufs-m) obj-y += $(builtin-spufs-m)
...@@ -142,8 +142,7 @@ static int __spu_trap_mailbox(struct spu *spu) ...@@ -142,8 +142,7 @@ static int __spu_trap_mailbox(struct spu *spu)
/* atomically disable SPU mailbox interrupts */ /* atomically disable SPU mailbox interrupts */
spin_lock(&spu->register_lock); spin_lock(&spu->register_lock);
out_be64(&spu->priv1->int_mask_class2_RW, spu_int_mask_and(spu, 2, ~0x1);
in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
spin_unlock(&spu->register_lock); spin_unlock(&spu->register_lock);
return 0; return 0;
} }
...@@ -180,8 +179,7 @@ static int __spu_trap_spubox(struct spu *spu) ...@@ -180,8 +179,7 @@ static int __spu_trap_spubox(struct spu *spu)
/* atomically disable SPU mailbox interrupts */ /* atomically disable SPU mailbox interrupts */
spin_lock(&spu->register_lock); spin_lock(&spu->register_lock);
out_be64(&spu->priv1->int_mask_class2_RW, spu_int_mask_and(spu, 2, ~0x10);
in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
spin_unlock(&spu->register_lock); spin_unlock(&spu->register_lock);
return 0; return 0;
} }
...@@ -206,8 +204,8 @@ spu_irq_class_0_bottom(struct spu *spu) ...@@ -206,8 +204,8 @@ spu_irq_class_0_bottom(struct spu *spu)
spu->class_0_pending = 0; spu->class_0_pending = 0;
mask = in_be64(&spu->priv1->int_mask_class0_RW); mask = spu_int_mask_get(spu, 0);
stat = in_be64(&spu->priv1->int_stat_class0_RW); stat = spu_int_stat_get(spu, 0);
stat &= mask; stat &= mask;
...@@ -220,7 +218,7 @@ spu_irq_class_0_bottom(struct spu *spu) ...@@ -220,7 +218,7 @@ spu_irq_class_0_bottom(struct spu *spu)
if (stat & 4) /* error on SPU */ if (stat & 4) /* error on SPU */
__spu_trap_error(spu); __spu_trap_error(spu);
out_be64(&spu->priv1->int_stat_class0_RW, stat); spu_int_stat_clear(spu, 0, stat);
return (stat & 0x7) ? -EIO : 0; return (stat & 0x7) ? -EIO : 0;
} }
...@@ -236,13 +234,13 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs) ...@@ -236,13 +234,13 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
/* atomically read & clear class1 status. */ /* atomically read & clear class1 status. */
spin_lock(&spu->register_lock); spin_lock(&spu->register_lock);
mask = in_be64(&spu->priv1->int_mask_class1_RW); mask = spu_int_mask_get(spu, 1);
stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask; stat = spu_int_stat_get(spu, 1) & mask;
dar = in_be64(&spu->priv1->mfc_dar_RW); dar = spu_mfc_dar_get(spu);
dsisr = in_be64(&spu->priv1->mfc_dsisr_RW); dsisr = spu_mfc_dsisr_get(spu);
if (stat & 2) /* mapping fault */ if (stat & 2) /* mapping fault */
out_be64(&spu->priv1->mfc_dsisr_RW, 0UL); spu_mfc_dsisr_set(spu, 0ul);
out_be64(&spu->priv1->int_stat_class1_RW, stat); spu_int_stat_clear(spu, 1, stat);
spin_unlock(&spu->register_lock); spin_unlock(&spu->register_lock);
if (stat & 1) /* segment fault */ if (stat & 1) /* segment fault */
...@@ -270,8 +268,8 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs) ...@@ -270,8 +268,8 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
unsigned long mask; unsigned long mask;
spu = data; spu = data;
stat = in_be64(&spu->priv1->int_stat_class2_RW); stat = spu_int_stat_get(spu, 2);
mask = in_be64(&spu->priv1->int_mask_class2_RW); mask = spu_int_mask_get(spu, 2);
pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask); pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
...@@ -292,7 +290,7 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs) ...@@ -292,7 +290,7 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
if (stat & 0x10) /* SPU mailbox threshold */ if (stat & 0x10) /* SPU mailbox threshold */
__spu_trap_spubox(spu); __spu_trap_spubox(spu);
out_be64(&spu->priv1->int_stat_class2_RW, stat); spu_int_stat_clear(spu, 2, stat);
return stat ? IRQ_HANDLED : IRQ_NONE; return stat ? IRQ_HANDLED : IRQ_NONE;
} }
...@@ -309,21 +307,18 @@ spu_request_irqs(struct spu *spu) ...@@ -309,21 +307,18 @@ spu_request_irqs(struct spu *spu)
spu_irq_class_0, 0, spu->irq_c0, spu); spu_irq_class_0, 0, spu->irq_c0, spu);
if (ret) if (ret)
goto out; goto out;
out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number); snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
spu_irq_class_1, 0, spu->irq_c1, spu); spu_irq_class_1, 0, spu->irq_c1, spu);
if (ret) if (ret)
goto out1; goto out1;
out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number); snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
spu_irq_class_2, 0, spu->irq_c2, spu); spu_irq_class_2, 0, spu->irq_c2, spu);
if (ret) if (ret)
goto out2; goto out2;
out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
goto out; goto out;
out2: out2:
...@@ -383,13 +378,6 @@ static void spu_init_channels(struct spu *spu) ...@@ -383,13 +378,6 @@ static void spu_init_channels(struct spu *spu)
} }
} }
static void spu_init_regs(struct spu *spu)
{
out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
}
struct spu *spu_alloc(void) struct spu *spu_alloc(void)
{ {
struct spu *spu; struct spu *spu;
...@@ -405,10 +393,8 @@ struct spu *spu_alloc(void) ...@@ -405,10 +393,8 @@ struct spu *spu_alloc(void)
} }
up(&spu_mutex); up(&spu_mutex);
if (spu) { if (spu)
spu_init_channels(spu); spu_init_channels(spu);
spu_init_regs(spu);
}
return spu; return spu;
} }
...@@ -579,8 +565,7 @@ static int __init spu_map_device(struct spu *spu, struct device_node *spe) ...@@ -579,8 +565,7 @@ static int __init spu_map_device(struct spu *spu, struct device_node *spe)
goto out_unmap; goto out_unmap;
spu->priv1= map_spe_prop(spe, "priv1"); spu->priv1= map_spe_prop(spe, "priv1");
if (!spu->priv1) /* priv1 is not available on a hypervisor */
goto out_unmap;
spu->priv2= map_spe_prop(spe, "priv2"); spu->priv2= map_spe_prop(spe, "priv2");
if (!spu->priv2) if (!spu->priv2)
...@@ -633,8 +618,8 @@ static int __init create_spu(struct device_node *spe) ...@@ -633,8 +618,8 @@ static int __init create_spu(struct device_node *spe)
spu->dsisr = 0UL; spu->dsisr = 0UL;
spin_lock_init(&spu->register_lock); spin_lock_init(&spu->register_lock);
out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1)); spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
out_be64(&spu->priv1->mfc_sr1_RW, 0x33); spu_mfc_sr1_set(spu, 0x33);
spu->ibox_callback = NULL; spu->ibox_callback = NULL;
spu->wbox_callback = NULL; spu->wbox_callback = NULL;
......
/*
* access to SPU privileged registers
*/
#include <linux/module.h>
#include <asm/io.h>
#include <asm/spu.h>
/*
 * Clear bits in the interrupt mask for one class:
 * new mask = current mask & mask.
 * Callers are expected to hold spu->register_lock (see spu_base.c).
 */
void spu_int_mask_and(struct spu *spu, int class, u64 mask)
{
	u64 __iomem *reg = &spu->priv1->int_mask_RW[class];

	out_be64(reg, in_be64(reg) & mask);
}
EXPORT_SYMBOL_GPL(spu_int_mask_and);
/*
 * Set bits in the interrupt mask for one class:
 * new mask = current mask | mask.
 * Callers are expected to hold spu->register_lock (see spufs hw ops).
 */
void spu_int_mask_or(struct spu *spu, int class, u64 mask)
{
	u64 __iomem *reg = &spu->priv1->int_mask_RW[class];

	out_be64(reg, in_be64(reg) | mask);
}
EXPORT_SYMBOL_GPL(spu_int_mask_or);
/* Overwrite the whole interrupt mask register for the given class. */
void spu_int_mask_set(struct spu *spu, int class, u64 mask)
{
	u64 __iomem *reg = &spu->priv1->int_mask_RW[class];

	out_be64(reg, mask);
}
EXPORT_SYMBOL_GPL(spu_int_mask_set);
u64 spu_int_mask_get(struct spu *spu, int class)
{
return in_be64(&spu->priv1->int_mask_RW[class]);
}
EXPORT_SYMBOL_GPL(spu_int_mask_get);
/*
 * Acknowledge interrupts by writing the status register for a class.
 * The register name (int_stat_RW) suggests write-to-clear semantics;
 * callers pass back the status bits they have handled.
 */
void spu_int_stat_clear(struct spu *spu, int class, u64 stat)
{
	u64 __iomem *reg = &spu->priv1->int_stat_RW[class];

	out_be64(reg, stat);
}
EXPORT_SYMBOL_GPL(spu_int_stat_clear);
u64 spu_int_stat_get(struct spu *spu, int class)
{
return in_be64(&spu->priv1->int_stat_RW[class]);
}
EXPORT_SYMBOL_GPL(spu_int_stat_get);
/* Program the interrupt routing register. */
void spu_int_route_set(struct spu *spu, u64 route)
{
	u64 __iomem *reg = &spu->priv1->int_route_RW;

	out_be64(reg, route);
}
EXPORT_SYMBOL_GPL(spu_int_route_set);
u64 spu_mfc_dar_get(struct spu *spu)
{
return in_be64(&spu->priv1->mfc_dar_RW);
}
EXPORT_SYMBOL_GPL(spu_mfc_dar_get);
u64 spu_mfc_dsisr_get(struct spu *spu)
{
return in_be64(&spu->priv1->mfc_dsisr_RW);
}
EXPORT_SYMBOL_GPL(spu_mfc_dsisr_get);
/* Write the MFC DSISR register (callers write 0 to clear a fault). */
void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
	u64 __iomem *reg = &spu->priv1->mfc_dsisr_RW;

	out_be64(reg, dsisr);
}
EXPORT_SYMBOL_GPL(spu_mfc_dsisr_set);
/*
 * Write the MFC SDR register; callers load it with the CPU's SDR1
 * (hash page table base) so the MFC shares the kernel page table.
 */
void spu_mfc_sdr_set(struct spu *spu, u64 sdr)
{
	u64 __iomem *reg = &spu->priv1->mfc_sdr_RW;

	out_be64(reg, sdr);
}
EXPORT_SYMBOL_GPL(spu_mfc_sdr_set);
/* Write the MFC state register 1 (SR1). */
void spu_mfc_sr1_set(struct spu *spu, u64 sr1)
{
	u64 __iomem *reg = &spu->priv1->mfc_sr1_RW;

	out_be64(reg, sr1);
}
EXPORT_SYMBOL_GPL(spu_mfc_sr1_set);
u64 spu_mfc_sr1_get(struct spu *spu)
{
return in_be64(&spu->priv1->mfc_sr1_RW);
}
EXPORT_SYMBOL_GPL(spu_mfc_sr1_get);
/* Write the MFC transfer class ID register. */
void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
	u64 __iomem *reg = &spu->priv1->mfc_tclass_id_RW;

	out_be64(reg, tclass_id);
}
EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_set);
u64 spu_mfc_tclass_id_get(struct spu *spu)
{
return in_be64(&spu->priv1->mfc_tclass_id_RW);
}
EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_get);
void spu_tlb_invalidate(struct spu *spu)
{
out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
}
EXPORT_SYMBOL_GPL(spu_tlb_invalidate);
/* Write the resource allocation group ID register. */
void spu_resource_allocation_groupID_set(struct spu *spu, u64 id)
{
	u64 __iomem *reg = &spu->priv1->resource_allocation_groupID_RW;

	out_be64(reg, id);
}
EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_set);
u64 spu_resource_allocation_groupID_get(struct spu *spu)
{
return in_be64(&spu->priv1->resource_allocation_groupID_RW);
}
EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_get);
/* Write the resource allocation enable register. */
void spu_resource_allocation_enable_set(struct spu *spu, u64 enable)
{
	u64 __iomem *reg = &spu->priv1->resource_allocation_enable_RW;

	out_be64(reg, enable);
}
EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_set);
u64 spu_resource_allocation_enable_get(struct spu *spu)
{
return in_be64(&spu->priv1->resource_allocation_enable_RW);
}
EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_get);
...@@ -62,7 +62,6 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx, ...@@ -62,7 +62,6 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
unsigned int events) unsigned int events)
{ {
struct spu *spu = ctx->spu; struct spu *spu = ctx->spu;
struct spu_priv1 __iomem *priv1 = spu->priv1;
int ret = 0; int ret = 0;
u32 stat; u32 stat;
...@@ -78,18 +77,16 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx, ...@@ -78,18 +77,16 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
if (stat & 0xff0000) if (stat & 0xff0000)
ret |= POLLIN | POLLRDNORM; ret |= POLLIN | POLLRDNORM;
else { else {
out_be64(&priv1->int_stat_class2_RW, 0x1); spu_int_stat_clear(spu, 2, 0x1);
out_be64(&priv1->int_mask_class2_RW, spu_int_mask_or(spu, 2, 0x1);
in_be64(&priv1->int_mask_class2_RW) | 0x1);
} }
} }
if (events & (POLLOUT | POLLWRNORM)) { if (events & (POLLOUT | POLLWRNORM)) {
if (stat & 0x00ff00) if (stat & 0x00ff00)
ret = POLLOUT | POLLWRNORM; ret = POLLOUT | POLLWRNORM;
else { else {
out_be64(&priv1->int_stat_class2_RW, 0x10); spu_int_stat_clear(spu, 2, 0x10);
out_be64(&priv1->int_mask_class2_RW, spu_int_mask_or(spu, 2, 0x10);
in_be64(&priv1->int_mask_class2_RW) | 0x10);
} }
} }
spin_unlock_irq(&spu->register_lock); spin_unlock_irq(&spu->register_lock);
...@@ -100,7 +97,6 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data) ...@@ -100,7 +97,6 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
{ {
struct spu *spu = ctx->spu; struct spu *spu = ctx->spu;
struct spu_problem __iomem *prob = spu->problem; struct spu_problem __iomem *prob = spu->problem;
struct spu_priv1 __iomem *priv1 = spu->priv1;
struct spu_priv2 __iomem *priv2 = spu->priv2; struct spu_priv2 __iomem *priv2 = spu->priv2;
int ret; int ret;
...@@ -111,8 +107,7 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data) ...@@ -111,8 +107,7 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
ret = 4; ret = 4;
} else { } else {
/* make sure we get woken up by the interrupt */ /* make sure we get woken up by the interrupt */
out_be64(&priv1->int_mask_class2_RW, spu_int_mask_or(spu, 2, 0x1);
in_be64(&priv1->int_mask_class2_RW) | 0x1);
ret = 0; ret = 0;
} }
spin_unlock_irq(&spu->register_lock); spin_unlock_irq(&spu->register_lock);
...@@ -123,7 +118,6 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data) ...@@ -123,7 +118,6 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{ {
struct spu *spu = ctx->spu; struct spu *spu = ctx->spu;
struct spu_problem __iomem *prob = spu->problem; struct spu_problem __iomem *prob = spu->problem;
struct spu_priv1 __iomem *priv1 = spu->priv1;
int ret; int ret;
spin_lock_irq(&spu->register_lock); spin_lock_irq(&spu->register_lock);
...@@ -134,8 +128,7 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data) ...@@ -134,8 +128,7 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
} else { } else {
/* make sure we get woken up by the interrupt when space /* make sure we get woken up by the interrupt when space
becomes available */ becomes available */
out_be64(&priv1->int_mask_class2_RW, spu_int_mask_or(spu, 2, 0x10);
in_be64(&priv1->int_mask_class2_RW) | 0x10);
ret = 0; ret = 0;
} }
spin_unlock_irq(&spu->register_lock); spin_unlock_irq(&spu->register_lock);
......
This diff is collapsed.
...@@ -172,6 +172,29 @@ static inline void unregister_spu_syscalls(struct spufs_calls *calls) ...@@ -172,6 +172,29 @@ static inline void unregister_spu_syscalls(struct spufs_calls *calls)
#endif /* MODULE */ #endif /* MODULE */
/* access to priv1 registers */
void spu_int_mask_and(struct spu *spu, int class, u64 mask);
void spu_int_mask_or(struct spu *spu, int class, u64 mask);
void spu_int_mask_set(struct spu *spu, int class, u64 mask);
u64 spu_int_mask_get(struct spu *spu, int class);
void spu_int_stat_clear(struct spu *spu, int class, u64 stat);
u64 spu_int_stat_get(struct spu *spu, int class);
void spu_int_route_set(struct spu *spu, u64 route);
u64 spu_mfc_dar_get(struct spu *spu);
u64 spu_mfc_dsisr_get(struct spu *spu);
void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr);
void spu_mfc_sdr_set(struct spu *spu, u64 sdr);
void spu_mfc_sr1_set(struct spu *spu, u64 sr1);
u64 spu_mfc_sr1_get(struct spu *spu);
void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id);
u64 spu_mfc_tclass_id_get(struct spu *spu);
void spu_tlb_invalidate(struct spu *spu);
void spu_resource_allocation_groupID_set(struct spu *spu, u64 id);
u64 spu_resource_allocation_groupID_get(struct spu *spu);
void spu_resource_allocation_enable_set(struct spu *spu, u64 enable);
u64 spu_resource_allocation_enable_get(struct spu *spu);
/* /*
* This defines the Local Store, Problem Area and Privlege Area of an SPU. * This defines the Local Store, Problem Area and Privlege Area of an SPU.
*/ */
...@@ -379,25 +402,21 @@ struct spu_priv1 { ...@@ -379,25 +402,21 @@ struct spu_priv1 {
/* Interrupt Area */ /* Interrupt Area */
u64 int_mask_class0_RW; /* 0x100 */ u64 int_mask_RW[3]; /* 0x100 */
#define CLASS0_ENABLE_DMA_ALIGNMENT_INTR 0x1L #define CLASS0_ENABLE_DMA_ALIGNMENT_INTR 0x1L
#define CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR 0x2L #define CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR 0x2L
#define CLASS0_ENABLE_SPU_ERROR_INTR 0x4L #define CLASS0_ENABLE_SPU_ERROR_INTR 0x4L
#define CLASS0_ENABLE_MFC_FIR_INTR 0x8L #define CLASS0_ENABLE_MFC_FIR_INTR 0x8L
u64 int_mask_class1_RW; /* 0x108 */
#define CLASS1_ENABLE_SEGMENT_FAULT_INTR 0x1L #define CLASS1_ENABLE_SEGMENT_FAULT_INTR 0x1L
#define CLASS1_ENABLE_STORAGE_FAULT_INTR 0x2L #define CLASS1_ENABLE_STORAGE_FAULT_INTR 0x2L
#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_GET_INTR 0x4L #define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_GET_INTR 0x4L
#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_PUT_INTR 0x8L #define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_PUT_INTR 0x8L
u64 int_mask_class2_RW; /* 0x110 */
#define CLASS2_ENABLE_MAILBOX_INTR 0x1L #define CLASS2_ENABLE_MAILBOX_INTR 0x1L
#define CLASS2_ENABLE_SPU_STOP_INTR 0x2L #define CLASS2_ENABLE_SPU_STOP_INTR 0x2L
#define CLASS2_ENABLE_SPU_HALT_INTR 0x4L #define CLASS2_ENABLE_SPU_HALT_INTR 0x4L
#define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR 0x8L #define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR 0x8L
u8 pad_0x118_0x140[0x28]; /* 0x118 */ u8 pad_0x118_0x140[0x28]; /* 0x118 */
u64 int_stat_class0_RW; /* 0x140 */ u64 int_stat_RW[3]; /* 0x140 */
u64 int_stat_class1_RW; /* 0x148 */
u64 int_stat_class2_RW; /* 0x150 */
u8 pad_0x158_0x180[0x28]; /* 0x158 */ u8 pad_0x158_0x180[0x28]; /* 0x158 */
u64 int_route_RW; /* 0x180 */ u64 int_route_RW; /* 0x180 */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment