Commit 9653a5c7 authored by Hans Rosenfeld, committed by Borislav Petkov

x86, amd-nb: Cleanup AMD northbridge caching code

Support more than just the "Misc Control" part of the northbridges.
Support more flags by turning "gart_supported" into a single bit flag
that is stored in a flags member. Clean up related code by using a set
of functions (amd_nb_num(), amd_nb_has_feature() and node_to_amd_nb())
instead of accessing the NB data structures directly. Reorder the
initialization code and put the GART flush words caching in a separate
function.
Signed-off-by: Hans Rosenfeld <hans.rosenfeld@amd.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
parent eec1d4fa
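
The caller-visible shape of the cleanup, as a minimal before/after sketch (do_something() and the enclosing function are placeholders, not part of the patch):

        int i;

        /* before: poke at the NB bookkeeping structures directly */
        if (amd_northbridges.gart_supported)
                for (i = 0; i < amd_northbridges.num; i++)
                        do_something(amd_northbridges.nb_misc[i]);

        /* after: accessors plus a feature-flag test */
        if (amd_nb_has_feature(AMD_NB_GART))
                for (i = 0; i < amd_nb_num(); i++)
                        do_something(node_to_amd_nb(i)->misc);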
arch/x86/include/asm/amd_nb.h
@@ -3,36 +3,52 @@
 #include <linux/pci.h>
 
-extern struct pci_device_id amd_nb_ids[];
+extern struct pci_device_id amd_nb_misc_ids[];
 
 struct bootnode;
 
 extern int early_is_amd_nb(u32 value);
-extern int cache_amd_northbridges(void);
+extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
 extern int amd_get_nodes(struct bootnode *nodes);
 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int amd_scan_nodes(void);
 
+struct amd_northbridge {
+        struct pci_dev *misc;
+};
+
 struct amd_northbridge_info {
         u16 num;
-        u8 gart_supported;
-        struct pci_dev **nb_misc;
+        u64 flags;
+        struct amd_northbridge *nb;
 };
 extern struct amd_northbridge_info amd_northbridges;
 
+#define AMD_NB_GART        0x1
+
 #ifdef CONFIG_AMD_NB
 
-static inline struct pci_dev *node_to_amd_nb_misc(int node)
-{
-        return (node < amd_northbridges.num) ? amd_northbridges.nb_misc[node] : NULL;
-}
+static inline int amd_nb_num(void)
+{
+        return amd_northbridges.num;
+}
+
+static inline int amd_nb_has_feature(int feature)
+{
+        return ((amd_northbridges.flags & feature) == feature);
+}
+
+static inline struct amd_northbridge *node_to_amd_nb(int node)
+{
+        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
+}
 
 #else
 
-static inline struct pci_dev *node_to_amd_nb_misc(int node)
-{
-        return NULL;
-}
+#define amd_nb_num(x)                0
+#define amd_nb_has_feature(x)        false
+#define node_to_amd_nb(x)        NULL
 
 #endif
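
A subtlety in amd_nb_has_feature(): it tests (flags & feature) == feature rather than a plain flags & feature, so it also answers correctly for a multi-bit mask, where a partial match must not count as "has feature". A small illustration (AMD_NB_EXAMPLE is hypothetical; only AMD_NB_GART exists after this patch):

        #define AMD_NB_EXAMPLE  0x2     /* hypothetical second flag */

        u64 flags = AMD_NB_GART;        /* only the GART bit set */

        /* single-bit query: true either way:
         *   (flags & AMD_NB_GART) == AMD_NB_GART                      -> true
         *
         * multi-bit query: "flags & mask" is nonzero (partial match),
         * but the == form correctly reports false:
         *   (flags & (AMD_NB_GART | AMD_NB_EXAMPLE))
         *           == (AMD_NB_GART | AMD_NB_EXAMPLE)                 -> false
         */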
arch/x86/kernel/amd_nb.c
@@ -12,74 +12,65 @@
 static u32 *flush_words;
 
-struct pci_device_id amd_nb_ids[] = {
+struct pci_device_id amd_nb_misc_ids[] = {
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
         {}
 };
-EXPORT_SYMBOL(amd_nb_ids);
+EXPORT_SYMBOL(amd_nb_misc_ids);
 
 struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
 
-static struct pci_dev *next_amd_northbridge(struct pci_dev *dev)
+static struct pci_dev *next_northbridge(struct pci_dev *dev,
+                                        struct pci_device_id *ids)
 {
         do {
                 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                 if (!dev)
                         break;
-        } while (!pci_match_id(&amd_nb_ids[0], dev));
+        } while (!pci_match_id(ids, dev));
         return dev;
 }
 
-int cache_amd_northbridges(void)
+int amd_cache_northbridges(void)
 {
-        int i;
-        struct pci_dev *dev;
+        int i = 0;
+        struct amd_northbridge *nb;
+        struct pci_dev *misc;
 
-        if (amd_northbridges.num)
+        if (amd_nb_num())
                 return 0;
 
-        dev = NULL;
-        while ((dev = next_amd_northbridge(dev)) != NULL)
-                amd_northbridges.num++;
+        misc = NULL;
+        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+                i++;
 
-        /* some CPU families (e.g. family 0x11) do not support GART */
-        if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
-            boot_cpu_data.x86 == 0x15)
-                amd_northbridges.gart_supported = 1;
+        if (i == 0)
+                return 0;
 
-        amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) *
-                                           sizeof(void *), GFP_KERNEL);
-        if (!amd_northbridges.nb_misc)
+        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
+        if (!nb)
                 return -ENOMEM;
 
-        if (!amd_northbridges.num) {
-                amd_northbridges.nb_misc[0] = NULL;
-                return 0;
-        }
+        amd_northbridges.nb = nb;
+        amd_northbridges.num = i;
 
-        if (amd_northbridges.gart_supported) {
-                flush_words = kmalloc(amd_northbridges.num * sizeof(u32),
-                                      GFP_KERNEL);
-                if (!flush_words) {
-                        kfree(amd_northbridges.nb_misc);
-                        return -ENOMEM;
-                }
-        }
+        misc = NULL;
+        for (i = 0; i != amd_nb_num(); i++) {
+                node_to_amd_nb(i)->misc = misc =
+                        next_northbridge(misc, amd_nb_misc_ids);
+        }
+
+        /* some CPU families (e.g. family 0x11) do not support GART */
+        if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
+            boot_cpu_data.x86 == 0x15)
+                amd_northbridges.flags |= AMD_NB_GART;
 
-        dev = NULL;
-        i = 0;
-        while ((dev = next_amd_northbridge(dev)) != NULL) {
-                amd_northbridges.nb_misc[i] = dev;
-                if (amd_northbridges.gart_supported)
-                        pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
-        }
-        amd_northbridges.nb_misc[i] = NULL;
         return 0;
 }
-EXPORT_SYMBOL_GPL(cache_amd_northbridges);
+EXPORT_SYMBOL_GPL(amd_cache_northbridges);
 
 /* Ignores subdevice/subvendor but as far as I can figure out
    they're useless anyways */
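
next_northbridge() leans on pci_get_device() reference semantics: passing the previously returned device as the from argument continues the bus walk and drops the reference held on it, so the loop needs no explicit pci_dev_put() between iterations. The same idiom in isolation, using only the stock PCI core API:

        #include <linux/pci.h>

        /* visit every PCI device matching an id table */
        static void walk_matching(const struct pci_device_id *ids)
        {
                struct pci_dev *dev = NULL;

                while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
                        if (!pci_match_id(ids, dev))
                                continue;       /* ref handed back on the next call */
                        dev_info(&dev->dev, "matched\n");
                }
        }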
@@ -88,19 +79,39 @@ int __init early_is_amd_nb(u32 device)
         struct pci_device_id *id;
         u32 vendor = device & 0xffff;
 
         device >>= 16;
-        for (id = amd_nb_ids; id->vendor; id++)
+        for (id = amd_nb_misc_ids; id->vendor; id++)
                 if (vendor == id->vendor && device == id->device)
                         return 1;
         return 0;
 }
 
+int amd_cache_gart(void)
+{
+        int i;
+
+        if (!amd_nb_has_feature(AMD_NB_GART))
+                return 0;
+
+        flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
+        if (!flush_words) {
+                amd_northbridges.flags &= ~AMD_NB_GART;
+                return -ENOMEM;
+        }
+
+        for (i = 0; i != amd_nb_num(); i++)
+                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
+                                      &flush_words[i]);
+
+        return 0;
+}
+
 void amd_flush_garts(void)
 {
         int flushed, i;
         unsigned long flags;
         static DEFINE_SPINLOCK(gart_lock);
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
         /* Avoid races between AGP and IOMMU. In theory it's not needed
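
Two notes on amd_cache_gart(): an allocation failure clears AMD_NB_GART, so every later amd_nb_has_feature(AMD_NB_GART) gate degrades to a clean no-op rather than dereferencing a NULL flush_words; and the size multiplication is the pattern that later kernels spell with the overflow-checking helper (kmalloc_array() postdates this patch):

        /* equivalent allocation with overflow checking, in later kernels */
        flush_words = kmalloc_array(amd_nb_num(), sizeof(u32), GFP_KERNEL);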
@@ -109,16 +120,16 @@ void amd_flush_garts(void)
            that it doesn't matter to serialize more. -AK */
         spin_lock_irqsave(&gart_lock, flags);
         flushed = 0;
-        for (i = 0; i < amd_northbridges.num; i++) {
-                pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c,
-                                       flush_words[i]|1);
+        for (i = 0; i < amd_nb_num(); i++) {
+                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
+                                       flush_words[i] | 1);
                 flushed++;
         }
-        for (i = 0; i < amd_northbridges.num; i++) {
+        for (i = 0; i < amd_nb_num(); i++) {
                 u32 w;
                 /* Make sure the hardware actually executed the flush*/
                 for (;;) {
-                        pci_read_config_dword(amd_northbridges.nb_misc[i],
+                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                               0x9c, &w);
                         if (!(w & 1))
                                 break;
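
The per-northbridge flush protocol, pulled out of the loop above as a single-node sketch (assumes AMD_NB_GART is set and node 0 exists): write the cached flush word back to config offset 0x9c with bit 0 set to trigger the flush, then poll until the hardware clears bit 0.

        u32 w;
        struct pci_dev *misc = node_to_amd_nb(0)->misc;

        pci_write_config_dword(misc, 0x9c, flush_words[0] | 1); /* trigger */
        do {
                pci_read_config_dword(misc, 0x9c, &w);
        } while (w & 1);                /* NB clears bit 0 when done */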
@@ -135,11 +146,15 @@ static __init int init_amd_nbs(void)
 {
         int err = 0;
 
-        err = cache_amd_northbridges();
+        err = amd_cache_northbridges();
 
         if (err < 0)
                 printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
 
+        if (amd_cache_gart() < 0)
+                printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
+                       "GART support disabled.\n");
+
         return err;
 }
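
Not visible in the hunk: in the surrounding file init_amd_nbs() is registered as an fs_initcall, so it runs after the PCI core has enumerated devices. Only the enumeration error propagates; the GART message is informational, since amd_cache_gart() already disabled the feature flag on failure.

        /* registration elsewhere in the file (unchanged by this patch) */
        fs_initcall(init_amd_nbs);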
arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -333,7 +333,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
 {
         struct amd_l3_cache *l3;
-        struct pci_dev *dev = node_to_amd_nb_misc(node);
+        struct pci_dev *dev = node_to_amd_nb(node)->misc;
 
         l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
         if (!l3) {
@@ -370,7 +370,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
                 return;
 
         /* not in virtualized environments */
-        if (amd_northbridges.num == 0)
+        if (amd_nb_num() == 0)
                 return;
 
         /*
@@ -378,7 +378,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
          * never freed but this is done only on shutdown so it doesn't matter.
          */
         if (!l3_caches) {
-                int size = amd_northbridges.num * sizeof(struct amd_l3_cache *);
+                int size = amd_nb_num() * sizeof(struct amd_l3_cache *);
 
                 l3_caches = kzalloc(size, GFP_ATOMIC);
                 if (!l3_caches)
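
One semantic shift here: the old node_to_amd_nb_misc() returned a NULL pci_dev for an out-of-range node, whereas node_to_amd_nb(node)->misc dereferences the accessor's result, so callers now implicitly promise node < amd_nb_num(). A defensive variant, should a caller be unable to guarantee that:

        struct amd_northbridge *nb = node_to_amd_nb(node);
        struct pci_dev *dev = nb ? nb->misc : NULL;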
arch/x86/kernel/pci-gart_64.c
@@ -561,11 +561,11 @@ static void enable_gart_translations(void)
 {
         int i;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
-        for (i = 0; i < amd_northbridges.num; i++) {
-                struct pci_dev *dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                struct pci_dev *dev = node_to_amd_nb(i)->misc;
 
                 enable_gart_translation(dev, __pa(agp_gatt_table));
         }
@@ -596,13 +596,13 @@ static void gart_fixup_northbridges(struct sys_device *dev)
         if (!fix_up_north_bridges)
                 return;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
         pr_info("PCI-DMA: Restoring GART aperture settings\n");
 
-        for (i = 0; i < amd_northbridges.num; i++) {
-                struct pci_dev *dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                struct pci_dev *dev = node_to_amd_nb(i)->misc;
 
                 /*
                  * Don't enable translations just yet. That is the next
@@ -656,8 +656,8 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
 
         aper_size = aper_base = info->aper_size = 0;
         dev = NULL;
-        for (i = 0; i < amd_northbridges.num; i++) {
-                dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                dev = node_to_amd_nb(i)->misc;
                 new_aper_base = read_aperture(dev, &new_aper_size);
                 if (!new_aper_base)
                         goto nommu;
@@ -725,13 +725,13 @@ static void gart_iommu_shutdown(void)
         if (!no_agp)
                 return;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
-        for (i = 0; i < amd_northbridges.num; i++) {
+        for (i = 0; i < amd_nb_num(); i++) {
                 u32 ctl;
 
-                dev = amd_northbridges.nb_misc[i];
+                dev = node_to_amd_nb(i)->misc;
                 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
 
                 ctl &= ~GARTEN;
@@ -749,7 +749,7 @@ int __init gart_iommu_init(void)
         unsigned long scratch;
         long i;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return 0;
 
 #ifndef CONFIG_AGP_AMD64
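
The shutdown path clears the GART enable bit with a read-modify-write of the aperture control register; the write-back lands just outside the hunk shown. The complete sequence for one bridge:

        u32 ctl;

        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
        ctl &= ~GARTEN;                 /* disable GART translation */
        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);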
drivers/char/agp/amd64-agp.c
@@ -124,7 +124,7 @@ static int amd64_fetch_size(void)
         u32 temp;
         struct aper_size_info_32 *values;
 
-        dev = amd_northbridges.nb_misc[0];
+        dev = node_to_amd_nb(0)->misc;
         if (dev==NULL)
                 return 0;
 
@@ -181,14 +181,13 @@ static int amd_8151_configure(void)
         unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
         int i;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return 0;
 
         /* Configure AGP regs in each x86-64 host bridge. */
-        for (i = 0; i < amd_northbridges.num; i++) {
+        for (i = 0; i < amd_nb_num(); i++) {
                 agp_bridge->gart_bus_addr =
-                                amd64_configure(amd_northbridges.nb_misc[i],
-                                                gatt_bus);
+                        amd64_configure(node_to_amd_nb(i)->misc, gatt_bus);
         }
         amd_flush_garts();
         return 0;
@@ -200,11 +199,11 @@ static void amd64_cleanup(void)
         u32 tmp;
         int i;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return;
 
-        for (i = 0; i < amd_northbridges.num; i++) {
-                struct pci_dev *dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                struct pci_dev *dev = node_to_amd_nb(i)->misc;
                 /* disable gart translation */
                 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
                 tmp &= ~GARTEN;
@@ -331,15 +330,15 @@ static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
 {
         int i;
 
-        if (cache_amd_northbridges() < 0)
+        if (amd_cache_northbridges() < 0)
                 return -ENODEV;
 
-        if (!amd_northbridges.gart_supported)
+        if (!amd_nb_has_feature(AMD_NB_GART))
                 return -ENODEV;
 
         i = 0;
-        for (i = 0; i < amd_northbridges.num; i++) {
-                struct pci_dev *dev = amd_northbridges.nb_misc[i];
+        for (i = 0; i < amd_nb_num(); i++) {
+                struct pci_dev *dev = node_to_amd_nb(i)->misc;
                 if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
                         dev_err(&dev->dev, "no usable aperture found\n");
 #ifdef __x86_64__
@@ -416,7 +415,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
         }
 
         /* shadow x86-64 registers into ULi registers */
-        pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+        pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
                                &httfea);
 
         /* if x86-64 aperture base is beyond 4G, exit here */
@@ -484,7 +483,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
         pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
 
         /* shadow x86-64 registers into NVIDIA registers */
-        pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+        pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
                                &apbase);
 
         /* if x86-64 aperture base is beyond 4G, exit here */
@@ -778,7 +777,7 @@ int __init agp_amd64_init(void)
         }
 
         /* First check that we have at least one AMD64 NB */
-        if (!pci_dev_present(amd_nb_ids))
+        if (!pci_dev_present(amd_nb_misc_ids))
                 return -ENODEV;
 
         /* Look for any AGP bridge */
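
agp_amd64_init() uses the renamed table for a cheap existence probe: pci_dev_present() only reports whether any device matches the id list and takes no device reference. The probe shape in isolation (example_init() is a hypothetical name):

        static int __init example_init(void)
        {
                if (!pci_dev_present(amd_nb_misc_ids))
                        return -ENODEV; /* no supported AMD northbridge */
                return 0;
        }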
drivers/edac/amd64_edac.c
@@ -2917,7 +2917,7 @@ static int __init amd64_edac_init(void)
 
         opstate_init();
 
-        if (cache_amd_northbridges() < 0)
+        if (amd_cache_northbridges() < 0)
                 goto err_ret;
 
         msrs = msrs_alloc();
@@ -2934,7 +2934,7 @@ static int __init amd64_edac_init(void)
          * to finish initialization of the MC instances.
          */
         err = -ENODEV;
-        for (nb = 0; nb < amd_northbridges.num; nb++) {
+        for (nb = 0; nb < amd_nb_num(); nb++) {
                 if (!pvt_lookup[nb])
                         continue;