Commit efeb6b50 authored by Dave Jones, committed by Dave Jones

[AGPGART] fix macros that expect agp_bridge in global scope

From Christoph Hellwig
parent 59bafd8f
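For readers skimming the diff, here is a minimal, self-contained sketch of the pattern being fixed. It is not the kernel code itself: struct demo_bridge, demo_bridge_ptr, demo_flush() and main() are invented for illustration; only the idea mirrors the patch, namely that a macro which silently reads a global bridge pointer is replaced by one that takes the bridge as an argument, and the CACHE_FLUSH() macro is spelled out as a call through the bridge (or the plain global_cache_flush() helper).

#include <stdio.h>

struct demo_bridge {
	unsigned long scratch_page;
	void (*cache_flush)(void);
};

/* Old style: only usable where a global named demo_bridge_ptr is in
 * scope, and it hard-wires a single bridge. */
static struct demo_bridge *demo_bridge_ptr;
#define PGE_EMPTY_OLD(p)	(!(p) || (p) == demo_bridge_ptr->scratch_page)

/* New style, as in the patch: the caller names the bridge explicitly. */
#define PGE_EMPTY(b, p)		(!(p) || (p) == (b)->scratch_page)

static void demo_flush(void)
{
	puts("flush CPU caches");
}

int main(void)
{
	struct demo_bridge br = {
		.scratch_page = 0x1000,
		.cache_flush = demo_flush,
	};
	demo_bridge_ptr = &br;

	/* CACHE_FLUSH() used to hide an indirect call through the global;
	 * calling through the bridge makes the dependency explicit. */
	br.cache_flush();

	printf("empty via old macro: %d\n", PGE_EMPTY_OLD(0x1000UL));
	printf("empty via new macro: %d\n", PGE_EMPTY(&br, 0x1000UL));
	return 0;
}

The same reasoning drives the A_IDX*() macros growing a bridge argument below: once more than one AGP bridge can exist, a header macro cannot assume which one "the" bridge is.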
@@ -147,20 +147,17 @@ struct agp_bridge_data {
 #define MB(x)	(KB (KB (x)))
 #define GB(x)	(MB (KB (x)))
-#define CACHE_FLUSH		agp_bridge->cache_flush
 #define A_SIZE_8(x)		((struct aper_size_info_8 *) x)
 #define A_SIZE_16(x)		((struct aper_size_info_16 *) x)
 #define A_SIZE_32(x)		((struct aper_size_info_32 *) x)
 #define A_SIZE_LVL2(x)		((struct aper_size_info_lvl2 *) x)
 #define A_SIZE_FIX(x)		((struct aper_size_info_fixed *) x)
-#define A_IDX8()		(A_SIZE_8(agp_bridge->aperture_sizes) + i)
-#define A_IDX16()		(A_SIZE_16(agp_bridge->aperture_sizes) + i)
-#define A_IDX32()		(A_SIZE_32(agp_bridge->aperture_sizes) + i)
-#define A_IDXLVL2()		(A_SIZE_LVL2(agp_bridge->aperture_sizes) + i)
-#define A_IDXFIX()		(A_SIZE_FIX(agp_bridge->aperture_sizes) + i)
+#define A_IDX8(bridge)		(A_SIZE_8((bridge)->aperture_sizes) + i)
+#define A_IDX16(bridge)		(A_SIZE_16((bridge)->aperture_sizes) + i)
+#define A_IDX32(bridge)		(A_SIZE_32((bridge)->aperture_sizes) + i)
 #define MAXKEY			(4096 * 32)
-#define PGE_EMPTY(p)	(!(p) || (p) == (unsigned long) agp_bridge->scratch_page)
+#define PGE_EMPTY(b, p)	(!(p) || (p) == (unsigned long) (b)->scratch_page)
 /* intel register */
 #define INTEL_APBASE	0x10
...
@@ -33,7 +33,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
 		return -ENOMEM;
 	}
 	SetPageReserved(virt_to_page(page_map->real));
-	CACHE_FLUSH();
+	global_cache_flush();
 	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
 					    PAGE_SIZE);
 	if (page_map->remapped == NULL) {
@@ -42,7 +42,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
 		page_map->real = NULL;
 		return -ENOMEM;
 	}
-	CACHE_FLUSH();
+	global_cache_flush();
 	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
 		page_map->remapped[i] = agp_bridge->scratch_page;
@@ -297,14 +297,13 @@ static int amd_insert_memory(agp_memory * mem,
 	while (j < (pg_start + mem->page_count)) {
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = GET_GATT(addr);
-		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
+		if (!PGE_EMPTY(agp_bridge, cur_gatt[GET_GATT_OFF(addr)]))
 			return -EBUSY;
-		}
 		j++;
 	}
 	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
+		global_cache_flush();
 		mem->is_flushed = TRUE;
 	}
@@ -402,7 +401,6 @@ static struct agp_driver amd_k7_agp_driver = {
 	.owner		= THIS_MODULE,
 };
-/* Supported Device Scanning routine */
 static int __init agp_amdk7_probe(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
...
@@ -50,13 +50,13 @@ static int x86_64_insert_memory(agp_memory * mem, off_t pg_start, int type)
 	/* gatt table should be empty. */
 	while (j < (pg_start + mem->page_count)) {
-		if (!PGE_EMPTY(agp_bridge->gatt_table[j]))
+		if (!PGE_EMPTY(agp_bridge, agp_bridge->gatt_table[j]))
 			return -EBUSY;
 		j++;
 	}
 	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
+		global_cache_flush();
 		mem->is_flushed = TRUE;
 	}
@@ -296,6 +296,11 @@ static int __init agp_amdk8_probe(struct pci_dev *pdev,
 	return 0;
 }
+static void __exit agp_amdk8_remove(struct pci_dev *pdev)
+{
+	agp_unregister_driver(&amd_k8_agp_driver);
+}
 static struct pci_device_id agp_amdk8_pci_table[] __initdata = {
 	{
 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
@@ -314,6 +319,7 @@ static struct __initdata pci_driver agp_amdk8_pci_driver = {
 	.name		= "agpgart-amd-k8",
 	.id_table	= agp_amdk8_pci_table,
 	.probe		= agp_amdk8_probe,
+	.remove		= agp_amdk8_remove,
 };
 /* Not static due to IOMMU code calling it early. */
@@ -324,7 +330,6 @@ int __init agp_amdk8_init(void)
 static void __exit agp_amdk8_cleanup(void)
 {
-	agp_unregister_driver(&amd_k8_agp_driver);
 	pci_unregister_driver(&agp_amdk8_pci_driver);
 }
...
@@ -318,7 +318,7 @@ int agp_bind_memory(agp_memory * curr, off_t pg_start)
 		return -EINVAL;
 	}
 	if (curr->is_flushed == FALSE) {
-		CACHE_FLUSH();
+		agp_bridge->cache_flush();
 		curr->is_flushed = TRUE;
 	}
 	ret_val = agp_bridge->insert_memory(curr, pg_start, curr->type);
@@ -537,17 +537,15 @@ int agp_generic_create_gatt_table(void)
 			i++;
 			switch (agp_bridge->size_type) {
 			case U8_APER_SIZE:
-				agp_bridge->current_size = A_IDX8();
+				agp_bridge->current_size = A_IDX8(agp_bridge);
 				break;
 			case U16_APER_SIZE:
-				agp_bridge->current_size = A_IDX16();
+				agp_bridge->current_size = A_IDX16(agp_bridge);
 				break;
 			case U32_APER_SIZE:
-				agp_bridge->current_size = A_IDX32();
+				agp_bridge->current_size = A_IDX32(agp_bridge);
 				break;
-			/* This case will never really
-			 * happen.
-			 */
+			/* This case will never really happen. */
 			case FIXED_APER_SIZE:
 			case LVL2_APER_SIZE:
 			default:
@@ -577,10 +575,11 @@ int agp_generic_create_gatt_table(void)
 	agp_bridge->gatt_table_real = (u32 *) table;
 	agp_gatt_table = (void *)table;
-	CACHE_FLUSH();
+	agp_bridge->cache_flush();
 	agp_bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
 					(PAGE_SIZE * (1 << page_order)));
-	CACHE_FLUSH();
+	agp_bridge->cache_flush();
 	if (agp_bridge->gatt_table == NULL) {
 		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
@@ -709,14 +708,14 @@ int agp_generic_insert_memory(agp_memory * mem, off_t pg_start, int type)
 	j = pg_start;
 	while (j < (pg_start + mem->page_count)) {
-		if (!PGE_EMPTY(agp_bridge->gatt_table[j])) {
+		if (!PGE_EMPTY(agp_bridge, agp_bridge->gatt_table[j])) {
 			return -EBUSY;
 		}
 		j++;
 	}
 	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
+		agp_bridge->cache_flush();
 		mem->is_flushed = TRUE;
 	}
...
@@ -285,7 +285,7 @@ static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
 	}
 	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
+		global_cache_flush();
 		mem->is_flushed = TRUE;
 	}
...
@@ -294,7 +294,7 @@ static int i460_insert_memory_small_io_page (agp_memory *mem, off_t pg_start, in
 	j = io_pg_start;
 	while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
-		if (!PGE_EMPTY(RD_GATT(j))) {
+		if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
 			pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
 				 j, RD_GATT(j));
 			return -EBUSY;
...
@@ -89,7 +89,7 @@ static int intel_i810_configure(void)
 	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
 	OUTREG32(intel_i810_private.registers, I810_PGETBL_CTL,
 		 agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED);
-	CACHE_FLUSH();
+	global_cache_flush();
 	if (agp_bridge->needs_scratch_page == TRUE) {
 		for (i = 0; i < current_size->num_entries; i++) {
@@ -130,22 +130,21 @@ static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
 		return -EINVAL;
 	}
 	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-		if (!PGE_EMPTY(agp_bridge->gatt_table[j])) {
+		if (!PGE_EMPTY(agp_bridge, agp_bridge->gatt_table[j]))
 			return -EBUSY;
-		}
 	}
 	if (type != 0 || mem->type != 0) {
 		if ((type == AGP_DCACHE_MEMORY) && (mem->type == AGP_DCACHE_MEMORY)) {
 			/* special insert */
-			CACHE_FLUSH();
+			global_cache_flush();
 			for (i = pg_start; i < (pg_start + mem->page_count); i++) {
 				OUTREG32(intel_i810_private.registers,
 					 I810_PTE_BASE + (i * 4),
 					 (i * 4096) | I810_PTE_LOCAL |
 					 I810_PTE_VALID);
 			}
-			CACHE_FLUSH();
+			global_cache_flush();
 			agp_bridge->tlb_flush(mem);
 			return 0;
 		}
@@ -155,13 +154,13 @@ static int intel_i810_insert_entries(agp_memory * mem, off_t pg_start,
 	}
 insert:
-	CACHE_FLUSH();
+	global_cache_flush();
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		OUTREG32(intel_i810_private.registers,
 			 I810_PTE_BASE + (j * 4),
 			 agp_bridge->mask_memory(mem->memory[i], mem->type));
 	}
-	CACHE_FLUSH();
+	global_cache_flush();
 	agp_bridge->tlb_flush(mem);
 	return 0;
@@ -178,7 +177,7 @@ static int intel_i810_remove_entries(agp_memory * mem, off_t pg_start,
 			 agp_bridge->scratch_page);
 	}
-	CACHE_FLUSH();
+	global_cache_flush();
 	agp_bridge->tlb_flush(mem);
 	return 0;
 }
@@ -350,7 +349,7 @@ static int intel_i830_create_gatt_table(void)
 	if (!intel_i830_private.registers) return (-ENOMEM);
 	temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000;
-	CACHE_FLUSH();
+	global_cache_flush();
 	/* we have to call this as early as possible after the MMIO base address is known */
 	intel_i830_init_gtt_entries();
@@ -417,7 +416,7 @@ static int intel_i830_configure(void)
 	pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
 	OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED);
-	CACHE_FLUSH();
+	global_cache_flush();
 	if (agp_bridge->needs_scratch_page == TRUE)
 		for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++)
@@ -458,13 +457,13 @@ static int intel_i830_insert_entries(agp_memory *mem,off_t pg_start,int type)
 	    (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
 		return (-EINVAL);
-	CACHE_FLUSH();
+	global_cache_flush();
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
 		OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),
 			 agp_bridge->mask_memory(mem->memory[i], mem->type));
-	CACHE_FLUSH();
+	global_cache_flush();
 	agp_bridge->tlb_flush(mem);
@@ -475,7 +474,7 @@ static int intel_i830_remove_entries(agp_memory *mem,off_t pg_start,int type)
 {
 	int i;
-	CACHE_FLUSH ();
+	global_cache_flush();
 	if (pg_start < intel_i830_private.gtt_entries) {
 		printk ("Trying to disable local/stolen memory\n");
@@ -485,7 +484,7 @@ static int intel_i830_remove_entries(agp_memory *mem,off_t pg_start,int type)
 	for (i = pg_start; i < (mem->page_count + pg_start); i++)
 		OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge->scratch_page);
-	CACHE_FLUSH();
+	global_cache_flush();
 	agp_bridge->tlb_flush(mem);
...
@@ -147,12 +147,12 @@ static int nvidia_insert_memory(agp_memory * mem, off_t pg_start, int type)
 		return -EINVAL;
 	for(j = pg_start; j < (pg_start + mem->page_count); j++) {
-		if (!PGE_EMPTY(agp_bridge->gatt_table[nvidia_private.pg_offset + j]))
+		if (!PGE_EMPTY(agp_bridge, agp_bridge->gatt_table[nvidia_private.pg_offset + j]))
 			return -EBUSY;
 	}
 	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
+		global_cache_flush();
 		mem->is_flushed = TRUE;
 	}
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
...
@@ -35,7 +35,7 @@ static int serverworks_create_page_map(struct serverworks_page_map *page_map)
 		return -ENOMEM;
 	}
 	SetPageReserved(virt_to_page(page_map->real));
-	CACHE_FLUSH();
+	global_cache_flush();
 	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
 					     PAGE_SIZE);
 	if (page_map->remapped == NULL) {
@@ -44,7 +44,7 @@ static int serverworks_create_page_map(struct serverworks_page_map *page_map)
 		page_map->real = NULL;
 		return -ENOMEM;
 	}
-	CACHE_FLUSH();
+	global_cache_flush();
 	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
 		page_map->remapped[i] = agp_bridge->scratch_page;
@@ -336,14 +336,14 @@ static int serverworks_insert_memory(agp_memory * mem,
 	while (j < (pg_start + mem->page_count)) {
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = SVRWRKS_GET_GATT(addr);
-		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
+		if (!PGE_EMPTY(agp_bridge, cur_gatt[GET_GATT_OFF(addr)])) {
 			return -EBUSY;
 		}
 		j++;
 	}
 	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
+		global_cache_flush();
 		mem->is_flushed = TRUE;
 	}
@@ -368,7 +368,7 @@ static int serverworks_remove_memory(agp_memory * mem, off_t pg_start,
 		return -EINVAL;
 	}
-	CACHE_FLUSH();
+	global_cache_flush();
 	agp_bridge->tlb_flush(mem);
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
...