Commit 3e69f8b2
authored May 09, 2003 by David Mosberger
ia64: Patch by John Marvin: Add virtual mem-map support.
parent 1695a925
Showing 4 changed files with 221 additions and 14 deletions
arch/ia64/kernel/ia64_ksyms.c   +6   -0
arch/ia64/mm/fault.c            +15  -0
arch/ia64/mm/init.c             +194 -13
include/asm-ia64/page.h         +6   -1
arch/ia64/kernel/ia64_ksyms.c  View file @ 3e69f8b2
...
@@ -57,6 +57,12 @@ EXPORT_SYMBOL_NOVERS(__up);
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+#include <asm/pgtable.h>
+EXPORT_SYMBOL(vmalloc_end);
+EXPORT_SYMBOL(ia64_pfn_valid);
+#endif
 #include <asm/processor.h>
 EXPORT_SYMBOL(cpu_info__per_cpu);
 EXPORT_SYMBOL(kernel_thread);
...
arch/ia64/mm/fault.c  View file @ 3e69f8b2
...
@@ -58,6 +58,18 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (in_atomic() || !mm)
 		goto no_context;
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+	/*
+	 * If fault is in region 5 and we are in the kernel, we may already
+	 * have the mmap_sem (pfn_valid macro is called during mmap). There
+	 * is no vma for region 5 addr's anyway, so skip getting the semaphore
+	 * and go directly to the exception handling code.
+	 */
+	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
+		goto bad_area_no_up;
+#endif
 	down_read(&mm->mmap_sem);
 	vma = find_vma_prev(mm, address, &prev_vma);
...
@@ -139,6 +151,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
   bad_area:
 	up_read(&mm->mmap_sem);
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+  bad_area_no_up:
+#endif
 	if ((isr & IA64_ISR_SP)
 	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
 	{
...
arch/ia64/mm/init.c  View file @ 3e69f8b2
...
@@ -38,6 +38,13 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define LARGE_GAP	0x40000000	/* Use virtual mem map if hole is > than this */
+unsigned long vmalloc_end = VMALLOC_END_INIT;
+static struct page *vmem_map;
+static unsigned long num_dma_physpages;
+#endif
 static int pgt_cache_water[2] = { 25, 50 };
 void
...
@@ -337,6 +344,139 @@ ia64_mmu_init (void *my_cpu_data)
 	ia64_tlb_init();
 }
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+
+static int
+create_mem_map_page_table (u64 start, u64 end, void *arg)
+{
+	unsigned long address, start_page, end_page;
+	struct page *map_start, *map_end;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
+	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
+
+	start_page = (unsigned long) map_start & PAGE_MASK;
+	end_page = PAGE_ALIGN((unsigned long) map_end);
+
+	for (address = start_page; address < end_page; address += PAGE_SIZE) {
+		pgd = pgd_offset_k(address);
+		if (pgd_none(*pgd))
+			pgd_populate(&init_mm, pgd, alloc_bootmem_pages(PAGE_SIZE));
+		pmd = pmd_offset(pgd, address);
+		if (pmd_none(*pmd))
+			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages(PAGE_SIZE));
+		pte = pte_offset_kernel(pmd, address);
+		if (pte_none(*pte))
+			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages(PAGE_SIZE)) >> PAGE_SHIFT,
+					     PAGE_KERNEL));
+	}
+	return 0;
+}
+
+struct memmap_init_callback_data {
+	struct page *start;
+	struct page *end;
+	int nid;
+	unsigned long zone;
+};
+
+static int
+virtual_memmap_init (u64 start, u64 end, void *arg)
+{
+	struct memmap_init_callback_data *args;
+	struct page *map_start, *map_end;
+
+	args = (struct memmap_init_callback_data *) arg;
+
+	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
+	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
+
+	if (map_start < args->start)
+		map_start = args->start;
+	if (map_end > args->end)
+		map_end = args->end;
+
+	/*
+	 * We have to initialize "out of bounds" struct page elements that fit completely
+	 * on the same pages that were allocated for the "in bounds" elements because they
+	 * may be referenced later (and found to be "reserved").
+	 */
+	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
+	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
+		    / sizeof(struct page));
+
+	if (map_start < map_end)
+		memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
+				 args->nid, args->zone, page_to_pfn(map_start));
+	return 0;
+}
+
+void
+memmap_init (struct page *start, unsigned long size, int nid,
+	     unsigned long zone, unsigned long start_pfn)
+{
+	if (!vmem_map)
+		memmap_init_zone(start, size, nid, zone, start_pfn);
+	else {
+		struct memmap_init_callback_data args;
+
+		args.start = start;
+		args.end = start + size;
+		args.nid = nid;
+		args.zone = zone;
+
+		efi_memmap_walk(virtual_memmap_init, &args);
+	}
+}
+
+int
+ia64_pfn_valid (unsigned long pfn)
+{
+	char byte;
+
+	return __get_user(byte, (char *) pfn_to_page(pfn)) == 0;
+}
+
+static int
+count_dma_pages (u64 start, u64 end, void *arg)
+{
+	unsigned long *count = arg;
+
+	if (end <= MAX_DMA_ADDRESS)
+		*count += (end - start) >> PAGE_SHIFT;
+	return 0;
+}
+
+static int
+find_largest_hole (u64 start, u64 end, void *arg)
+{
+	u64 *max_gap = arg;
+	static u64 last_end = PAGE_OFFSET;
+
+	/* NOTE: this algorithm assumes efi memmap table is ordered */
+
+	if (*max_gap < (start - last_end))
+		*max_gap = start - last_end;
+	last_end = end;
+	return 0;
+}
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
+static int
+count_pages (u64 start, u64 end, void *arg)
+{
+	unsigned long *count = arg;
+
+	*count += (end - start) >> PAGE_SHIFT;
+	return 0;
+}
+
 /*
  * Set up the page tables.
  */
...
@@ -348,18 +488,70 @@ paging_init (void)
 	extern void discontig_paging_init (void);

 	discontig_paging_init();
+	efi_memmap_walk(count_pages, &num_physpages);
 }
 #else /* !CONFIG_DISCONTIGMEM */
 void
 paging_init (void)
 {
-	unsigned long max_dma, zones_size[MAX_NR_ZONES];
+	unsigned long max_dma;
+	unsigned long zones_size[MAX_NR_ZONES];
+# ifdef CONFIG_VIRTUAL_MEM_MAP
+	unsigned long zholes_size[MAX_NR_ZONES];
+	unsigned long max_gap;
+# endif

 	/* initialize mem_map[] */
 	memset(zones_size, 0, sizeof(zones_size));

+	num_physpages = 0;
+	efi_memmap_walk(count_pages, &num_physpages);
+
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

+# ifdef CONFIG_VIRTUAL_MEM_MAP
+	memset(zholes_size, 0, sizeof(zholes_size));
+
+	num_dma_physpages = 0;
+	efi_memmap_walk(count_dma_pages, &num_dma_physpages);
+
+	if (max_low_pfn < max_dma) {
+		zones_size[ZONE_DMA] = max_low_pfn;
+		zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
+	} else {
+		zones_size[ZONE_DMA] = max_dma;
+		zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
+		if (num_physpages > num_dma_physpages) {
+			zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+			zholes_size[ZONE_NORMAL] = ((max_low_pfn - max_dma)
+						    - (num_physpages - num_dma_physpages));
+		}
+	}
+
+	max_gap = 0;
+	efi_memmap_walk(find_largest_hole, (u64 *) &max_gap);
+
+	if (max_gap < LARGE_GAP) {
+		vmem_map = (struct page *) 0;
+		free_area_init_node(0, &contig_page_data, NULL, zones_size, 0, zholes_size);
+		mem_map = contig_page_data.node_mem_map;
+	} else {
+		unsigned long map_size;
+
+		/* allocate virtual_mem_map */
+		map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+		vmalloc_end -= map_size;
+		vmem_map = (struct page *) vmalloc_end;
+		efi_memmap_walk(create_mem_map_page_table, 0);
+		free_area_init_node(0, &contig_page_data, vmem_map, zones_size, 0, zholes_size);
+		mem_map = contig_page_data.node_mem_map;
+		printk("Virtual mem_map starts at 0x%p\n", mem_map);
+	}
+# else /* !CONFIG_VIRTUAL_MEM_MAP */
 	if (max_low_pfn < max_dma)
 		zones_size[ZONE_DMA] = max_low_pfn;
 	else {
...
@@ -367,18 +559,10 @@ paging_init (void)
 		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
 	}
 	free_area_init(zones_size);
+# endif /* !CONFIG_VIRTUAL_MEM_MAP */
 }
 #endif /* !CONFIG_DISCONTIGMEM */

-static int
-count_pages (u64 start, u64 end, void *arg)
-{
-	unsigned long *count = arg;
-
-	*count += (end - start) >> PAGE_SHIFT;
-	return 0;
-}
-
 static int
 count_reserved_pages (u64 start, u64 end, void *arg)
 {
...
@@ -415,9 +599,6 @@ mem_init (void)
 	max_mapnr = max_low_pfn;
 #endif

-	num_physpages = 0;
-	efi_memmap_walk(count_pages, &num_physpages);
-
 	high_memory = __va(max_low_pfn * PAGE_SIZE);

 	for_each_pgdat(pgdat)
...
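Aside: the map_start/map_end widening in virtual_memmap_init() above is easiest to see in isolation. The following stand-alone C sketch is not part of the commit; the page size, struct page size, and the backing array are made-up values chosen only to illustrate the rounding arithmetic.

/* Stand-alone sketch (not from the commit): the map_start/map_end rounding
 * used by virtual_memmap_init().  All sizes here are hypothetical. */
#include <stdio.h>

#define PAGE_SIZE	16384UL			/* assumed page size */
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

struct page { char pad[56]; };			/* hypothetical sizeof(struct page) */

static struct page vmem_map[8192];		/* stand-in for the virtual mem_map */

int main(void)
{
	struct page *map_start = vmem_map + 1000;	/* arbitrary in-range slice */
	struct page *map_end   = vmem_map + 5000;

	/* Back up so the partially covered backing page is fully initialized ... */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	/* ... and extend forward to the end of the page holding map_end. */
	map_end += (PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		   / sizeof(struct page);

	printf("widened range covers %lu struct page entries\n",
	       (unsigned long) (map_end - map_start));
	return 0;
}

With the assumed 16 KB page and 56-byte struct page, each backing page holds roughly 292 entries, so the widening moves each endpoint by at most one backing page's worth of struct page elements.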
include/asm-ia64/page.h  View file @ 3e69f8b2
...
@@ -91,7 +91,12 @@ do { \
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

 #ifndef CONFIG_DISCONTIGMEM
-#define pfn_valid(pfn)		((pfn) < max_mapnr)
+# ifdef CONFIG_VIRTUAL_MEM_MAP
+  extern int ia64_pfn_valid (unsigned long pfn);
+# define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+# else
+# define pfn_valid(pfn)		((pfn) < max_mapnr)
+# endif
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define page_to_pfn(page)	((unsigned long) (page - mem_map))
 #define pfn_to_page(pfn)	(mem_map + (pfn))
...
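For intuition about the LARGE_GAP test in paging_init() above, here is a stand-alone sketch of the largest-hole scan. The memory ranges are invented, and, like find_largest_hole(), the scan assumes the ranges are sorted.

/* Stand-alone sketch (not from the commit): the largest-hole scan that
 * decides between a contiguous mem_map and the virtual mem_map. */
#include <stdio.h>

#define LARGE_GAP	0x40000000UL	/* same 1 GB threshold as the patch */

struct range { unsigned long start, end; };

int main(void)
{
	/* hypothetical, ordered physical memory ranges with a 1.5 GB hole */
	struct range mem[] = {
		{ 0x00000000UL, 0x08000000UL },
		{ 0x68000000UL, 0x80000000UL },
	};
	unsigned long last_end = 0, max_gap = 0;
	unsigned int i;

	for (i = 0; i < sizeof(mem) / sizeof(mem[0]); i++) {
		if (mem[i].start - last_end > max_gap)
			max_gap = mem[i].start - last_end;
		last_end = mem[i].end;
	}

	printf("largest hole 0x%lx -> use %s mem_map\n", max_gap,
	       max_gap < LARGE_GAP ? "contiguous" : "virtual");
	return 0;
}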