Commit bc8f63af
Author: David Mosberger
Date:   Oct 09, 2003

    Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5
    into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5

Parents: f66525a4 ad2ac7d8

Showing 6 changed files with 94 additions and 47 deletions (+94 -47)
Changed files:

  arch/ia64/ia32/sys_ia32.c                  +29  -27
  arch/ia64/kernel/acpi.c                    +12   -0
  arch/ia64/mm/hugetlbpage.c                 +49  -17
  arch/ia64/sn/io/machvec/pci_bus_cvlink.c    +3   -0
  arch/ia64/sn/kernel/setup.c                 +0   -2
  include/asm-ia64/sn/nodepda.h               +1   -1
arch/ia64/ia32/sys_ia32.c

@@ -2724,8 +2724,8 @@ sys32_open (const char * filename, int flags, int mode)
 struct epoll_event32
 {
 	u32 events;
-	u64 data;
-} __attribute__((packed));
+	u32 data[2];
+};
 
 asmlinkage long
 sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 *event)
@@ -2740,10 +2740,10 @@ sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 *event)
 		return error;
 
 	__get_user(event64.events, &event->events);
-	__get_user(data_halfword, (u32*)(&event->data));
+	__get_user(data_halfword, &event->data[0]);
 	event64.data = data_halfword;
-	__get_user(data_halfword, ((u32*)(&event->data) + 1));
-	event64.data |= ((u64)data_halfword) << 32;
+	__get_user(data_halfword, &event->data[1]);
+	event64.data |= (u64)data_halfword << 32;
 
 	set_fs(KERNEL_DS);
 	error = sys_epoll_ctl(epfd, op, fd, &event64);
@@ -2758,8 +2758,9 @@ sys32_epoll_wait(int epfd, struct epoll_event32 *events, int maxevents,
 {
 	struct epoll_event *events64 = NULL;
 	mm_segment_t old_fs = get_fs();
-	int error;
+	int error, numevents, size;
 	int evt_idx;
+	int do_free_pages = 0;
 
 	if (maxevents <= 0) {
 		return -EINVAL;
@@ -2770,43 +2771,44 @@ sys32_epoll_wait(int epfd, struct epoll_event32 *events, int maxevents,
 				 maxevents * sizeof(struct epoll_event32))))
 		return error;
 
-	/* Allocate the space needed for the intermediate copy */
-	events64 = kmalloc(maxevents * sizeof(struct epoll_event), GFP_KERNEL);
+	/*
+	 * Allocate space for the intermediate copy. If the space needed
+	 * is large enough to cause kmalloc to fail, then try again with
+	 * __get_free_pages.
+	 */
+	size = maxevents * sizeof(struct epoll_event);
+	events64 = kmalloc(size, GFP_KERNEL);
 	if (events64 == NULL) {
-		return -ENOMEM;
-	}
-
-	/* Expand the 32-bit structures into the 64-bit structures */
-	for (evt_idx = 0; evt_idx < maxevents; evt_idx++) {
-		u32 data_halfword;
-		__get_user(events64[evt_idx].events, &events[evt_idx].events);
-		__get_user(data_halfword, (u32*)(&events[evt_idx].data));
-		events64[evt_idx].data = data_halfword;
-		__get_user(data_halfword, ((u32*)(&events[evt_idx].data) + 1));
-		events64[evt_idx].data |= ((u64)data_halfword) << 32;
+		events64 = __get_free_pages(GFP_KERNEL, get_order(size));
+		if (events64 == NULL)
+			return -ENOMEM;
+		do_free_pages = 1;
 	}
 
 	/* Do the system call */
 	set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/
-	error = sys_epoll_wait(epfd, events64, maxevents, timeout);
+	numevents = sys_epoll_wait(epfd, events64, maxevents, timeout);
 	set_fs(old_fs);
 
 	/* Don't modify userspace memory if we're returning an error */
-	if (!error) {
+	if (numevents > 0) {
 		/* Translate the 64-bit structures back into the 32-bit
 		   structures */
-		for (evt_idx = 0; evt_idx < maxevents; evt_idx++) {
+		for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
 			__put_user(events64[evt_idx].events,
 				   &events[evt_idx].events);
-			__put_user((u32)(events64[evt_idx].data),
-				   (u32*)(&events[evt_idx].data));
+			__put_user((u32)events64[evt_idx].data,
+				   &events[evt_idx].data[0]);
 			__put_user((u32)(events64[evt_idx].data >> 32),
-				   ((u32*)(&events[evt_idx].data) + 1));
+				   &events[evt_idx].data[1]);
 		}
 	}
 
-	kfree(events64);
-	return error;
+	if (do_free_pages)
+		free_pages(events64, get_order(size));
+	else
+		kfree(events64);
+	return numevents;
 }
 
 #ifdef NOTYET	/* UNTESTED FOR IA64 FROM HERE DOWN */
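The epoll changes above hinge on representing the 64-bit `data` member of `struct epoll_event` as two 32-bit halves in the ia32 compat layout: the low half lives in `data[0]` and the high half is shifted in from `data[1]`. The standalone sketch below (hypothetical userspace types with `_demo` names, not the kernel's `__get_user`/`__put_user` helpers) illustrates just that split-and-reassemble arithmetic:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for the 32-bit and 64-bit event layouts. */
struct epoll_event32_demo { uint32_t events; uint32_t data[2]; };
struct epoll_event64_demo { uint32_t events; uint64_t data; };

/* Widen: low half from data[0], high half from data[1], as in sys32_epoll_ctl(). */
static struct epoll_event64_demo widen(struct epoll_event32_demo e32)
{
	struct epoll_event64_demo e64 = { .events = e32.events };

	e64.data  = e32.data[0];
	e64.data |= (uint64_t)e32.data[1] << 32;
	return e64;
}

/* Narrow: the reverse split, as in sys32_epoll_wait(). */
static struct epoll_event32_demo narrow(struct epoll_event64_demo e64)
{
	struct epoll_event32_demo e32 = { .events = e64.events };

	e32.data[0] = (uint32_t)e64.data;
	e32.data[1] = (uint32_t)(e64.data >> 32);
	return e32;
}

int main(void)
{
	struct epoll_event32_demo in = { .events = 1, .data = { 0xdeadbeef, 0x12345678 } };

	assert(widen(in).data == 0x12345678deadbeefULL);
	assert(narrow(widen(in)).data[0] == 0xdeadbeef);
	return 0;
}

Dropping the packed u64 in favour of u32 data[2] also lets the compat wrappers access each half directly via `data[0]`/`data[1]` instead of casting into the middle of a packed field, which is exactly the change visible in the __get_user/__put_user lines of the diff.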
arch/ia64/kernel/acpi.c

@@ -454,6 +454,12 @@ acpi_numa_arch_fixup (void)
 {
 	int i, j, node_from, node_to;
 
+	/* If there's no SRAT, fix the phys_id */
+	if (srat_num_cpus == 0) {
+		node_cpuid[0].phys_id = hard_smp_processor_id();
+		return;
+	}
+
 	/* calculate total number of nodes in system from PXM bitmap */
 	numnodes = 0;		/* init total nodes in system */
@@ -614,6 +620,12 @@ acpi_boot_init (void)
 	smp_build_cpu_map();
 
 # ifdef CONFIG_NUMA
+	if (srat_num_cpus == 0) {
+		int cpu, i = 1;
+		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
+			if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
+				node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
+	}
 	build_cpu_to_node_map();
 # endif
 #endif
arch/ia64/mm/hugetlbpage.c

@@ -24,9 +24,42 @@ static long htlbpagemem;
 int htlbpage_max;
 static long htlbzone_pages;
 
-static LIST_HEAD(htlbpage_freelist);
+static struct list_head hugepage_freelists[MAX_NUMNODES];
 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
 
+static void enqueue_huge_page(struct page *page)
+{
+	list_add(&page->list,
+		 &hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
+}
+
+static struct page *dequeue_huge_page(void)
+{
+	int nid = numa_node_id();
+	struct page *page = NULL;
+
+	if (list_empty(&hugepage_freelists[nid])) {
+		for (nid = 0; nid < MAX_NUMNODES; ++nid)
+			if (!list_empty(&hugepage_freelists[nid]))
+				break;
+	}
+	if (nid >= 0 && nid < MAX_NUMNODES &&
+	    !list_empty(&hugepage_freelists[nid])) {
+		page = list_entry(hugepage_freelists[nid].next, struct page, list);
+		list_del(&page->list);
+	}
+	return page;
+}
+
+static struct page *alloc_fresh_huge_page(void)
+{
+	static int nid = 0;
+	struct page *page;
+
+	page = alloc_pages_node(nid, GFP_HIGHUSER, HUGETLB_PAGE_ORDER);
+	nid = (nid + 1) % numnodes;
+	return page;
+}
+
 void free_huge_page(struct page *page);
 
 static struct page *alloc_hugetlb_page(void)
@@ -35,13 +68,11 @@ static struct page *alloc_hugetlb_page(void)
 	struct page *page;
 
 	spin_lock(&htlbpage_lock);
-	if (list_empty(&htlbpage_freelist)) {
+	page = dequeue_huge_page();
+	if (!page) {
 		spin_unlock(&htlbpage_lock);
 		return NULL;
 	}
-	page = list_entry(htlbpage_freelist.next, struct page, list);
-	list_del(&page->list);
 	htlbpagemem--;
 	spin_unlock(&htlbpage_lock);
 	set_page_count(page, 1);
@@ -228,7 +259,7 @@ void free_huge_page(struct page *page)
 	INIT_LIST_HEAD(&page->list);
 	spin_lock(&htlbpage_lock);
-	list_add(&page->list, &htlbpage_freelist);
+	enqueue_huge_page(page);
 	htlbpagemem++;
 	spin_unlock(&htlbpage_lock);
 }
@@ -371,7 +402,7 @@ int try_to_free_low(int count)
 	map = NULL;
 	spin_lock(&htlbpage_lock);
-	list_for_each(p, &htlbpage_freelist) {
+	list_for_each(p, &hugepage_freelists[0]) {
 		if (map) {
 			list_del(&map->list);
 			update_and_free_page(map);
@@ -408,11 +439,11 @@ int set_hugetlb_mem_size(int count)
 		return (int)htlbzone_pages;
 	if (lcount > 0) {	/* Increase the mem size. */
 		while (lcount--) {
-			page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
+			page = alloc_fresh_huge_page();
			if (page == NULL)
 				break;
 			spin_lock(&htlbpage_lock);
-			list_add(&page->list, &htlbpage_freelist);
+			enqueue_huge_page(page);
 			htlbpagemem++;
 			htlbzone_pages++;
 			spin_unlock(&htlbpage_lock);
@@ -449,17 +480,18 @@ __setup("hugepages=", hugetlb_setup);
 static int __init hugetlb_init(void)
 {
-	int i, j;
+	int i;
 	struct page *page;
 
+	for (i = 0; i < MAX_NUMNODES; ++i)
+		INIT_LIST_HEAD(&hugepage_freelists[i]);
+
 	for (i = 0; i < htlbpage_max; ++i) {
-		page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
+		page = alloc_fresh_huge_page();
 		if (!page)
 			break;
-		for (j = 0; j < HPAGE_SIZE/PAGE_SIZE; ++j)
-			SetPageReserved(&page[j]);
 		spin_lock(&htlbpage_lock);
-		list_add(&page->list, &htlbpage_freelist);
+		enqueue_huge_page(page);
 		spin_unlock(&htlbpage_lock);
 	}
 	htlbpage_max = htlbpagemem = htlbzone_pages = i;
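The hugetlbpage.c changes above replace the single global `htlbpage_freelist` with one freelist per NUMA node: `enqueue_huge_page()` files a page under its owning node, `dequeue_huge_page()` prefers the local node and falls back to scanning the other nodes, and `alloc_fresh_huge_page()` spreads fresh allocations round-robin across nodes. The sketch below (plain userspace C with made-up `demo_*` names, not the kernel's `list_head` API or page allocator) shows the same per-node freelist policy in isolation:

#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAX_NODES 4	/* stand-in for MAX_NUMNODES */

/* A minimal singly linked freelist per node. */
struct demo_page {
	int id;
	struct demo_page *next;
};

static struct demo_page *freelists[DEMO_MAX_NODES];

/* File a page under its node, like enqueue_huge_page(). */
static void demo_enqueue(int nid, struct demo_page *p)
{
	p->next = freelists[nid];
	freelists[nid] = p;
}

/* Prefer the local node, else fall back to the first node with free pages. */
static struct demo_page *demo_dequeue(int local_nid)
{
	int nid = local_nid;
	struct demo_page *p;

	if (freelists[nid] == NULL)
		for (nid = 0; nid < DEMO_MAX_NODES && freelists[nid] == NULL; ++nid)
			;
	if (nid >= DEMO_MAX_NODES)
		return NULL;
	p = freelists[nid];
	freelists[nid] = p->next;
	return p;
}

int main(void)
{
	struct demo_page *pg = malloc(sizeof(*pg));
	struct demo_page *got;

	pg->id = 42;
	pg->next = NULL;
	demo_enqueue(2, pg);		/* only node 2 has a free page */
	got = demo_dequeue(0);		/* request "locally" on node 0 */
	printf("got page %d via cross-node fallback\n", got ? got->id : -1);
	free(got);
	return 0;
}

The fallback scan is what keeps `alloc_hugetlb_page()` working when the local node's pool is empty but another node still has reserved huge pages.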
arch/ia64/sn/io/machvec/pci_bus_cvlink.c

@@ -867,6 +867,9 @@ sn_pci_init (void)
 	int i = 0;
 	struct pci_controller *controller;
 
+	if (!ia64_platform_is("sn2"))
+		return 0;
+
 	/*
 	 * set pci_raw_ops, etc.
 	 */
arch/ia64/sn/kernel/setup.c

@@ -147,7 +147,6 @@ char drive_info[4*16];
  * Sets up an initial console to aid debugging. Intended primarily
  * for bringup. See start_kernel() in init/main.c.
  */
-#if defined(CONFIG_IA64_EARLY_PRINTK_SGI_SN) || defined(CONFIG_IA64_SGI_SN_SIM)
 void __init
 early_sn_setup(void)
@@ -189,7 +188,6 @@ early_sn_setup(void)
 		printk(KERN_DEBUG "early_sn_setup: setting master_node_bedrock_address to 0x%lx\n",
 		       master_node_bedrock_address);
 	}
 }
-#endif /* CONFIG_IA64_EARLY_PRINTK_SGI_SN */
 
 #ifdef CONFIG_IA64_MCA
 extern int platform_intr_list[];
include/asm-ia64/sn/nodepda.h

@@ -128,7 +128,7 @@ typedef struct irqpda_s irqpda_t;
  * Check if given a compact node id the corresponding node has all the
  * cpus disabled.
  */
-#define is_headless_node(cnode)	(!any_online_cpu(node_to_cpumask(cnode)))
+#define is_headless_node(cnode)	(!node_to_cpu_mask[cnode])
 
 /*
  * Check if given a node vertex handle the corresponding node has all the