Kirill Smelkov / linux / Commits

Commit 3d7af078
Authored Mar 04, 2002 by Rusty Russell; committed by Linus Torvalds on Mar 04, 2002
[PATCH] per-cpu areas
This is the Richard Henderson-approved, cleaner, brighter per-cpu patch.
parent 09c1076e
Showing 6 changed files with 58 additions and 2 deletions
 arch/i386/vmlinux.lds    | +4  -0
 arch/ppc/vmlinux.lds     | +4  -0
 include/linux/cache.h    | +3  -1
 include/linux/compiler.h | +7  -0
 include/linux/smp.h      | +15 -1
 init/main.c              | +25 -0
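To make the interface concrete before the diffs, here is a minimal usage sketch. It is not part of the patch; the variable and function names are invented, and it assumes only the macros introduced below (__per_cpu_data, per_cpu(), this_cpu()) together with the existing NR_CPUS:

#include <linux/smp.h>

/* hypothetical example: one counter per CPU, emitted into .data.percpu */
static unsigned long packet_count __per_cpu_data;

void note_packet(void)
{
	/* touches only the executing CPU's copy */
	this_cpu(packet_count)++;
}

unsigned long total_packets(void)
{
	unsigned long sum = 0;
	int cpu;

	/* walk every CPU's copy */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += per_cpu(packet_count, cpu);
	return sum;
}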
arch/i386/vmlinux.lds
@@ -57,6 +57,10 @@ SECTIONS
 	*(.initcall7.init)
   }
   __initcall_end = .;
+  . = ALIGN(32);
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+  __per_cpu_end = .;
   . = ALIGN(4096);
   __init_end = .;
arch/ppc/vmlinux.lds
@@ -111,6 +111,10 @@ SECTIONS
 	*(.initcall7.init)
   }
   __initcall_end = .;
+  . = ALIGN(32);
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+  __per_cpu_end = .;
   . = ALIGN(4096);
   __init_end = .;
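Both linker scripts collect everything that C code tags for the .data.percpu section between the two new symbols, and keep the block inside the init region (before __init_end), so the pristine copy can be dropped after boot once each CPU has its own. C code picks the bounds up as external arrays, the same idiom init/main.c uses later in this patch; an illustrative snippet (not from the patch):

/* the linker defines these; their addresses delimit .data.percpu */
extern char __per_cpu_start[], __per_cpu_end[];

static unsigned long percpu_section_bytes(void)
{
	/* size of one pristine copy of all per-cpu objects */
	return __per_cpu_end - __per_cpu_start;
}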
include/linux/cache.h
@@ -4,8 +4,10 @@
 #include <linux/config.h>
 #include <asm/cache.h>
 
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
 #ifndef L1_CACHE_ALIGN
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+#define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES)
 #endif
 
 #ifndef SMP_CACHE_BYTES
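The new generic ALIGN(x,a) rounds x up to the next multiple of a (a power-of-two alignment is assumed), and L1_CACHE_ALIGN becomes a thin wrapper around it; init/main.c below uses it with SMP_CACHE_BYTES. A stand-alone check of the arithmetic, purely for illustration:

#include <assert.h>

#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))

int main(void)
{
	/* rounds up to the next multiple of the alignment */
	assert(ALIGN(13, 32) == 32);
	assert(ALIGN(32, 32) == 32);
	assert(ALIGN(33, 32) == 64);
	return 0;
}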
include/linux/compiler.h
@@ -13,4 +13,11 @@
 #define likely(x)	__builtin_expect((x),1)
 #define unlikely(x)	__builtin_expect((x),0)
 
+/* This macro obfuscates arithmetic on a variable address so that gcc
+   shouldn't recognize the original var, and make assumptions about it:
+   strcpy(s, "xxx"+X) => memcpy(s, "xxx"+X, 4-X) */
+#define RELOC_HIDE(var, off) \
+({ __typeof__(&(var)) __ptr; \
+   __asm__ ("" : "=g"(__ptr) : "0"((void *)&(var) + (off))); \
+   *__ptr; })
 #endif /* __LINUX_COMPILER_H */
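The strcpy-to-memcpy folding in the comment is one example of gcc reasoning about an address whose origin it can see. As an illustration (not part of the patch), the empty asm with the matching "=g"/"0" constraints launders the pointer, so the compiler can no longer connect the access back to the variable it was derived from:

/* hypothetical variable; assumes the RELOC_HIDE definition added above */
static long counter;

long read_direct(unsigned long off)
{
	/* gcc can see this pointer is derived from &counter and may
	   optimize on the assumption that the access stays inside it */
	return *(long *)((void *)&counter + off);
}

long read_hidden(unsigned long off)
{
	/* the pointer's origin is hidden behind the empty asm, so the
	   access at &counter + off is left alone */
	return RELOC_HIDE(counter, off);
}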
include/linux/smp.h
@@ -11,6 +11,7 @@
 #ifdef CONFIG_SMP
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 #include <asm/smp.h>
 
 /*
@@ -71,7 +72,17 @@ extern volatile int smp_msg_id;
 #define MSG_RESCHEDULE		0x0003	/* Reschedule request from master CPU*/
 #define MSG_CALL_FUNCTION	0x0004	/* Call function on all other CPUs */
 
-#else
+#define __per_cpu_data	__attribute__((section(".data.percpu")))
+
+#ifndef __HAVE_ARCH_PER_CPU
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu) RELOC_HIDE(var, per_cpu_offset(cpu))
+#define this_cpu(var) per_cpu(var, smp_processor_id())
+#endif /* !__HAVE_ARCH_PER_CPU */
+
+#else /* !SMP */
 
 /*
  * These macros fold the SMP functionality into a single CPU system
@@ -90,6 +101,9 @@ extern volatile int smp_msg_id;
 #define cpu_online_map				1
 static inline void smp_send_reschedule(int cpu) { }
 static inline void smp_send_reschedule_all(void) { }
+#define __per_cpu_data
+#define per_cpu(var, cpu)			var
+#define this_cpu(var)				var
 
 #endif
 #endif
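On uniprocessor builds the three macros collapse: __per_cpu_data drops the section attribute and both accessors name the variable directly, so code written against the SMP interface reduces to a plain global with no indirection. A hedged sketch of what a use site becomes (hypothetical variable name):

/* with CONFIG_SMP unset, after preprocessing this is just an ordinary
   global and an ordinary increment */
static unsigned long hits __per_cpu_data;	/* -> static unsigned long hits; */

void bump(void)
{
	this_cpu(hits)++;			/* -> hits++; */
}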
init/main.c
@@ -270,8 +270,32 @@ static void __init smp_init(void)
 #define smp_init()	do { } while (0)
 #endif
 
+static inline void setup_per_cpu_areas(void)
+{
+}
 #else
 
+#ifndef __HAVE_ARCH_PER_CPU
+unsigned long __per_cpu_offset[NR_CPUS];
+
+static void __init setup_per_cpu_areas(void)
+{
+	unsigned long size, i;
+	char *ptr;
+	/* Created by linker magic */
+	extern char __per_cpu_start[], __per_cpu_end[];
+
+	/* Copy section for each CPU (we discard the original) */
+	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
+	ptr = alloc_bootmem(size * NR_CPUS);
+
+	for (i = 0; i < NR_CPUS; i++, ptr += size) {
+		__per_cpu_offset[i] = ptr - __per_cpu_start;
+		memcpy(ptr, __per_cpu_start, size);
+	}
+}
+#endif /* !__HAVE_ARCH_PER_CPU */
+
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 {
@@ -314,6 +338,7 @@ asmlinkage void __init start_kernel(void)
 	lock_kernel();
 	printk(linux_banner);
 	setup_arch(&command_line);
+	setup_per_cpu_areas();
 	printk("Kernel command line: %s\n", saved_command_line);
 	parse_options(command_line);
 	trap_init();
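setup_per_cpu_areas() records, for each CPU, the distance between that CPU's freshly copied block and the link-time start of the section. A per-cpu variable sits at some fixed displacement inside the section, so adding __per_cpu_offset[cpu] to its link-time address lands on its copy in that CPU's block, which is exactly the address RELOC_HIDE dereferences in per_cpu(). A small sketch of the arithmetic (illustrative helper, not from the patch):

extern char __per_cpu_start[];
extern unsigned long __per_cpu_offset[];

/* address of CPU cpu's copy of the per-cpu object whose link-time
   address is addr (addr must lie inside .data.percpu) */
static void *per_cpu_address(void *addr, int cpu)
{
	return (char *)addr + __per_cpu_offset[cpu];
}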