Commit 7de08b4e authored by Gustavo F. Padovan, committed by Ingo Molnar

x86: coding styles fixes to arch/x86/kernel/process_64.c

Fix about 50 errors and many warnings without changing the generated process_64.o:

arch/x86/kernel/process_64.o:
text    data     bss     dec     hex filename
5236       8      24    5268    1494 process_64.o.after
5236       8      24    5268    1494 process_64.o.before
md5:
9c35e9debdea4e471288c6e8ca267a75  process_64.o.after
9c35e9debdea4e471288c6e8ca267a75  process_64.o.before
Signed-off-by: Gustavo F. Padovan <gustavo@las.ic.unicamp.br>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3964cd3a
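For reference, the roughly 50 checkpatch.pl complaints fixed in the diff below all fall into a handful of style classes. A minimal illustrative snippet (not taken from the patch) showing the preferred forms:

	#include <linux/kernel.h>	/* prefer <linux/...> over the <asm/...> wrappers */

	/* Illustrative only: the forms checkpatch.pl asks for. */
	static void toy_max(int a, int b, int *out)	/* "int *out", never "int * out" */
	{
		if (a > b) {		/* spaces around binary operators */
			*out = a;
		} else {		/* "}" and "else" cuddled on one line */
			*out = b;
		}
	}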
@@ -37,11 +37,11 @@
 #include <linux/kdebug.h>
 #include <linux/tick.h>
 #include <linux/prctl.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>

-#include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/mmu_context.h>
@@ -88,7 +88,7 @@ void exit_idle(void)
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);

-#include <asm/nmi.h>
+#include <linux/nmi.h>
 /* We halt the CPU with physical CPU hotplug */
 static inline void play_dead(void)
 {
@@ -152,7 +152,7 @@ void cpu_idle(void)
 }

 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs * regs)
+void __show_regs(struct pt_regs *regs)
 {
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
@@ -177,28 +177,28 @@ void __show_regs(struct pt_regs * regs)
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
-	       fs,fsindex,gs,gsindex,shadowgs);
+	       fs, fsindex, gs, gsindex, shadowgs);
	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);

	get_debugreg(d0, 0);
@@ -314,10 +314,10 @@ void prepare_to_copy(struct task_struct *tsk)

 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
-	struct task_struct * p, struct pt_regs * regs)
+	struct task_struct *p, struct pt_regs *regs)
 {
	int err;
-	struct pt_regs * childregs;
+	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
@@ -362,10 +362,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
 #endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
@@ -544,7 +544,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter>5)
+	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
@@ -552,13 +552,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
@@ -584,7 +584,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
@@ -593,13 +593,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
@@ -609,7 +609,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
@@ -618,12 +618,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = read_pda(oldrsp);
	write_pda(oldrsp, next->usersp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
@@ -664,7 +664,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
 {
	long error;
-	char * filename;
+	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
@@ -722,55 +722,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
 unsigned long get_wchan(struct task_struct *p)
 {
	unsigned long stack;
-	u64 fp,ip;
+	u64 fp, ip;
	int count = 0;

-	if (!p || p == current || p->state==TASK_RUNNING)
+	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
 }
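The get_wchan() walk above relies on the x86-64 frame-pointer layout: each frame stores the caller's saved %rbp at fp and the return address at fp + 8, and the loop bails out after 16 frames or once fp leaves the task's stack page. A hypothetical struct (not in this file) makes the two loads explicit:

	/* Hypothetical view of the frame get_wchan() reads: */
	struct stack_frame {
		struct stack_frame *next_fp;	/* saved %rbp: the "fp = *(u64 *)fp" load       */
		unsigned long return_address;	/* saved %rip: the "ip = *(u64 *)(fp+8)" load   */
	};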

 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
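The ARCH_SET_GS case above is reached from userspace via the arch_prctl(2) syscall; bases at or below 0xffffffff go through a GDT TLS slot because reloading a selector is cheaper than an MSR write, while larger bases need MSR_KERNEL_GS_BASE. A minimal userspace sketch of that entry point, assuming an x86-64 Linux toolchain:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/prctl.h>		/* ARCH_SET_GS, ARCH_GET_GS */

	int main(void)
	{
		unsigned long base = 0;

		/* Round-trips through the do_arch_prctl() path shown above. */
		if (syscall(SYS_arch_prctl, ARCH_GET_GS, &base) != 0)
			return 1;
		printf("GS base: %#lx\n", base);
		return 0;
	}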
@@ -824,8 +824,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
-		}
-		else
+		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;