Commit d6611edf authored by David S. Miller

Merge nuts.ninka.net:/disk1/davem/BK/network-2.5

into nuts.ninka.net:/disk1/davem/BK/net-2.5
parents 82cfd269 edf12049
......@@ -33,6 +33,11 @@ to the developers under an NDA (which allows the development of a GPL
driver however), but things are not moving very fast (see
http://r-engine.sourceforge.net/) (PCI vendor/device is 0x10cf/0x2011).
There is a fourth model connected on the USB bus in TR1* Vaio laptops.
This camera is not supported at all by the current driver; in fact,
little information, if any, is available for this camera
(USB vendor/device is 0x054c/0x0107).
Driver options:
---------------
......
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -test8
EXTRAVERSION = -test9
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
......
......@@ -143,3 +143,9 @@ int do_settimeofday(struct timespec *tv)
}
EXPORT_SYMBOL(do_settimeofday);
unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
......@@ -711,6 +711,32 @@ efi_mem_attributes (unsigned long phys_addr)
return 0;
}
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
u64 efi_desc_size;
efi_map_start = __va(ia64_boot_param->efi_memmap);
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
efi_desc_size = ia64_boot_param->efi_memdesc_size;
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
if (!(md->attribute & EFI_MEMORY_WB))
return 0;
if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
*size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
return 1;
}
}
return 0;
}
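Editorial note on the helper above: the membership test relies on unsigned wraparound, so a phys_addr below md->phys_addr produces a huge difference and fails the single comparison, and *size is then clamped so the range cannot run past the end of the write-back-cacheable descriptor. The standalone sketch below exercises just that arithmetic with a made-up descriptor; the struct and function names are illustrative, not kernel names, and the shift of 12 assumes the usual 4 KiB EFI page size.

/* Illustration only (not part of the commit): the membership-and-clamp
 * arithmetic used by valid_phys_addr_range(), in standalone form. */
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12	/* EFI memory descriptors count 4 KiB pages */

struct fake_desc {		/* stand-in for one efi_memory_desc_t */
	uint64_t phys_addr;
	uint64_t num_pages;
};

static int in_range_and_clamp(const struct fake_desc *md,
			      uint64_t phys_addr, uint64_t *size)
{
	uint64_t len = md->num_pages << EFI_PAGE_SHIFT;

	/* Unsigned wraparound: if phys_addr < md->phys_addr the difference
	 * becomes huge, so one comparison rejects both sides of the region. */
	if (phys_addr - md->phys_addr >= len)
		return 0;
	/* Clamp the caller's size to the end of this descriptor. */
	if (*size > md->phys_addr + len - phys_addr)
		*size = md->phys_addr + len - phys_addr;
	return 1;
}

int main(void)
{
	struct fake_desc md = { 0x100000, 16 };	/* 64 KiB region at 1 MiB */
	uint64_t size = 0x20000;		/* caller asks for 128 KiB */

	if (in_range_and_clamp(&md, 0x104000, &size))
		printf("in range, size clamped to 0x%llx\n",
		       (unsigned long long)size);	/* prints 0xc000 */
	return 0;
}

With that descriptor, an address 16 KiB into the 64 KiB region leaves only 48 KiB (0xc000 bytes) before the region ends, which is what the clamp returns.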
static void __exit
efivars_exit (void)
{
......
.section .data.gate, "ax"
.section .data.gate, "aw"
.incbin "arch/ia64/kernel/gate.so"
......@@ -405,7 +405,7 @@ void enable_irq(unsigned int irq)
spin_lock_irqsave(&desc->lock, flags);
switch (desc->depth) {
case 1: {
unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS);
unsigned int status = desc->status & ~IRQ_DISABLED;
desc->status = status;
if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
desc->status = status | IRQ_REPLAY;
......
......@@ -322,6 +322,10 @@ module_alloc (unsigned long size)
void
module_free (struct module *mod, void *module_region)
{
if (mod->arch.init_unw_table && module_region == mod->module_init) {
unw_remove_unwind_table(mod->arch.init_unw_table);
mod->arch.init_unw_table = NULL;
}
vfree(module_region);
}
......@@ -843,28 +847,92 @@ apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
return -ENOEXEC;
}
/*
* Modules contain a single unwind table which covers both the core and the init text
* sections but since the two are not contiguous, we need to split this table up such that
* we can register (and unregister) each "segment" separately. Fortunately, this sounds
* more complicated than it really is.
*/
static void
register_unwind_table (struct module *mod)
{
struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
struct unw_table_entry tmp, *e1, *e2, *core, *init;
unsigned long num_init = 0, num_core = 0;
/* First, count how many init and core unwind-table entries there are. */
for (e1 = start; e1 < end; ++e1)
if (in_init(mod, e1->start_offset))
++num_init;
else
++num_core;
/*
* Second, sort the table such that all unwind-table entries for the init and core
* text sections are nicely separated. We do this with a stupid bubble sort
* (unwind tables don't get ridiculously huge).
*/
for (e1 = start; e1 < end; ++e1) {
for (e2 = e1 + 1; e2 < end; ++e2) {
if (e2->start_offset < e1->start_offset) {
tmp = *e1;
*e1 = *e2;
*e2 = tmp;
}
}
}
/*
* Third, locate the init and core segments in the unwind table:
*/
if (in_init(mod, start->start_offset)) {
init = start;
core = start + num_init;
} else {
core = start;
init = start + num_core;
}
DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__,
mod->name, mod->arch.gp, num_init, num_core);
/*
* Fourth, register both tables (if not empty).
*/
if (num_core > 0) {
mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
core, core + num_core);
DEBUGP("%s: core: handle=%p [%p-%p)\n", __FUNCTION__,
mod->arch.core_unw_table, core, core + num_core);
}
if (num_init > 0) {
mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
init, init + num_init);
DEBUGP("%s: init: handle=%p [%p-%p)\n", __FUNCTION__,
mod->arch.init_unw_table, init, init + num_init);
}
}
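As an aside for readers following the comment above: the "split" amounts to counting the init entries, sorting by start_offset (which separates the two groups because the init and core text sections each occupy contiguous address ranges), and then reading off the two segments from the counts. A throwaway userspace sketch of the same idea follows; the invented is_init() predicate stands in for in_init(), and qsort() stands in for the kernel's bubble sort.

/* Illustration only (not part of the commit): partition-by-sort, as done
 * by register_unwind_table() above, on a toy table. */
#include <stdio.h>
#include <stdlib.h>

struct entry {				/* stand-in for struct unw_table_entry */
	unsigned long start_offset;
};

#define INIT_LIMIT 0x1000UL		/* pretend init text lives below this */

static int is_init(const struct entry *e)	/* stand-in for in_init() */
{
	return e->start_offset < INIT_LIMIT;
}

static int cmp_start(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;
	return (ea->start_offset > eb->start_offset) -
	       (ea->start_offset < eb->start_offset);
}

int main(void)
{
	struct entry tab[] = { {0x2000}, {0x0100}, {0x3000}, {0x0200}, {0x2800} };
	size_t n = sizeof(tab) / sizeof(tab[0]), i, num_init = 0;
	struct entry *init, *core;

	for (i = 0; i < n; i++)			/* first: count init entries */
		if (is_init(&tab[i]))
			num_init++;

	/* second: sorting by start_offset groups init and core entries,
	 * since each text section is a contiguous address range */
	qsort(tab, n, sizeof(tab[0]), cmp_start);

	/* third: locate the two segments from the counts */
	if (is_init(&tab[0])) {
		init = tab;
		core = tab + num_init;
	} else {
		core = tab;
		init = tab + (n - num_init);
	}
	printf("init: %zu entries from 0x%lx, core: %zu entries from 0x%lx\n",
	       num_init, init->start_offset, n - num_init, core->start_offset);
	return 0;
}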
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
if (mod->arch.unwind)
mod->arch.unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
(void *) mod->arch.unwind->sh_addr,
((void *) mod->arch.unwind->sh_addr
+ mod->arch.unwind->sh_size));
register_unwind_table(mod);
return 0;
}
void
module_arch_cleanup (struct module *mod)
{
if (mod->arch.unwind)
unw_remove_unwind_table(mod->arch.unw_table);
if (mod->arch.init_unw_table)
unw_remove_unwind_table(mod->arch.init_unw_table);
if (mod->arch.core_unw_table)
unw_remove_unwind_table(mod->arch.core_unw_table);
}
#ifdef CONFIG_SMP
void
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
unsigned int i;
for (i = 0; i < NR_CPUS; i++)
......
......@@ -202,8 +202,8 @@
#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
#define LOCK_PFS() spin_lock(&pfm_sessions.pfs_lock)
#define UNLOCK_PFS() spin_unlock(&pfm_sessions.pfs_lock)
#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
......@@ -618,6 +618,7 @@ static struct file_system_type pfm_fs_type = {
.get_sb = pfmfs_get_sb,
.kill_sb = kill_anon_super,
};
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
......@@ -634,6 +635,8 @@ static struct file_operations pfm_file_ops;
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif
void dump_pmu_state(const char *);
/*
* the HP simulator must be first because
* CONFIG_IA64_HP_SIM is independent of CONFIG_MCKINLEY or CONFIG_ITANIUM
......@@ -1283,10 +1286,11 @@ pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
unsigned long flags;
/*
* validity checks on cpu_mask have been done upstream
*/
LOCK_PFS();
LOCK_PFS(flags);
DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
pfm_sessions.pfs_sys_sessions,
......@@ -1325,7 +1329,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
is_syswide,
cpu));
UNLOCK_PFS();
UNLOCK_PFS(flags);
return 0;
......@@ -1334,7 +1338,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
pfm_sessions.pfs_sys_session[cpu]->pid,
smp_processor_id()));
abort:
UNLOCK_PFS();
UNLOCK_PFS(flags);
return -EBUSY;
......@@ -1343,11 +1347,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
unsigned long flags;
/*
* validity checks on cpu_mask have been done upstream
*/
LOCK_PFS();
LOCK_PFS(flags);
DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
pfm_sessions.pfs_sys_sessions,
......@@ -1380,7 +1384,7 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
is_syswide,
cpu));
UNLOCK_PFS();
UNLOCK_PFS(flags);
return 0;
}
......@@ -1655,7 +1659,7 @@ pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned lon
}
/*
* context is locked when coming here
* context is locked when coming here and interrupts are disabled
*/
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
......@@ -1789,6 +1793,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
* even if the task itself is in the middle of being ctxsw out.
*/
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static int
pfm_close(struct inode *inode, struct file *filp)
{
......@@ -1803,10 +1808,6 @@ pfm_close(struct inode *inode, struct file *filp)
int free_possible = 1;
int state, is_system;
{ u64 psr = pfm_get_psr();
BUG_ON((psr & IA64_PSR_I) == 0UL);
}
DPRINT(("pfm_close called private=%p\n", filp->private_data));
if (!inode) {
......@@ -1815,7 +1816,7 @@ pfm_close(struct inode *inode, struct file *filp)
}
if (PFM_IS_FILE(filp) == 0) {
printk(KERN_ERR "perfmon: pfm_close: bad magic [%d]\n", current->pid);
DPRINT(("bad magic for [%d]\n", current->pid));
return -EBADF;
}
......@@ -1824,6 +1825,23 @@ pfm_close(struct inode *inode, struct file *filp)
printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
return -EBADF;
}
/*
* remove our file from the async queue, if we use this mode.
* This can be done without the context being protected. We come
* here when the context has become unreachable by other tasks.
*
* We may still have active monitoring at this point and we may
* end up in pfm_overflow_handler(). However, fasync_helper()
* operates with interrupts disabled and it cleans up the
* queue. If the PMU handler is called prior to entering
* fasync_helper() then it will send a signal. If it is
* invoked after, it will find an empty queue and no
* signal will be sent. In both cases, we are safe
*/
if (filp->f_flags & FASYNC) {
DPRINT(("[%d] cleaning up async_queue=%p\n", current->pid, ctx->ctx_async_queue));
pfm_do_fasync (-1, filp, ctx, 0);
}
PROTECT_CTX(ctx, flags);
......@@ -1832,24 +1850,17 @@ pfm_close(struct inode *inode, struct file *filp)
task = PFM_CTX_TASK(ctx);
/*
* remove our file from the async queue, if we use it
*/
if (filp->f_flags & FASYNC) {
DPRINT(("[%d] before async_queue=%p\n", current->pid, ctx->ctx_async_queue));
pfm_do_fasync (-1, filp, ctx, 0);
DPRINT(("[%d] after async_queue=%p\n", current->pid, ctx->ctx_async_queue));
}
regs = ia64_task_regs(task);
DPRINT(("[%d] ctx_state=%d\n", current->pid, state));
DPRINT(("[%d] ctx_state=%d is_current=%d\n",
current->pid, state,
task == current ? 1 : 0));
if (state == PFM_CTX_UNLOADED || state == PFM_CTX_TERMINATED) {
goto doit;
}
regs = ia64_task_regs(task);
/*
* context still loaded/masked and self monitoring,
* we stop/unload and we destroy right here
......@@ -1898,12 +1909,11 @@ pfm_close(struct inode *inode, struct file *filp)
ctx->ctx_state = PFM_CTX_TERMINATED;
DPRINT(("[%d] ctx_state=%d\n", current->pid, state));
DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
}
goto doit;
}
/*
* The task is currently blocked or will block after an overflow.
* we must force it to wakeup to get out of the
......@@ -3482,6 +3492,7 @@ int
pfm_use_debug_registers(struct task_struct *task)
{
pfm_context_t *ctx = task->thread.pfm_context;
unsigned long flags;
int ret = 0;
if (pmu_conf.use_rr_dbregs == 0) return 0;
......@@ -3503,7 +3514,7 @@ pfm_use_debug_registers(struct task_struct *task)
*/
if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
LOCK_PFS();
LOCK_PFS(flags);
/*
* We cannot allow setting breakpoints when system wide monitoring
......@@ -3519,7 +3530,7 @@ pfm_use_debug_registers(struct task_struct *task)
pfm_sessions.pfs_sys_use_dbregs,
task->pid, ret));
UNLOCK_PFS();
UNLOCK_PFS(flags);
return ret;
}
......@@ -3535,11 +3546,12 @@ pfm_use_debug_registers(struct task_struct *task)
int
pfm_release_debug_registers(struct task_struct *task)
{
unsigned long flags;
int ret;
if (pmu_conf.use_rr_dbregs == 0) return 0;
LOCK_PFS();
LOCK_PFS(flags);
if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
ret = -1;
......@@ -3547,7 +3559,7 @@ pfm_release_debug_registers(struct task_struct *task)
pfm_sessions.pfs_ptrace_use_dbregs--;
ret = 0;
}
UNLOCK_PFS();
UNLOCK_PFS(flags);
return ret;
}
......@@ -3723,7 +3735,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
memset(pfm_stats, 0, sizeof(pfm_stats));
for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
}
return 0;
}
......@@ -3735,6 +3746,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
{
struct thread_struct *thread = NULL;
pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
unsigned long flags;
dbreg_t dbreg;
unsigned int rnum;
int first_time;
......@@ -3793,7 +3805,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
* written after the context is loaded
*/
if (is_loaded) {
LOCK_PFS();
LOCK_PFS(flags);
if (first_time && is_system) {
if (pfm_sessions.pfs_ptrace_use_dbregs)
......@@ -3801,7 +3813,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
else
pfm_sessions.pfs_sys_use_dbregs++;
}
UNLOCK_PFS();
UNLOCK_PFS(flags);
}
if (ret != 0) return ret;
......@@ -3902,11 +3914,11 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
* in case it was our first attempt, we undo the global modifications
*/
if (first_time) {
LOCK_PFS();
LOCK_PFS(flags);
if (ctx->ctx_fl_system) {
pfm_sessions.pfs_sys_use_dbregs--;
}
UNLOCK_PFS();
UNLOCK_PFS(flags);
ctx->ctx_fl_using_dbreg = 0;
}
/*
......@@ -3959,7 +3971,11 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
return -EBUSY;
}
DPRINT(("current [%d] task [%d] ctx_state=%d is_system=%d\n",
current->pid,
PFM_CTX_TASK(ctx)->pid,
state,
is_system));
/*
* in system mode, we need to update the PMU directly
* and the user level state of the caller, which may not
......@@ -4157,6 +4173,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
struct task_struct *task;
struct thread_struct *thread;
struct pfm_context_t *old;
unsigned long flags;
#ifndef CONFIG_SMP
struct task_struct *owner_task = NULL;
#endif
......@@ -4217,7 +4234,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
goto error;
}
LOCK_PFS();
LOCK_PFS(flags);
if (is_system) {
if (pfm_sessions.pfs_ptrace_use_dbregs) {
......@@ -4230,7 +4247,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
}
}
UNLOCK_PFS();
UNLOCK_PFS(flags);
if (ret) goto error;
}
......@@ -4377,9 +4394,9 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
* we must undo the dbregs setting (for system-wide)
*/
if (ret && set_dbregs) {
LOCK_PFS();
LOCK_PFS(flags);
pfm_sessions.pfs_sys_use_dbregs--;
UNLOCK_PFS();
UNLOCK_PFS(flags);
}
/*
* release task, there is now a link with the context
......@@ -4605,11 +4622,14 @@ pfm_exit_thread(struct task_struct *task)
printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
break;
}
UNPROTECT_CTX(ctx, flags);
{ u64 psr = pfm_get_psr();
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(GET_PMU_OWNER());
BUG_ON(ia64_psr(regs)->up);
BUG_ON(ia64_psr(regs)->pp);
}
UNPROTECT_CTX(ctx, flags);
/*
* All memory free operations (especially for vmalloc'ed memory)
......@@ -5488,7 +5508,7 @@ pfm_proc_info(char *page)
char *p = page;
struct list_head * pos;
pfm_buffer_fmt_t * entry;
unsigned long psr;
unsigned long psr, flags;
int online_cpus = 0;
int i;
......@@ -5528,7 +5548,7 @@ pfm_proc_info(char *page)
}
}
LOCK_PFS();
LOCK_PFS(flags);
p += sprintf(p, "proc_sessions : %u\n"
"sys_sessions : %u\n"
"sys_use_dbregs : %u\n"
......@@ -5537,7 +5557,7 @@ pfm_proc_info(char *page)
pfm_sessions.pfs_sys_sessions,
pfm_sessions.pfs_sys_use_dbregs,
pfm_sessions.pfs_ptrace_use_dbregs);
UNLOCK_PFS();
UNLOCK_PFS(flags);
spin_lock(&pfm_buffer_fmt_lock);
......@@ -5712,10 +5732,6 @@ pfm_save_regs(struct task_struct *task)
*/
ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
{ u64 foo = pfm_get_psr();
BUG_ON(foo & ((IA64_PSR_UP|IA64_PSR_PP)));
}
/*
* release ownership of this PMU.
* PM interrupts are masked, so nothing
......@@ -5771,6 +5787,8 @@ pfm_save_regs(struct task_struct *task)
*/
psr = pfm_get_psr();
BUG_ON(foo & (IA64_PSR_I));
/*
* stop monitoring:
* This is the last instruction which may generate an overflow
......@@ -5785,12 +5803,6 @@ pfm_save_regs(struct task_struct *task)
*/
ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
#if 1
{ u64 foo = pfm_get_psr();
BUG_ON(foo & (IA64_PSR_I));
BUG_ON(foo & ((IA64_PSR_UP|IA64_PSR_PP)));
}
#endif
return;
save_error:
printk(KERN_ERR "perfmon: pfm_save_regs CPU%d [%d] NULL context PM_VALID=%ld\n",
......@@ -5805,11 +5817,9 @@ pfm_lazy_save_regs (struct task_struct *task)
struct thread_struct *t;
unsigned long flags;
#if 1
{ u64 foo = pfm_get_psr();
BUG_ON(foo & IA64_PSR_UP);
{ u64 psr = pfm_get_psr();
BUG_ON(psr & IA64_PSR_UP);
}
#endif
ctx = PFM_GET_CTX(task);
t = &task->thread;
......@@ -5851,7 +5861,7 @@ pfm_lazy_save_regs (struct task_struct *task)
/*
* unfreeze PMU if had pending overflows
*/
if (t->pmcs[0] & ~1UL) pfm_unfreeze_pmu();
if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
/*
* now we can unmask PMU interrupts, they will
......@@ -5900,10 +5910,8 @@ pfm_load_regs (struct task_struct *task)
flags = pfm_protect_ctx_ctxsw(ctx);
psr = pfm_get_psr();
#if 1
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
#endif
if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
struct pt_regs *regs = ia64_task_regs(task);
......@@ -6060,10 +6068,8 @@ pfm_load_regs (struct task_struct *task)
t = &task->thread;
psr = pfm_get_psr();
#if 1
BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
BUG_ON(psr & IA64_PSR_I);
#endif
/*
* we restore ALL the debug registers to avoid picking up
......@@ -6218,7 +6224,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
/*
* clear whatever overflow status bits there were
*/
task->thread.pmcs[0] &= ~0x1;
task->thread.pmcs[0] = 0;
}
ovfl_val = pmu_conf.ovfl_val;
/*
......@@ -6400,6 +6406,11 @@ pfm_init_percpu (void)
pfm_clear_psr_pp();
pfm_clear_psr_up();
/*
* we run with the PMU not frozen at all times
*/
pfm_unfreeze_pmu();
if (smp_processor_id() == 0)
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
......@@ -6427,49 +6438,75 @@ pfm_init_percpu (void)
if (PMD_IS_IMPL(i) == 0) continue;
ia64_set_pmd(i, 0UL);
}
/*
* we run with the PMU not frozen at all times
*/
pfm_unfreeze_pmu();
}
/*
* used for debug purposes only
*/
void
dump_pmu_state(void)
dump_pmu_state(const char *from)
{
struct task_struct *task;
struct thread_struct *t;
struct pt_regs *regs;
pfm_context_t *ctx;
unsigned long psr;
int i;
unsigned long psr, dcr, info, flags;
int i, this_cpu;
local_irq_save(flags);
printk("current [%d] %s\n", current->pid, current->comm);
this_cpu = smp_processor_id();
regs = ia64_task_regs(current);
info = PFM_CPUINFO_GET();
dcr = ia64_getreg(_IA64_REG_CR_DCR);
if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
local_irq_restore(flags);
return;
}
printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
this_cpu,
from,
current->pid,
regs->cr_iip,
current->comm);
task = GET_PMU_OWNER();
ctx = GET_PMU_CTX();
printk("owner [%d] ctx=%p\n", task ? task->pid : -1, ctx);
printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);
psr = pfm_get_psr();
printk("psr.pp=%ld psr.up=%ld\n", (psr >> IA64_PSR_PP_BIT) &0x1UL, (psr >> IA64_PSR_PP_BIT)&0x1UL);
printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
this_cpu,
ia64_get_pmc(0),
psr & IA64_PSR_PP ? 1 : 0,
psr & IA64_PSR_UP ? 1 : 0,
dcr & IA64_DCR_PP ? 1 : 0,
info,
ia64_psr(regs)->up,
ia64_psr(regs)->pp);
ia64_psr(regs)->up = 0;
ia64_psr(regs)->pp = 0;
t = &current->thread;
for (i=1; PMC_IS_LAST(i) == 0; i++) {
if (PMC_IS_IMPL(i) == 0) continue;
printk("pmc[%d]=0x%lx tpmc=0x%lx\n", i, ia64_get_pmc(i), t->pmcs[i]);
printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
}
for (i=1; PMD_IS_LAST(i) == 0; i++) {
if (PMD_IS_IMPL(i) == 0) continue;
printk("pmd[%d]=0x%lx tpmd=0x%lx\n", i, ia64_get_pmd(i), t->pmds[i]);
printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
}
if (ctx) {
printk("ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
this_cpu,
ctx->ctx_state,
ctx->ctx_smpl_vaddr,
ctx->ctx_smpl_hdr,
......@@ -6477,6 +6514,7 @@ dump_pmu_state(void)
ctx->ctx_msgq_tail,
ctx->ctx_saved_psr_up);
}
local_irq_restore(flags);
}
/*
......@@ -6499,10 +6537,8 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
PFM_SET_WORK_PENDING(task, 0);
/*
* restore default psr settings
* the psr bits are already set properly in copy_thread()
*/
ia64_psr(regs)->pp = ia64_psr(regs)->up = 0;
ia64_psr(regs)->sp = 1;
}
#else /* !CONFIG_PERFMON */
asmlinkage long
......
......@@ -353,9 +353,13 @@ copy_thread (int nr, unsigned long clone_flags,
/* copy parts of thread_struct: */
p->thread.ksp = (unsigned long) child_stack - 16;
/* stop some PSR bits from being inherited: */
/* stop some PSR bits from being inherited.
* the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
* therefore we must specify them explicitly here and not include them in
* IA64_PSR_BITS_TO_CLEAR.
*/
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
& ~IA64_PSR_BITS_TO_CLEAR);
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
/*
* NOTE: The calling convention considers all floating point
......
/*
* Copyright (C) 2000, 2002 Hewlett-Packard Co
* Copyright (C) 2000, 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Kernel unwind support.
......@@ -45,12 +45,6 @@ struct unw_info_block {
/* personality routine and language-specific data follow behind descriptors */
};
struct unw_table_entry {
u64 start_offset;
u64 end_offset;
u64 info_offset;
};
struct unw_table {
struct unw_table *next; /* must be first member! */
const char *name;
......
......@@ -251,6 +251,33 @@ acpi_parse_hpet (
}
#endif
#ifdef CONFIG_ACPI_BUS
/*
* Set specified PIC IRQ to level triggered mode.
*
* Ports 0x4d0-0x4d1 are ECLR1 and ECLR2, the Edge/Level Control Registers
* for the 8259 PIC. bit[n] = 1 means irq[n] is Level, otherwise Edge.
* ECLR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
* ECLR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
*
* As the BIOS should have done this for us,
* print a warning if the IRQ wasn't already set to level.
*/
void acpi_pic_set_level_irq(unsigned int irq)
{
unsigned char mask = 1 << (irq & 7);
unsigned int port = 0x4d0 + (irq >> 3);
unsigned char val = inb(port);
if (!(val & mask)) {
printk(KERN_WARNING PREFIX "IRQ %d was Edge Triggered, "
"setting to Level Triggerd\n", irq);
outb(val | mask, port);
}
}
#endif /* CONFIG_ACPI_BUS */
static unsigned long __init
acpi_scan_rsdp (
unsigned long start,
......
......@@ -566,8 +566,14 @@ error_kernelspace:
incl %ebx
/* There are two places in the kernel that can potentially fault with
usergs. Handle them here. The exception handlers after
iret run with kernel gs again, so don't set the user space flag. */
cmpq $iret_label,RIP(%rsp)
iret run with kernel gs again, so don't set the user space flag.
B stepping K8s sometimes report a truncated RIP for IRET
exceptions returning to compat mode. Check for these here too. */
leaq iret_label(%rip),%rbp
cmpq %rbp,RIP(%rsp)
je error_swapgs
movl %ebp,%ebp /* zero extend */
cmpq %rbp,RIP(%rsp)
je error_swapgs
cmpq $gs_change,RIP(%rsp)
je error_swapgs
......
......@@ -622,11 +622,13 @@ static inline int IO_APIC_irq_trigger(int irq)
return 0;
}
int irq_vector[NR_IRQS] = { FIRST_DEVICE_VECTOR , 0 };
/* irq_vector is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
static int __init assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
BUG_ON(irq >= NR_IRQ_VECTORS);
if (IO_APIC_VECTOR(irq) > 0)
return IO_APIC_VECTOR(irq);
next:
......
......@@ -189,8 +189,7 @@ void pda_init(int cpu)
pda->irqstackptr += IRQSTACKSIZE-64;
}
#define EXCEPTION_STK_ORDER 0 /* >= N_EXCEPTION_STACKS*EXCEPTION_STKSZ */
char boot_exception_stacks[N_EXCEPTION_STACKS*EXCEPTION_STKSZ];
char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ];
void syscall_init(void)
{
......@@ -226,15 +225,12 @@ void __init cpu_init (void)
#endif
struct tss_struct * t = &init_tss[cpu];
unsigned long v, efer;
char *estacks;
char *estacks = NULL;
struct task_struct *me;
/* CPU 0 is initialised in head64.c */
if (cpu != 0) {
pda_init(cpu);
estacks = (char *)__get_free_pages(GFP_ATOMIC, 0);
if (!estacks)
panic("Can't allocate exception stacks for CPU %d\n",cpu);
} else
estacks = boot_exception_stacks;
......@@ -282,10 +278,15 @@ void __init cpu_init (void)
/*
* set up and load the per-CPU TSS
*/
estacks += EXCEPTION_STKSZ;
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
t->ist[v] = (unsigned long)estacks;
if (cpu) {
estacks = (char *)__get_free_pages(GFP_ATOMIC, 0);
if (!estacks)
panic("Cannot allocate exception stack %ld %d\n",
v, cpu);
}
estacks += EXCEPTION_STKSZ;
t->ist[v] = (unsigned long)estacks;
}
t->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
......
......@@ -487,25 +487,3 @@ asmlinkage void smp_call_function_interrupt(void)
atomic_inc(&call_data->finished);
}
}
/* Slow. Should be only used for debugging. */
int slow_smp_processor_id(void)
{
int stack_location;
unsigned long sp = (unsigned long)&stack_location;
int offset = 0, cpu;
for (offset = 0; next_cpu(offset, cpu_online_map) < NR_CPUS; offset = cpu + 1) {
cpu = next_cpu(offset, cpu_online_map);
if (sp >= (u64)cpu_pda[cpu].irqstackptr - IRQSTACKSIZE &&
sp <= (u64)cpu_pda[cpu].irqstackptr)
return cpu;
unsigned long estack = init_tss[cpu].ist[0] - EXCEPTION_STKSZ;
if (sp >= estack && sp <= estack+(1<<(PAGE_SHIFT+EXCEPTION_STK_ORDER)))
return cpu;
}
return stack_smp_processor_id();
}
......@@ -111,6 +111,14 @@ void do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec = xtime.tv_nsec / 1000;
/*
* If time_adjust is negative then NTP is slowing the clock
* so make sure not to go into the next possible interval.
* Better to lose some accuracy than have time go backwards.
*/
if (unlikely(time_adjust < 0) && usec > tickadj)
usec = tickadj;
t = (jiffies - wall_jiffies) * (1000000L / HZ) +
do_gettimeoffset();
usec += t;
......@@ -477,22 +485,28 @@ unsigned long get_cmos_time(void)
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
//static unsigned long fast_gettimeoffset_ref = 0;
static unsigned long cpu_khz_ref = 0;
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
unsigned long *lpj;
#ifdef CONFIG_SMP
lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
lpj = &boot_cpu_data.loops_per_jiffy;
#endif
if (!ref_freq) {
ref_freq = freq->old;
loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
loops_per_jiffy_ref = *lpj;
cpu_khz_ref = cpu_khz;
}
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
(val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
cpu_data[freq->cpu].loops_per_jiffy =
*lpj =
cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
......
......@@ -153,7 +153,7 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback);
extern void * memset(void *,int,__kernel_size_t);
extern size_t strlen(const char *);
extern char * bcopy(const char * src, char * dest, int count);
extern void bcopy(const char * src, char * dest, int count);
extern void * memmove(void * dest,const void *src,size_t count);
extern char * strcpy(char * dest,const char *src);
extern int strcmp(const char * cs,const char * ct);
......
......@@ -14,6 +14,10 @@ search_extable(const struct exception_table_entry *first,
const struct exception_table_entry *last,
unsigned long value)
{
/* Work around a B stepping K8 bug: these CPUs can report a truncated
 * (zero-extended) RIP for IRET exceptions returning to compat mode,
 * so sign-extend kernel addresses before the search. */
if ((value >> 32) == 0)
value |= 0xffffffffUL << 32;
while (first <= last) {
const struct exception_table_entry *mid;
long diff;
......
......@@ -164,5 +164,8 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
rr++;
}
if (found == 1)
fake_node = 1;
return 0;
}
......@@ -104,6 +104,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
if (nodeid + 1 > numnodes)
numnodes = nodeid + 1;
nodes_present |= (1UL << nodeid);
node_set_online(nodeid);
}
/* Initialize final allocator for a zone */
......
......@@ -37,7 +37,7 @@
#ifdef __KERNEL__
#define SONYPI_DRIVER_MAJORVERSION 1
#define SONYPI_DRIVER_MINORVERSION 20
#define SONYPI_DRIVER_MINORVERSION 21
#define SONYPI_DEVICE_MODEL_TYPE1 1
#define SONYPI_DEVICE_MODEL_TYPE2 2
......@@ -329,8 +329,8 @@ struct sonypi_eventtypes {
{ SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_HELP_MASK, sonypi_helpev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_ZOOM_MASK, sonypi_zoomev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
......
......@@ -184,11 +184,19 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
atkbd->resend = 0;
#endif
switch (code) {
case ATKBD_RET_ACK:
atkbd->ack = 1;
goto out;
case ATKBD_RET_NAK:
atkbd->ack = -1;
goto out;
}
if (atkbd->translated) do {
if (atkbd->emul != 1) {
if (code == ATKBD_RET_EMUL0 || code == ATKBD_RET_EMUL1 ||
code == ATKBD_RET_ACK || code == ATKBD_RET_NAK)
if (code == ATKBD_RET_EMUL0 || code == ATKBD_RET_EMUL1)
break;
if (code == ATKBD_RET_BAT) {
if (!atkbd->bat_xl)
......@@ -212,15 +220,6 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
} while (0);
switch (code) {
case ATKBD_RET_ACK:
atkbd->ack = 1;
goto out;
case ATKBD_RET_NAK:
atkbd->ack = -1;
goto out;
}
if (atkbd->cmdcnt) {
atkbd->cmdbuf[--atkbd->cmdcnt] = code;
goto out;
......
......@@ -31,7 +31,7 @@
#define _MEYE_PRIV_H_
#define MEYE_DRIVER_MAJORVERSION 1
#define MEYE_DRIVER_MINORVERSION 7
#define MEYE_DRIVER_MINORVERSION 8
#include <linux/config.h>
#include <linux/types.h>
......
......@@ -964,13 +964,17 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
error = first;
goto out_fail;
}
if (FAT_FIRST_ENT(sb, media) != first
&& (media != 0xf8 || FAT_FIRST_ENT(sb, 0xfe) != first)) {
if (!silent) {
if (FAT_FIRST_ENT(sb, media) == first) {
/* all is as it should be */
} else if (media == 0xf8 && FAT_FIRST_ENT(sb, 0xfe) == first) {
/* bad, reported on pc9800 */
} else if (first == 0) {
/* bad, reported with a SmartMedia card */
} else {
if (!silent)
printk(KERN_ERR "FAT: invalid first entry of FAT "
"(0x%x != 0x%x)\n",
FAT_FIRST_ENT(sb, media), first);
}
goto out_invalid;
}
......
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPUs.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
......@@ -72,6 +72,9 @@ phys_to_virt (unsigned long address)
return (void *) (address + PAGE_OFFSET);
}
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
/*
* The following two macros are deprecated and scheduled for removal.
* Please use the PCI-DMA interface defined in <asm/pci.h> instead.
......
......@@ -18,7 +18,8 @@ struct mod_arch_specific {
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
void *unw_table; /* unwind-table cookie returned by unwinder */
void *core_unw_table; /* core unwind-table cookie returned by unwinder */
void *init_unw_table; /* init unwind-table cookie returned by unwinder */
unsigned int next_got_entry; /* index of next available got entry */
};
......
......@@ -9,7 +9,7 @@
#include <asm/ptrace.h>
#include <asm/system.h>
#define EMUL_PREFIX_LINUX_IA32 "emul/ia32-linux/"
#define EMUL_PREFIX_LINUX_IA32 "/emul/ia32-linux/"
static inline char *
__emul_prefix (void)
......
......@@ -93,6 +93,12 @@ struct unw_frame_info {
* The official API follows below:
*/
struct unw_table_entry {
u64 start_offset;
u64 end_offset;
u64 info_offset;
};
/*
* Initialize unwind support.
*/
......
......@@ -76,8 +76,8 @@ struct hw_interrupt_type;
#ifndef __ASSEMBLY__
extern int irq_vector[NR_IRQS];
#define IO_APIC_VECTOR(irq) irq_vector[irq]
extern u8 irq_vector[NR_IRQ_VECTORS];
#define IO_APIC_VECTOR(irq) ((int)irq_vector[irq])
/*
* Various low-level irq details needed by irq.c, process.c,
......
......@@ -22,6 +22,7 @@
* the usable vector space is 0x20-0xff (224 vectors)
*/
#define NR_IRQS 224
#define NR_IRQ_VECTORS NR_IRQS
static __inline__ int irq_canonicalize(int irq)
{
......
......@@ -24,6 +24,8 @@ extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM (pci_mem_start)
#define PCIBIOS_MIN_CARDBUS_IO 0x4000
void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus);
extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
......
......@@ -263,8 +263,8 @@ struct thread_struct {
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define N_EXCEPTION_STACKS 3 /* hw limit: 7 */
#define EXCEPTION_STKSZ 1024
#define EXCEPTION_STK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define EXCEPTION_STACK_ORDER 0
#define start_thread(regs,new_rip,new_rsp) do { \
asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
......
......@@ -74,15 +74,7 @@ extern __inline int hard_smp_processor_id(void)
return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}
extern int slow_smp_processor_id(void);
extern inline int safe_smp_processor_id(void)
{
if (disable_apic)
return slow_smp_processor_id();
else
return hard_smp_processor_id();
}
#define safe_smp_processor_id() (cpuid_ebx(1) >> 24)
#define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
#endif /* !ASSEMBLY */
......
......@@ -10,13 +10,15 @@
/* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */
extern int fake_node;
/* This is actually a cpumask_t, but doesn't matter because we don't have
>BITS_PER_LONG CPUs */
extern unsigned long cpu_online_map;
#define cpu_to_node(cpu) (fake_node ? 0 : (cpu))
#define memblk_to_node(memblk) (fake_node ? 0 : (memblk))
#define parent_node(node) (node)
#define node_to_first_cpu(node) (fake_node ? 0 : (node))
#define node_to_cpu_mask(node) (fake_node ? cpu_online_map : (1UL << (node)))
#define node_to_cpumask(node) (fake_node ? cpu_online_map : (1UL << (node)))
#define node_to_memblk(node) (node)
static inline unsigned long pcibus_to_cpumask(int bus)
......
......@@ -483,6 +483,7 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
/* Not implemented yet, only for 486*/
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_DEAD 0x00000008 /* Dead */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* dumped core */
......
......@@ -688,6 +688,7 @@ static void exit_notify(struct task_struct *tsk)
}
tsk->state = TASK_ZOMBIE;
tsk->flags |= PF_DEAD;
/*
* In the preemption case it must be impossible for the task
* to get runnable again, so use "_raw_" unlock to keep
......
......@@ -742,7 +742,7 @@ static inline void finish_task_switch(task_t *prev)
{
runqueue_t *rq = this_rq();
struct mm_struct *mm = rq->prev_mm;
int drop_task_ref;
unsigned long prev_task_flags;
rq->prev_mm = NULL;
......@@ -757,14 +757,11 @@ static inline void finish_task_switch(task_t *prev)
* be dropped twice.
* Manfred Spraul <manfred@colorfullife.com>
*/
drop_task_ref = 0;
if (unlikely(prev->state & (TASK_DEAD | TASK_ZOMBIE)))
drop_task_ref = 1;
prev_task_flags = prev->flags;
finish_arch_switch(rq, prev);
if (mm)
mmdrop(mm);
if (drop_task_ref)
if (unlikely(prev_task_flags & PF_DEAD))
put_task_struct(prev);
}
......
......@@ -25,25 +25,34 @@
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
uint32_t low, low2, high, rem;
low = *n & 0xffffffff;
high = *n >> 32;
rem = high % (uint32_t)base;
high = high / (uint32_t)base;
low2 = low >> 16;
low2 += rem << 16;
rem = low2 % (uint32_t)base;
low2 = low2 / (uint32_t)base;
low = low & 0xffff;
low += rem << 16;
rem = low % (uint32_t)base;
low = low / (uint32_t)base;
*n = low +
((uint64_t)low2 << 16) +
((uint64_t)high << 32);
uint64_t rem = *n;
uint64_t b = base;
uint64_t res, d = 1;
uint32_t high = rem >> 32;
/* Reduce the thing a bit first */
res = 0;
if (high >= base) {
high /= base;
res = (uint64_t) high << 32;
rem -= (uint64_t) (high*base) << 32;
}
while ((int64_t)b > 0 && b < rem) {
b = b+b;
d = d+d;
}
do {
if (rem >= b) {
rem -= b;
res += d;
}
b >>= 1;
d >>= 1;
} while (d);
*n = res;
return rem;
}
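For context on the hunk above: the new __div64_32 drops the old three-step, 16-bit-chunk division in favour of classic shift-and-subtract long division: divide out the top 32 bits first, double the divisor until it no longer fits under the remainder, then walk it back down, subtracting and accumulating quotient bits. Below is a self-contained userspace rendering of the same algorithm, with our own function name and test values; it is checked against the compiler's native 64-bit division, which is only available here because this runs in userspace (broadly, the kernel helper exists so 32-bit kernels can divide 64-bit values without pulling in libgcc).

/* Illustration only (not part of the commit): the same shift-and-subtract
 * division as the new __div64_32 above, compiled and checked in userspace. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint32_t div64_32_sketch(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n, b = base, res = 0, d = 1;
	uint32_t high = rem >> 32;

	/* Divide out the top 32 bits first so 'b' below stays shiftable. */
	if (high >= base) {
		high /= base;
		res = (uint64_t)high << 32;
		rem -= (uint64_t)(high * base) << 32;
	}

	/* Double the divisor (and the quotient bit it represents) until it
	 * would pass the remainder or spill into the sign bit... */
	while ((int64_t)b > 0 && b < rem) {
		b = b + b;
		d = d + d;
	}

	/* ...then walk it back down, subtracting wherever it fits. */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;		/* quotient replaces the dividend, as with do_div() */
	return (uint32_t)rem;	/* remainder is returned */
}

int main(void)
{
	uint64_t n = 123456789012345ULL;
	uint32_t r = div64_32_sketch(&n, 1000);

	assert(n == 123456789012345ULL / 1000);
	assert(r == 123456789012345ULL % 1000);
	printf("quotient=%llu remainder=%u\n", (unsigned long long)n, r);
	return 0;
}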
......