Commit 2600990e authored by Ralf Baechle

[MIPS] kpsd and other AP/SP improvements.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent bce1a286
@@ -1476,6 +1476,16 @@ config MIPS_VPE_APSP_API
depends on MIPS_VPE_LOADER
help
config MIPS_APSP_KSPD
bool "Enable KSPD"
depends on MIPS_VPE_APSP_API
default y
help
KSPD is a kernel daemon that accepts syscall requests from the SP
side, actions them and returns the results. It also handles the
"exit" syscall notifying other kernel modules the SP program is
exiting. You probably want to say yes here.
config SB1_PASS_1_WORKAROUNDS
bool
depends on CPU_SB1_PASS_1
...
@@ -36,6 +36,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o
obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
...
/*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <asm/vpe.h>
#include <asm/rtlx.h>
#include <asm/kspd.h>
static struct workqueue_struct *workqueue = NULL;
static struct work_struct work;
extern unsigned long cpu_khz;
struct mtsp_syscall {
int cmd;
unsigned char abi;
unsigned char size;
};
struct mtsp_syscall_ret {
int retval;
int errno;
};
struct mtsp_syscall_generic {
int arg0;
int arg1;
int arg2;
int arg3;
int arg4;
int arg5;
int arg6;
};
static struct list_head kspd_notifylist;
static int sp_stopping = 0;
/* these should match with those in the SDE kit */
#define MTSP_SYSCALL_BASE 0
#define MTSP_SYSCALL_EXIT (MTSP_SYSCALL_BASE + 0)
#define MTSP_SYSCALL_OPEN (MTSP_SYSCALL_BASE + 1)
#define MTSP_SYSCALL_READ (MTSP_SYSCALL_BASE + 2)
#define MTSP_SYSCALL_WRITE (MTSP_SYSCALL_BASE + 3)
#define MTSP_SYSCALL_CLOSE (MTSP_SYSCALL_BASE + 4)
#define MTSP_SYSCALL_LSEEK32 (MTSP_SYSCALL_BASE + 5)
#define MTSP_SYSCALL_ISATTY (MTSP_SYSCALL_BASE + 6)
#define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7)
#define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8)
#define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9)
#define MTSP_O_RDONLY 0x0000
#define MTSP_O_WRONLY 0x0001
#define MTSP_O_RDWR 0x0002
#define MTSP_O_NONBLOCK 0x0004
#define MTSP_O_APPEND 0x0008
#define MTSP_O_SHLOCK 0x0010
#define MTSP_O_EXLOCK 0x0020
#define MTSP_O_ASYNC 0x0040
#define MTSP_O_FSYNC O_SYNC
#define MTSP_O_NOFOLLOW 0x0100
#define MTSP_O_SYNC 0x0080
#define MTSP_O_CREAT 0x0200
#define MTSP_O_TRUNC 0x0400
#define MTSP_O_EXCL 0x0800
#define MTSP_O_BINARY 0x8000
#define SP_VPE 1
struct apsp_table {
int sp;
int ap;
};
/* we might want to do the mode flags too */
struct apsp_table open_flags_table[] = {
{ MTSP_O_RDWR, O_RDWR },
{ MTSP_O_WRONLY, O_WRONLY },
{ MTSP_O_CREAT, O_CREAT },
{ MTSP_O_TRUNC, O_TRUNC },
{ MTSP_O_NONBLOCK, O_NONBLOCK },
{ MTSP_O_APPEND, O_APPEND },
{ MTSP_O_NOFOLLOW, O_NOFOLLOW }
};
struct apsp_table syscall_command_table[] = {
{ MTSP_SYSCALL_OPEN, __NR_open },
{ MTSP_SYSCALL_CLOSE, __NR_close },
{ MTSP_SYSCALL_READ, __NR_read },
{ MTSP_SYSCALL_WRITE, __NR_write },
{ MTSP_SYSCALL_LSEEK32, __NR_lseek }
};
static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
{
register long int _num __asm__ ("$2") = num;
register long int _arg0 __asm__ ("$4") = arg0;
register long int _arg1 __asm__ ("$5") = arg1;
register long int _arg2 __asm__ ("$6") = arg2;
register long int _arg3 __asm__ ("$7") = arg3;
mm_segment_t old_fs;
old_fs = get_fs();
set_fs(KERNEL_DS);
__asm__ __volatile__ (
" syscall \n"
: "=r" (_num), "=r" (_arg3)
: "r" (_num), "r" (_arg0), "r" (_arg1), "r" (_arg2), "r" (_arg3));
set_fs(old_fs);
/* $a3 is error flag */
if (_arg3)
return -_num;
return _num;
}
static int translate_syscall_command(int cmd)
{
int i;
int ret = -1;
for (i = 0; i < ARRAY_SIZE(syscall_command_table); i++) {
if ((cmd == syscall_command_table[i].sp))
return syscall_command_table[i].ap;
}
return ret;
}
static unsigned int translate_open_flags(int flags)
{
int i;
unsigned int ret = 0;
for (i = 0; i < (sizeof(open_flags_table) / sizeof(struct apsp_table));
i++) {
if( (flags & open_flags_table[i].sp) ) {
ret |= open_flags_table[i].ap;
}
}
return ret;
}
static void sp_setfsuidgid( uid_t uid, gid_t gid)
{
current->fsuid = uid;
current->fsgid = gid;
key_fsuid_changed(current);
key_fsgid_changed(current);
}
/*
* Expects a request to be on the sysio channel. Reads it. Decides whether
* it's a Linux syscall and runs it, or whatever. Puts the return code back
* into the request and sends the whole thing back.
*/
void sp_work_handle_request(void)
{
struct mtsp_syscall sc;
struct mtsp_syscall_generic generic;
struct mtsp_syscall_ret ret;
struct kspd_notifications *n;
struct timeval tv;
struct timezone tz;
int cmd;
char *vcwd;
mm_segment_t old_fs;
int size;
ret.retval = -1;
if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) {
printk(KERN_ERR "Expected request but nothing to read\n");
return;
}
size = sc.size;
if (size) {
if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) {
printk(KERN_ERR "Expected request but nothing to read\n");
return;
}
}
/* Run the syscall with the privileges of the user who loaded the
SP program */
if (vpe_getuid(SP_VPE))
sp_setfsuidgid( vpe_getuid(SP_VPE), vpe_getgid(SP_VPE));
switch (sc.cmd) {
/* needs the flags argument translating from SDE kit to
linux */
case MTSP_SYSCALL_PIPEFREQ:
ret.retval = cpu_khz * 1000;
ret.errno = 0;
break;
case MTSP_SYSCALL_GETTOD:
memset(&tz, 0, sizeof(tz));
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
(int)&tz, 0,0)) == 0)
ret.retval = tv.tv_sec;
ret.errno = errno;
break;
case MTSP_SYSCALL_EXIT:
list_for_each_entry(n, &kspd_notifylist, list)
n->kspd_sp_exit(SP_VPE);
sp_stopping = 1;
printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
generic.arg0);
break;
case MTSP_SYSCALL_OPEN:
generic.arg1 = translate_open_flags(generic.arg1);
vcwd = vpe_getcwd(SP_VPE);
/* change to the cwd of the process that loaded the SP program */
old_fs = get_fs();
set_fs(KERNEL_DS);
sys_chdir(vcwd);
set_fs(old_fs);
sc.cmd = __NR_open;
/* fall through */
default:
if ((sc.cmd >= __NR_Linux) &&
(sc.cmd <= (__NR_Linux + __NR_Linux_syscalls)) )
cmd = sc.cmd;
else
cmd = translate_syscall_command(sc.cmd);
if (cmd >= 0) {
ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1,
generic.arg2, generic.arg3);
ret.errno = errno;
} else
printk(KERN_WARNING
"KSPD: Unknown SP syscall number %d\n", sc.cmd);
break;
} /* switch */
if (vpe_getuid(SP_VPE))
sp_setfsuidgid( 0, 0);
if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0))
< sizeof(struct mtsp_syscall_ret))
printk("KSPD: sp_work_handle_request failed to send to SP\n");
}
static void sp_cleanup(void)
{
struct files_struct *files = current->files;
int i, j;
struct fdtable *fdt;
j = 0;
/*
* It is safe to dereference the fd table without RCU or
* ->file_lock
*/
fdt = files_fdtable(files);
for (;;) {
unsigned long set;
i = j * __NFDBITS;
if (i >= fdt->max_fdset || i >= fdt->max_fds)
break;
set = fdt->open_fds->fds_bits[j++];
while (set) {
if (set & 1) {
struct file * file = xchg(&fdt->fd[i], NULL);
if (file)
filp_close(file, files);
}
i++;
set >>= 1;
}
}
}
static int channel_open = 0;
/* the work handler */
static void sp_work(void *data)
{
if (!channel_open) {
if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
printk("KSPD: unable to open sp channel\n");
sp_stopping = 1;
} else {
channel_open++;
printk(KERN_DEBUG "KSPD: SP channel opened\n");
}
} else {
/* wait for some data, allow it to sleep */
rtlx_read_poll(RTLX_CHANNEL_SYSIO, 1);
/* Check we haven't been woken because we are stopping */
if (!sp_stopping)
sp_work_handle_request();
}
if (!sp_stopping)
queue_work(workqueue, &work);
else
sp_cleanup();
}
static void startwork(int vpe)
{
sp_stopping = channel_open = 0;
if (workqueue == NULL) {
if ((workqueue = create_singlethread_workqueue("kspd")) == NULL) {
printk(KERN_ERR "unable to start kspd\n");
return;
}
INIT_WORK(&work, sp_work, NULL);
queue_work(workqueue, &work);
} else
queue_work(workqueue, &work);
}
static void stopwork(int vpe)
{
sp_stopping = 1;
printk(KERN_DEBUG "KSPD: SP stopping\n");
}
void kspd_notify(struct kspd_notifications *notify)
{
list_add(&notify->list, &kspd_notifylist);
}
static struct vpe_notifications notify;
static int kspd_module_init(void)
{
INIT_LIST_HEAD(&kspd_notifylist);
notify.start = startwork;
notify.stop = stopwork;
vpe_notify(SP_VPE, &notify);
return 0;
}
static void kspd_module_exit(void)
{
}
module_init(kspd_module_init);
module_exit(kspd_module_exit);
MODULE_DESCRIPTION("MIPS KSPD");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");
@@ -21,45 +21,44 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/mipsmtregs.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/vpe.h>
#include <asm/rtlx.h>
#include <asm/uaccess.h>
#define RTLX_TARG_VPE 1
static struct rtlx_info *rtlx;
static int major;
static char module_name[] = "rtlx";
static struct irqaction irq;
static int irq_num;
static inline int spacefree(int read, int write, int size)
{
if (read == write) {
/*
* never fill the buffer completely, so indexes are always
* equal if empty and only empty, or !equal if data available
*/
return size - 1;
}
return ((read + size - write) % size) - 1;
}
static struct chan_waitqueues {
wait_queue_head_t rt_queue;
wait_queue_head_t lx_queue;
int in_open;
} channel_wqs[RTLX_CHANNELS];
static struct irqaction irq;
static int irq_num;
static struct vpe_notifications notify;
static int sp_stopping = 0;
extern void *vpe_get_shared(int index);
static void rtlx_dispatch(struct pt_regs *regs)
@@ -67,174 +66,298 @@ static void rtlx_dispatch(struct pt_regs *regs)
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs);
}
/* Interrupt handler may be called before rtlx_init has otherwise had
a chance to run.
*/
static irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
int i;
for (i = 0; i < RTLX_CHANNELS; i++) {
struct rtlx_channel *chan = &rtlx->channel[i]; wake_up(&channel_wqs[i].lx_queue);
wake_up(&channel_wqs[i].rt_queue);
if (chan->lx_read != chan->lx_write)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
return IRQ_HANDLED;
}
/* call when we have the address of the shared structure from the SP side. */ static __attribute_used__ void dump_rtlx(void)
static int rtlx_init(struct rtlx_info *rtlxi)
{ {
int i; int i;
if (rtlxi->id != RTLX_ID) { printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi);
return -ENOEXEC;
}
/* initialise the wait queues */
for (i = 0; i < RTLX_CHANNELS; i++) { for (i = 0; i < RTLX_CHANNELS; i++) {
init_waitqueue_head(&channel_wqs[i].rt_queue); struct rtlx_channel *chan = &rtlx->channel[i];
init_waitqueue_head(&channel_wqs[i].lx_queue);
}
/* set up for interrupt handling */ printk(" rt_state %d lx_state %d buffer_size %d\n",
memset(&irq, 0, sizeof(struct irqaction)); chan->rt_state, chan->lx_state, chan->buffer_size);
if (cpu_has_vint) printk(" rt_read %d rt_write %d\n",
set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); chan->rt_read, chan->rt_write);
irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ; printk(" lx_read %d lx_write %d\n",
irq.handler = rtlx_interrupt; chan->lx_read, chan->lx_write);
irq.flags = SA_INTERRUPT;
irq.name = "RTLX"; printk(" rt_buffer <%s>\n", chan->rt_buffer);
irq.dev_id = rtlx; printk(" lx_buffer <%s>\n", chan->lx_buffer);
setup_irq(irq_num, &irq); }
}
/* call when we have the address of the shared structure from the SP side. */
static int rtlx_init(struct rtlx_info *rtlxi)
{
if (rtlxi->id != RTLX_ID) {
printk(KERN_ERR "no valid RTLX id at 0x%p 0x%x\n", rtlxi, rtlxi->id);
return -ENOEXEC;
}
rtlx = rtlxi;
return 0;
}
/* only allow one open process at a time to open each channel */ /* notifications */
static int rtlx_open(struct inode *inode, struct file *filp) static void starting(int vpe)
{ {
int minor, ret; int i;
sp_stopping = 0;
/* force a reload of rtlx */
rtlx=NULL;
/* wake up any sleeping rtlx_open's */
for (i = 0; i < RTLX_CHANNELS; i++)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
static void stopping(int vpe)
{
int i;
sp_stopping = 1;
for (i = 0; i < RTLX_CHANNELS; i++)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
int rtlx_open(int index, int can_sleep)
{
int ret;
struct rtlx_channel *chan;
volatile struct rtlx_info **p;
/* assume only 1 device at the mo. */ if (index >= RTLX_CHANNELS) {
minor = MINOR(inode->i_rdev); printk(KERN_DEBUG "rtlx_open index out of range\n");
return -ENOSYS;
}
if (channel_wqs[index].in_open) {
printk(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
return -EBUSY;
}
channel_wqs[index].in_open++;
if (rtlx == NULL) {
struct rtlx_info **p;
if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
printk(KERN_ERR "vpe_get_shared is NULL. " if (can_sleep) {
"Has an SP program been loaded?\n"); DECLARE_WAITQUEUE(wait, current);
return -EFAULT;
/* go to sleep */
add_wait_queue(&channel_wqs[index].lx_queue, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while ((p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
/* back running */
} else {
printk( KERN_DEBUG "No SP program loaded, and device "
"opened with O_NONBLOCK\n");
channel_wqs[index].in_open = 0;
return -ENOSYS;
}
}
if (*p == NULL) {
printk(KERN_ERR "vpe_shared %p %p\n", p, *p); if (can_sleep) {
return -EFAULT; DECLARE_WAITQUEUE(wait, current);
/* go to sleep */
add_wait_queue(&channel_wqs[index].lx_queue, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while (*p == NULL) {
schedule();
/* reset task state to interruptable otherwise
we'll whizz round here like a very fast loopy
thing. schedule() appears to return with state
set to TASK_RUNNING.
If the loaded SP program, for whatever reason,
doesn't set up the shared structure *p will never
become true. So whoever connected to either /dev/rt?
or if it was kspd, will then take up rather a lot of
processor cycles.
*/
set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
/* back running */
}
else {
printk(" *vpe_get_shared is NULL. "
"Has an SP program been loaded?\n");
channel_wqs[index].in_open = 0;
return -ENOSYS;
}
}
if ((unsigned int)*p < KSEG0) {
printk(KERN_WARNING "vpe_get_shared returned an invalid pointer "
"maybe an error code %d\n", (int)*p);
channel_wqs[index].in_open = 0;
return -ENOSYS;
} }
if ((ret = rtlx_init(*p)) < 0) if ((ret = rtlx_init(*p)) < 0) {
return ret; channel_wqs[index].in_open = 0;
return ret;
}
} }
chan = &rtlx->channel[minor]; chan = &rtlx->channel[index];
if (test_and_set_bit(RTLX_STATE_OPENED, &chan->lx_state)) if (chan->lx_state == RTLX_STATE_OPENED) {
return -EBUSY; channel_wqs[index].in_open = 0;
return -EBUSY;
}
chan->lx_state = RTLX_STATE_OPENED;
channel_wqs[index].in_open = 0;
return 0;
}
static int rtlx_release(struct inode *inode, struct file *filp) int rtlx_release(int index)
{ {
int minor = MINOR(inode->i_rdev); rtlx->channel[index].lx_state = RTLX_STATE_UNUSED;
clear_bit(RTLX_STATE_OPENED, &rtlx->channel[minor].lx_state);
smp_mb__after_clear_bit();
return 0;
}
static unsigned int rtlx_poll(struct file *file, poll_table * wait) unsigned int rtlx_read_poll(int index, int can_sleep)
{ {
int minor; struct rtlx_channel *chan;
unsigned int mask = 0;
struct rtlx_channel *chan;
minor = MINOR(file->f_dentry->d_inode->i_rdev); if (rtlx == NULL)
chan = &rtlx->channel[minor]; return 0;
poll_wait(file, &channel_wqs[minor].rt_queue, wait); chan = &rtlx->channel[index];
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
/* data available to read? */ /* data available to read? */
if (chan->lx_read != chan->lx_write) if (chan->lx_read == chan->lx_write) {
mask |= POLLIN | POLLRDNORM; if (can_sleep) {
DECLARE_WAITQUEUE(wait, current);
/* space to write */ /* go to sleep */
if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size)) add_wait_queue(&channel_wqs[index].lx_queue, &wait);
mask |= POLLOUT | POLLWRNORM;
return mask; set_current_state(TASK_INTERRUPTIBLE);
while (chan->lx_read == chan->lx_write) {
schedule();
set_current_state(TASK_INTERRUPTIBLE);
if (sp_stopping) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
return 0;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
/* back running */
}
else
return 0;
}
return (chan->lx_write + chan->buffer_size - chan->lx_read)
% chan->buffer_size;
} }
static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count, static inline int write_spacefree(int read, int write, int size)
loff_t * ppos)
{ {
unsigned long failed; if (read == write) {
size_t fl = 0L; /*
int minor; * Never fill the buffer completely, so indexes are always
struct rtlx_channel *lx; * equal if empty and only empty, or !equal if data available
DECLARE_WAITQUEUE(wait, current); */
return size - 1;
}
minor = MINOR(file->f_dentry->d_inode->i_rdev); return ((read + size - write) % size) - 1;
lx = &rtlx->channel[minor]; }
/* data available? */ unsigned int rtlx_write_poll(int index)
if (lx->lx_write == lx->lx_read) { {
if (file->f_flags & O_NONBLOCK) struct rtlx_channel *chan = &rtlx->channel[index];
return 0; /* -EAGAIN makes cat whinge */ return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
}
/* go to sleep */ static inline void copy_to(void *dst, void *src, size_t count, int user)
add_wait_queue(&channel_wqs[minor].lx_queue, &wait); {
set_current_state(TASK_INTERRUPTIBLE); if (user)
copy_to_user(dst, src, count);
else
memcpy(dst, src, count);
}
while (lx->lx_write == lx->lx_read) static inline void copy_from(void *dst, void *src, size_t count, int user)
schedule(); {
if (user)
copy_from_user(dst, src, count);
else
memcpy(dst, src, count);
}
set_current_state(TASK_RUNNING); ssize_t rtlx_read(int index, void *buff, size_t count, int user)
remove_wait_queue(&channel_wqs[minor].lx_queue, &wait); {
size_t fl = 0L;
struct rtlx_channel *lx;
/* back running */ if (rtlx == NULL)
} return -ENOSYS;
lx = &rtlx->channel[index];
/* find out how much in total */ /* find out how much in total */
count = min(count, count = min(count,
(size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size); (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read)
% lx->buffer_size);
/* then how much from the read pointer onwards */ /* then how much from the read pointer onwards */
fl = min(count, (size_t)lx->buffer_size - lx->lx_read); fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
failed = copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl); copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user);
if (failed) {
count = fl - failed;
goto out;
}
/* and if there is anything left at the beginning of the buffer */ /* and if there is anything left at the beginning of the buffer */
if (count - fl) { if ( count - fl )
failed = copy_to_user (buffer + fl, lx->lx_buffer, count - fl); copy_to (buff + fl, lx->lx_buffer, count - fl, user);
if (failed) {
count -= failed;
goto out;
}
}
out:
/* update the index */ /* update the index */
lx->lx_read += count; lx->lx_read += count;
lx->lx_read %= lx->buffer_size; lx->lx_read %= lx->buffer_size;
...@@ -242,20 +365,101 @@ static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count, ...@@ -242,20 +365,101 @@ static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count,
return count; return count;
} }
static ssize_t rtlx_write(struct file *file, const char __user * buffer, ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
{
struct rtlx_channel *rt;
size_t fl;
if (rtlx == NULL)
return(-ENOSYS);
rt = &rtlx->channel[index];
/* total number of bytes to copy */
count = min(count,
(size_t)write_spacefree(rt->rt_read, rt->rt_write,
rt->buffer_size));
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user);
/* if there's any left copy to the beginning of the buffer */
if( count - fl )
copy_from (rt->rt_buffer, buffer + fl, count - fl, user);
rt->rt_write += count;
rt->rt_write %= rt->buffer_size;
return(count);
}
static int file_open(struct inode *inode, struct file *filp)
{
int minor = MINOR(inode->i_rdev);
return rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
}
static int file_release(struct inode *inode, struct file *filp)
{
int minor;
minor = MINOR(inode->i_rdev);
return rtlx_release(minor);
}
static unsigned int file_poll(struct file *file, poll_table * wait)
{
int minor;
unsigned int mask = 0;
minor = MINOR(file->f_dentry->d_inode->i_rdev);
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
if (rtlx == NULL)
return 0;
/* data available to read? */
if (rtlx_read_poll(minor, 0))
mask |= POLLIN | POLLRDNORM;
/* space to write */
if (rtlx_write_poll(minor))
mask |= POLLOUT | POLLWRNORM;
return mask;
}
static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
loff_t * ppos)
{
int minor = MINOR(file->f_dentry->d_inode->i_rdev);
/* data available? */
if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
return 0; // -EAGAIN makes cat whinge
}
return rtlx_read(minor, buffer, count, 1);
}
static ssize_t file_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos) size_t count, loff_t * ppos)
{ {
unsigned long failed;
int minor; int minor;
struct rtlx_channel *rt; struct rtlx_channel *rt;
size_t fl;
DECLARE_WAITQUEUE(wait, current); DECLARE_WAITQUEUE(wait, current);
minor = MINOR(file->f_dentry->d_inode->i_rdev); minor = MINOR(file->f_dentry->d_inode->i_rdev);
rt = &rtlx->channel[minor]; rt = &rtlx->channel[minor];
/* any space left... */ /* any space left... */
if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) { if (!rtlx_write_poll(minor)) {
if (file->f_flags & O_NONBLOCK) if (file->f_flags & O_NONBLOCK)
return -EAGAIN; return -EAGAIN;
...@@ -263,61 +467,64 @@ static ssize_t rtlx_write(struct file *file, const char __user * buffer, ...@@ -263,61 +467,64 @@ static ssize_t rtlx_write(struct file *file, const char __user * buffer,
add_wait_queue(&channel_wqs[minor].rt_queue, &wait); add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
while (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) while (!rtlx_write_poll(minor))
schedule(); schedule();
set_current_state(TASK_RUNNING); set_current_state(TASK_RUNNING);
remove_wait_queue(&channel_wqs[minor].rt_queue, &wait); remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
} }
/* total number of bytes to copy */ return rtlx_write(minor, (void *)buffer, count, 1);
count = min(count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) );
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
failed = copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
if (failed) {
count = fl - failed;
goto out;
}
/* if there's any left copy to the beginning of the buffer */
if (count - fl) {
failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
if (failed) {
count -= failed;
goto out;
}
}
out:
rt->rt_write += count;
rt->rt_write %= rt->buffer_size;
return count;
} }
static struct file_operations rtlx_fops = {
.owner = THIS_MODULE,
.open = rtlx_open, .open = file_open,
.release = rtlx_release, .release = file_release,
.write = rtlx_write, .write = file_write,
.read = rtlx_read, .read = file_read,
.poll = rtlx_poll .poll = file_poll
};
static struct irqaction rtlx_irq = {
.handler = rtlx_interrupt,
.flags = SA_INTERRUPT,
.name = "RTLX",
};
static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
static char register_chrdev_failed[] __initdata =
KERN_ERR "rtlx_module_init: unable to register device\n";
static int __init rtlx_module_init(void)
static int rtlx_module_init(void)
{
int i;
major = register_chrdev(0, module_name, &rtlx_fops);
if (major < 0) {
printk(register_chrdev_failed);
return major;
}
/* initialise the wait queues */
for (i = 0; i < RTLX_CHANNELS; i++) {
init_waitqueue_head(&channel_wqs[i].rt_queue);
init_waitqueue_head(&channel_wqs[i].lx_queue);
channel_wqs[i].in_open = 0;
}
/* set up notifiers */
notify.start = starting;
notify.stop = stopping;
vpe_notify(RTLX_TARG_VPE, &notify);
if (cpu_has_vint)
set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
rtlx_irq.dev_id = rtlx;
setup_irq(rtlx_irq_num, &rtlx_irq);
return 0;
}
@@ -330,5 +537,5 @@ module_init(rtlx_module_init);
module_exit(rtlx_module_exit);
MODULE_DESCRIPTION("MIPS RTLX");
MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc.");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");
@@ -13,7 +13,6 @@
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
/*
@@ -27,11 +26,8 @@
*
* To load and run, simply cat a SP 'program file' to /dev/vpe1.
* i.e cat spapp >/dev/vpe1.
*
* You'll need to have the following device files.
* mknod /dev/vpe0 c 63 0
* mknod /dev/vpe1 c 63 1
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -55,6 +51,8 @@
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/vpe.h>
#include <asm/kspd.h>
typedef void *vpe_handle;
@@ -68,6 +66,11 @@ typedef void *vpe_handle;
static char module_name[] = "vpe";
static int major;
#ifdef CONFIG_MIPS_APSP_KSPD
static struct kspd_notifications kspd_events;
static int kspd_events_reqd = 0;
#endif
/* grab the likely amount of memory we will need. */
#ifdef CONFIG_MIPS_VPE_LOADER_TOM
#define P_SIZE (2 * 1024 * 1024)
@@ -76,7 +79,10 @@ static int major;
#define P_SIZE (256 * 1024)
#endif
extern unsigned long physical_memsize;
#define MAX_VPES 16
#define VPE_PATH_MAX 256
enum vpe_state {
VPE_STATE_UNUSED = 0,
@@ -102,6 +108,8 @@ struct vpe {
unsigned long len;
char *pbuffer;
unsigned long plen;
unsigned int uid, gid;
char cwd[VPE_PATH_MAX];
unsigned long __start;
@@ -113,6 +121,9 @@ struct vpe {
/* shared symbol address */
void *shared_ptr;
/* the list of who wants to know when something major happens */
struct list_head notify;
};
struct tc {
@@ -138,7 +149,7 @@ struct vpecontrol_ {
} vpecontrol;
static void release_progmem(void *ptr);
static void dump_vpe(struct vpe * v);
/* static __attribute_used__ void dump_vpe(struct vpe * v); */
extern void save_gp_address(unsigned int secbase, unsigned int rel);
/* get the vpe associated with this minor */
@@ -146,12 +157,14 @@ struct vpe *get_vpe(int minor)
{
struct vpe *v;
if (!cpu_has_mipsmt)
return NULL;
list_for_each_entry(v, &vpecontrol.vpe_list, list) {
if (v->minor == minor)
return v;
}
printk(KERN_DEBUG "VPE: get_vpe minor %d not found\n", minor);
return NULL;
}
@@ -165,8 +178,6 @@ struct tc *get_tc(int index)
return t;
}
printk(KERN_DEBUG "VPE: get_tc index %d not found\n", index);
return NULL;
}
@@ -179,8 +190,6 @@ struct tc *get_tc_unused(void)
return t;
}
printk(KERN_DEBUG "VPE: All TC's are in use\n");
return NULL;
}
@@ -190,13 +199,13 @@ struct vpe *alloc_vpe(int minor)
struct vpe *v;
if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
printk(KERN_WARNING "VPE: alloc_vpe no mem\n");
return NULL;
}
INIT_LIST_HEAD(&v->tc);
list_add_tail(&v->list, &vpecontrol.vpe_list);
INIT_LIST_HEAD(&v->notify);
v->minor = minor;
return v;
}
@@ -207,7 +216,6 @@ struct tc *alloc_tc(int index)
struct tc *t;
if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
printk(KERN_WARNING "VPE: alloc_tc no mem\n");
return NULL;
}
@@ -236,20 +244,16 @@ void dump_mtregs(void)
printk("config3 0x%lx MT %ld\n", val,
(val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
val = read_c0_mvpconf0();
printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
(val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
val = read_c0_mvpcontrol();
printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
(val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
(val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
(val & MVPCONTROL_EVP));
val = read_c0_vpeconf0(); val = read_c0_mvpconf0();
printk("VPEConf0 0x%lx MVP %ld\n", val, printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
(val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT); (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
} }
/* Find some VPE program space */
@@ -354,9 +358,9 @@ static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
}
if( (rel > 32768) || (rel < -32768) ) {
printk(KERN_ERR printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
"apply_r_mips_gprel16: relative address out of range 0x%x %d\n", "relative address 0x%x out of range of gp register\n",
rel, rel); rel);
return -ENOEXEC;
}
@@ -374,8 +378,8 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location,
rel -= 1; // and one instruction less due to the branch delay slot.
if( (rel > 32768) || (rel < -32768) ) {
printk(KERN_ERR printk(KERN_DEBUG "VPE loader: "
"apply_r_mips_pc16: relative address out of range 0x%x\n", rel); "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
return -ENOEXEC;
}
@@ -396,7 +400,8 @@ static int apply_r_mips_26(struct module *me, uint32_t *location,
Elf32_Addr v)
{
if (v % 4) {
printk(KERN_ERR "module %s: dangerous relocation mod4\n", me->name); printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
" unaligned relocation\n");
return -ENOEXEC;
}
@@ -459,12 +464,13 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
/*
* The value for the HI16 had best be the same.
*/
if (v != l->value) {
printk("%d != %d\n", v, l->value); printk(KERN_DEBUG "VPE loader: "
goto out_danger; "apply_r_mips_lo16/hi16: "
"inconsistent value information\n");
return -ENOEXEC;
} }
/*
* Do the HI16 relocation. Note that we actually don't
* need to know anything about the LO16 itself, except
@@ -500,11 +506,6 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
*location = insnlo;
return 0;
out_danger:
printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
return -ENOEXEC;
}
static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
@@ -518,6 +519,15 @@ static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
[R_MIPS_PC16] = apply_r_mips_pc16
};
static char *rstrs[] = {
[R_MIPS_NONE] = "MIPS_NONE",
[R_MIPS_32] = "MIPS_32",
[R_MIPS_26] = "MIPS_26",
[R_MIPS_HI16] = "MIPS_HI16",
[R_MIPS_LO16] = "MIPS_LO16",
[R_MIPS_GPREL16] = "MIPS_GPREL16",
[R_MIPS_PC16] = "MIPS_PC16"
};
int apply_relocations(Elf32_Shdr *sechdrs,
const char *strtab,
@@ -552,15 +562,13 @@ int apply_relocations(Elf32_Shdr *sechdrs,
res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
if( res ) {
printk(KERN_DEBUG char *r = rstrs[ELF32_R_TYPE(r_info)];
"relocation error 0x%x sym refer <%s> value 0x%x " printk(KERN_WARNING "VPE loader: .text+0x%x "
"type 0x%x r_info 0x%x\n", "relocation type %s for symbol \"%s\" failed\n",
(unsigned int)location, strtab + sym->st_name, v, rel[i].r_offset, r ? r : "UNKNOWN",
r_info, ELF32_R_TYPE(r_info)); strtab + sym->st_name);
}
if (res)
return res; return res;
}
}
return 0;
@@ -576,7 +584,7 @@ void save_gp_address(unsigned int secbase, unsigned int rel)
/* Change all symbols so that sh_value encodes the pointer directly. */
static int simplify_symbols(Elf_Shdr * sechdrs, static void simplify_symbols(Elf_Shdr * sechdrs,
unsigned int symindex,
const char *strtab,
const char *secstrings,
@@ -585,18 +593,21 @@ static int simplify_symbols(Elf_Shdr * sechdrs,
Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
unsigned long secbase, bssbase = 0;
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
int ret = 0, size; int size;
/* find the .bss section for COMMON symbols */
for (i = 0; i < nsecs; i++) {
if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0)
if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
bssbase = sechdrs[i].sh_addr;
break;
}
}
for (i = 1; i < n; i++) {
switch (sym[i].st_shndx) {
case SHN_COMMON:
/* Allocate space for the symbol in the .bss section. st_value is currently size. /* Allocate space for the symbol in the .bss section.
st_value is currently size.
We want it to have the address of the symbol. */ We want it to have the address of the symbol. */
size = sym[i].st_value;
@@ -614,11 +625,9 @@ static int simplify_symbols(Elf_Shdr * sechdrs,
break;
case SHN_MIPS_SCOMMON:
printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON"
printk(KERN_DEBUG "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
"simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n", sym[i].st_shndx);
strtab + sym[i].st_name, sym[i].st_shndx);
// .sbss section
break;
@@ -632,10 +641,7 @@ static int simplify_symbols(Elf_Shdr * sechdrs,
sym[i].st_value += secbase;
break;
}
}
return ret;
}
#ifdef DEBUG_ELFLOADER
@@ -655,9 +661,26 @@ static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
static void dump_tc(struct tc *t)
{
printk(KERN_WARNING "VPE: TC index %d TCStatus 0x%lx halt 0x%lx\n", unsigned long val;
t->index, read_tc_c0_tcstatus(), read_tc_c0_tchalt());
printk(KERN_WARNING "VPE: tcrestart 0x%lx\n", read_tc_c0_tcrestart()); settc(t->index);
printk(KERN_DEBUG "VPE loader: TC index %d targtc %ld "
"TCStatus 0x%lx halt 0x%lx\n",
t->index, read_c0_vpecontrol() & VPECONTROL_TARGTC,
read_tc_c0_tcstatus(), read_tc_c0_tchalt());
printk(KERN_DEBUG " tcrestart 0x%lx\n", read_tc_c0_tcrestart());
printk(KERN_DEBUG " tcbind 0x%lx\n", read_tc_c0_tcbind());
val = read_c0_vpeconf0();
printk(KERN_DEBUG " VPEConf0 0x%lx MVP %ld\n", val,
(val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT);
printk(KERN_DEBUG " c0 status 0x%lx\n", read_vpe_c0_status());
printk(KERN_DEBUG " c0 cause 0x%lx\n", read_vpe_c0_cause());
printk(KERN_DEBUG " c0 badvaddr 0x%lx\n", read_vpe_c0_badvaddr());
printk(KERN_DEBUG " c0 epc 0x%lx\n", read_vpe_c0_epc());
} }
static void dump_tclist(void)
@@ -672,96 +695,108 @@ static void dump_tclist(void)
/* We are prepared so configure and start the VPE... */
int vpe_run(struct vpe * v)
{
unsigned long val; struct vpe_notifications *n;
unsigned long val, dmt_flag;
struct tc *t; struct tc *t;
/* check we are the Master VPE */
val = read_c0_vpeconf0();
if (!(val & VPECONF0_MVP)) {
printk(KERN_WARNING
"VPE: only Master VPE's are allowed to configure MT\n"); "VPE loader: only Master VPE's are allowed to configure MT\n");
return -1;
}
/* disable MT (using dvpe) */
dvpe();
if (!list_empty(&v->tc)) {
if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
t->index);
return -ENOEXEC;
}
} else {
printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
v->minor);
return -ENOEXEC;
}
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
if (!list_empty(&v->tc)) {
if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
printk(KERN_WARNING "VPE: TC %d is already in use.\n",
t->index);
return -ENOEXEC;
}
} else {
printk(KERN_WARNING "VPE: No TC's associated with VPE %d\n",
v->minor);
return -ENOEXEC;
}
settc(t->index);
val = read_vpe_c0_vpeconf0();
/* should check it is halted, and not activated */
if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
printk(KERN_WARNING "VPE: TC %d is already doing something!\n", printk(KERN_WARNING "VPE loader: TC %d is already doing something!\n",
t->index);
dump_tclist();
return -ENOEXEC;
}
/*
* Disable multi-threaded execution whilst we activate, clear the
* halt bit and bound the tc to the other VPE...
*/
dmt_flag = dmt();
/* Write the address we want it to start running from in the TCPC register. */
write_tc_c0_tcrestart((unsigned long)v->__start);
/* write the sivc_info address to tccontext */
write_tc_c0_tccontext((unsigned long)0); write_tc_c0_tccontext((unsigned long)0);
/*
/* Set up the XTC bit in vpeconf0 to point at our tc */ * Mark the TC as activated, not interrupt exempt and not dynamically
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (t->index << VPECONF0_XTC_SHIFT)); * allocatable
*/
/* mark the TC as activated, not interrupt exempt and not dynamically allocatable */
val = read_tc_c0_tcstatus();
val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
write_tc_c0_tcstatus(val);
write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
/* set up VPE1 */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); // no multiple TC's
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); // enable this VPE
/* /*
* The sde-kit passes 'memsize' to __start in $a3, so set something * The sde-kit passes 'memsize' to __start in $a3, so set something
* here... * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
* Or set $a3 (register 7) to zero and define DFLT_STACK_SIZE and
* DFLT_HEAP_SIZE when you compile your program * DFLT_HEAP_SIZE when you compile your program
*/ */
mttgpr(7, physical_memsize);
/* set up VPE1 */
/*
* bind the TC to VPE 1 as late as possible so we only have the final
* VPE registers to set up, and so an EJTAG probe can trigger on it
*/
write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor);
mttgpr(7, 0); /* Set up the XTC bit in vpeconf0 to point at our tc */
write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
| (t->index << VPECONF0_XTC_SHIFT));
/* set config to be the same as vpe0, particularly kseg0 coherency alg */ /* enable this VPE */
write_vpe_c0_config(read_c0_config()); write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
/* clear out any left overs from a previous program */
write_vpe_c0_status(0);
write_vpe_c0_cause(0);
/* take system out of configuration state */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
/* clear interrupts enabled IE, ERL, EXL, and KSU from c0 status */ /* now safe to re-enable multi-threading */
write_vpe_c0_status(read_vpe_c0_status() & ~(ST0_ERL | ST0_KSU | ST0_IE | ST0_EXL)); emt(dmt_flag);
/* set it running */
evpe(EVPE_ENABLE);
list_for_each_entry(n, &v->notify, list) {
n->start(v->minor);
}
return 0;
}
static unsigned long find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
unsigned int symindex, const char *strtab,
struct module *mod)
{
@@ -778,26 +813,28 @@ static unsigned long find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
}
}
if ( (v->__start == 0) || (v->shared_ptr == NULL))
return -1;
return 0;
}
/*
* Allocates a VPE with some program code space(the load address), copies * Allocates a VPE with some program code space(the load address), copies the
* the contents of the program (p)buffer performing relocatations/etc, * contents of the program (p)buffer performing relocatations/etc, free's it
* free's it when finished. * when finished.
*/
int vpe_elfload(struct vpe * v)
{
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
long err = 0;
char *secstrings, *strtab = NULL;
unsigned int len, i, symindex = 0, strindex = 0;
unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
struct module mod; // so we can re-use the relocations code
memset(&mod, 0, sizeof(struct module));
strcpy(mod.name, "VPE dummy prog module");
strcpy(mod.name, "VPE loader");
hdr = (Elf_Ehdr *) v->pbuffer;
len = v->plen;
@@ -805,16 +842,22 @@ int vpe_elfload(struct vpe * v)
/* Sanity checks against insmoding binaries or wrong arch,
weird elf version */
if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
|| hdr->e_type != ET_REL || !elf_check_arch(hdr) || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
|| !elf_check_arch(hdr)
|| hdr->e_shentsize != sizeof(*sechdrs)) { || hdr->e_shentsize != sizeof(*sechdrs)) {
printk(KERN_WARNING printk(KERN_WARNING
"VPE program, wrong arch or weird elf version\n"); "VPE loader: program wrong arch or weird elf version\n");
return -ENOEXEC; return -ENOEXEC;
} }
if (hdr->e_type == ET_REL)
relocate = 1;
if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
printk(KERN_ERR "VPE program length %u truncated\n", len); printk(KERN_ERR "VPE loader: program length %u truncated\n",
len);
return -ENOEXEC;
}
@@ -826,82 +869,126 @@ int vpe_elfload(struct vpe * v)
/* And these should exist, but gcc whinges if we don't init them */
symindex = strindex = 0;
for (i = 1; i < hdr->e_shnum; i++) { if (relocate) {
for (i = 1; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type != SHT_NOBITS
&& len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
printk(KERN_ERR "VPE program length %u truncated\n",
len);
return -ENOEXEC;
}
/* Mark all sections sh_addr with their address in the
temporary image. */
sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
/* Internal symbols and strings. */
if (sechdrs[i].sh_type == SHT_SYMTAB) {
symindex = i;
strindex = sechdrs[i].sh_link;
strtab = (char *)hdr + sechdrs[strindex].sh_offset;
}
} }
layout_sections(&mod, hdr, sechdrs, secstrings);
} }
layout_sections(&mod, hdr, sechdrs, secstrings);
v->load_addr = alloc_progmem(mod.core_size);
memset(v->load_addr, 0, mod.core_size);
printk("VPE elf_loader: loading to %p\n", v->load_addr); printk("VPE loader: loading to %p\n", v->load_addr);
for (i = 0; i < hdr->e_shnum; i++) { if (relocate) {
void *dest; for (i = 0; i < hdr->e_shnum; i++) {
void *dest;
if (!(sechdrs[i].sh_flags & SHF_ALLOC))
continue;
dest = v->load_addr + sechdrs[i].sh_entsize;
if (sechdrs[i].sh_type != SHT_NOBITS)
memcpy(dest, (void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size);
/* Update sh_addr to point to copy in image. */
sechdrs[i].sh_addr = (unsigned long)dest;
}
/* Fix up syms, so that st_value is a pointer to location. */ printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
err = secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
simplify_symbols(sechdrs, symindex, strtab, secstrings, }
hdr->e_shnum, &mod);
if (err < 0) {
printk(KERN_WARNING "VPE: unable to simplify symbols\n");
goto cleanup;
}
/* Now do relocations. */ /* Fix up syms, so that st_value is a pointer to location. */
for (i = 1; i < hdr->e_shnum; i++) { simplify_symbols(sechdrs, symindex, strtab, secstrings,
const char *strtab = (char *)sechdrs[strindex].sh_addr; hdr->e_shnum, &mod);
unsigned int info = sechdrs[i].sh_info;
/* Now do relocations. */
/* Not a valid relocation section? */ for (i = 1; i < hdr->e_shnum; i++) {
if (info >= hdr->e_shnum) const char *strtab = (char *)sechdrs[strindex].sh_addr;
continue; unsigned int info = sechdrs[i].sh_info;
/* Don't bother with non-allocated sections */ /* Not a valid relocation section? */
if (!(sechdrs[info].sh_flags & SHF_ALLOC)) if (info >= hdr->e_shnum)
continue; continue;
if (sechdrs[i].sh_type == SHT_REL) /* Don't bother with non-allocated sections */
err = if (!(sechdrs[info].sh_flags & SHF_ALLOC))
apply_relocations(sechdrs, strtab, symindex, i, &mod); continue;
else if (sechdrs[i].sh_type == SHT_RELA)
err = apply_relocate_add(sechdrs, strtab, symindex, i, if (sechdrs[i].sh_type == SHT_REL)
&mod); err = apply_relocations(sechdrs, strtab, symindex, i,
if (err < 0) { &mod);
printk(KERN_WARNING else if (sechdrs[i].sh_type == SHT_RELA)
"vpe_elfload: error in relocations err %ld\n", err = apply_relocate_add(sechdrs, strtab, symindex, i,
err); &mod);
goto cleanup; if (err < 0)
return err;
}
} else {
for (i = 0; i < hdr->e_shnum; i++) {
/* Internal symbols and strings. */
if (sechdrs[i].sh_type == SHT_SYMTAB) {
symindex = i;
strindex = sechdrs[i].sh_link;
strtab = (char *)hdr + sechdrs[strindex].sh_offset;
/* mark the symtab's address for when we try to find the
magic symbols */
sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
}
/* filter sections we dont want in the final image */
if (!(sechdrs[i].sh_flags & SHF_ALLOC) ||
(sechdrs[i].sh_type == SHT_MIPS_REGINFO)) {
printk( KERN_DEBUG " ignoring section, "
"name %s type %x address 0x%x \n",
secstrings + sechdrs[i].sh_name,
sechdrs[i].sh_type, sechdrs[i].sh_addr);
continue;
}
if (sechdrs[i].sh_addr < (unsigned int)v->load_addr) {
printk( KERN_WARNING "VPE loader: "
"fully linked image has invalid section, "
"name %s type %x address 0x%x, before load "
"address of 0x%x\n",
secstrings + sechdrs[i].sh_name,
sechdrs[i].sh_type, sechdrs[i].sh_addr,
(unsigned int)v->load_addr);
return -ENOEXEC;
}
printk(KERN_DEBUG " copying section sh_name %s, sh_addr 0x%x "
"size 0x%x0 from x%p\n",
secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr,
sechdrs[i].sh_size, hdr + sechdrs[i].sh_offset);
if (sechdrs[i].sh_type != SHT_NOBITS)
memcpy((void *)sechdrs[i].sh_addr,
(char *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size);
else
memset((void *)sechdrs[i].sh_addr, 0, sechdrs[i].sh_size);
} }
} }
@@ -910,71 +997,104 @@ int vpe_elfload(struct vpe * v)
(unsigned long)v->load_addr + v->len);
if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
if (v->__start == 0) {
printk(KERN_WARNING "VPE loader: program does not contain "
"a __start symbol\n");
return -ENOEXEC;
}
printk(KERN_WARNING if (v->shared_ptr == NULL)
"VPE: program doesn't contain __start or vpe_shared symbols\n"); printk(KERN_WARNING "VPE loader: "
err = -ENOEXEC; "program does not contain vpe_shared symbol.\n"
" Unable to use AMVP (AP/SP) facilities.\n");
} }
printk(" elf loaded\n"); printk(" elf loaded\n");
return 0;
cleanup:
return err;
} }
static void dump_vpe(struct vpe * v) __attribute_used__ void dump_vpe(struct vpe * v)
{
struct tc *t;
settc(v->minor);
printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol());
printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0());
list_for_each_entry(t, &vpecontrol.tc_list, list) { list_for_each_entry(t, &vpecontrol.tc_list, list)
dump_tc(t); dump_tc(t);
}
} }
/* checks for VPE is unused and gets ready to load program */ static void cleanup_tc(struct tc *tc)
{
int tmp;
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
settc(tc->index);
tmp = read_tc_c0_tcstatus();
/* mark not allocated and not dynamically allocatable */
tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
tmp |= TCSTATUS_IXMT; /* interrupt exempt */
write_tc_c0_tcstatus(tmp);
write_tc_c0_tchalt(TCHALT_H);
/* bind it to anything other than VPE1 */
write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
clear_c0_mvpcontrol(MVPCONTROL_VPC);
}
static int getcwd(char *buff, int size)
{
mm_segment_t old_fs;
int ret;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_getcwd(buff,size);
set_fs(old_fs);
return ret;
}
/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
	int minor, ret;
	struct vpe *v;
	struct vpe_notifications *not;

	/* assume only 1 device at the mo. */
	if ((minor = MINOR(inode->i_rdev)) != 1) {
		printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
		return -ENODEV;
	}

	if ((v = get_vpe(minor)) == NULL) {
		printk(KERN_WARNING "VPE loader: unable to get vpe\n");
		return -ENODEV;
	}

	if (v->state != VPE_STATE_UNUSED) {
		dvpe();

		printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");

		dump_tc(get_tc(minor));

		list_for_each_entry(not, &v->notify, list) {
			not->stop(minor);
		}

		release_progmem(v->load_addr);
		cleanup_tc(get_tc(minor));
	}

	// allocate it so when we get write ops we know it's expected.
...@@ -986,6 +1106,24 @@ static int vpe_open(struct inode *inode, struct file *filp)
	v->load_addr = NULL;
	v->len = 0;
v->uid = filp->f_uid;
v->gid = filp->f_gid;
#ifdef CONFIG_MIPS_APSP_KSPD
/* get kspd to tell us when a syscall_exit happens */
if (!kspd_events_reqd) {
kspd_notify(&kspd_events);
kspd_events_reqd++;
}
#endif
v->cwd[0] = 0;
ret = getcwd(v->cwd, VPE_PATH_MAX);
if (ret < 0)
printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
v->shared_ptr = NULL;
v->__start = 0;
	return 0;
}
...@@ -1006,14 +1144,22 @@ static int vpe_release(struct inode *inode, struct file *filp)
		if (vpe_elfload(v) >= 0)
			vpe_run(v);
		else {
			printk(KERN_WARNING "VPE loader: ELF load failed.\n");
			ret = -ENOEXEC;
		}
	} else {
		printk(KERN_WARNING "VPE loader: only elf files are supported\n");
		ret = -ENOEXEC;
	}
	/* It's good to be able to run the SP and if it chokes have a look at
	   the /dev/rt?. But if we reset the pointer to the shared struct we
	   lose what has happened. So perhaps if garbage is sent to the vpe
	   device, use it as a trigger for the reset. Hopefully a nice
	   executable will be along shortly. */
if (ret < 0)
v->shared_ptr = NULL;
	// cleanup any temp buffers
	if (v->pbuffer)
		vfree(v->pbuffer);
...@@ -1033,21 +1179,19 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
		return -ENODEV;

	if (v->pbuffer == NULL) {
		printk(KERN_ERR "VPE loader: no buffer for program\n");
		return -ENOMEM;
	}

	if ((count + v->len) > v->plen) {
		printk(KERN_WARNING
		       "VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
		return -ENOMEM;
	}

	count -= copy_from_user(v->pbuffer + v->len, buffer, count);
	if (!count)
		return -EFAULT;

	v->len += count;
	return ret;
...@@ -1149,16 +1293,70 @@ void *vpe_get_shared(int index)
{
	struct vpe *v;

	if ((v = get_vpe(index)) == NULL)
		return NULL;

	return v->shared_ptr;
}

EXPORT_SYMBOL(vpe_get_shared);
int vpe_getuid(int index)
{
struct vpe *v;
if ((v = get_vpe(index)) == NULL)
return -1;
return v->uid;
}
EXPORT_SYMBOL(vpe_getuid);
int vpe_getgid(int index)
{
struct vpe *v;
if ((v = get_vpe(index)) == NULL)
return -1;
return v->gid;
}
EXPORT_SYMBOL(vpe_getgid);
int vpe_notify(int index, struct vpe_notifications *notify)
{
struct vpe *v;
if ((v = get_vpe(index)) == NULL)
return -1;
list_add(&notify->list, &v->notify);
return 0;
}
EXPORT_SYMBOL(vpe_notify);
char *vpe_getcwd(int index)
{
struct vpe *v;
if ((v = get_vpe(index)) == NULL)
return NULL;
return v->cwd;
}
EXPORT_SYMBOL(vpe_getcwd);
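
The vpe_getuid/vpe_getgid/vpe_getcwd/vpe_notify exports above give other kernel code a view of the loaded SP program's credentials and a hook into its lifetime. As a rough illustration of the intended use (not part of this commit; the module, callback, and variable names below are invented), a client module could register start/stop callbacks like this:

/* Hypothetical client of the new vpe_* exports - for illustration only. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/vpe.h>

static void example_sp_start(int vpe)
{
	printk(KERN_INFO "example: SP program started on vpe%d (uid %d)\n",
	       vpe, vpe_getuid(vpe));
}

static void example_sp_stop(int vpe)
{
	printk(KERN_INFO "example: SP program on vpe%d stopped\n", vpe);
}

static struct vpe_notifications example_notify = {
	.start = example_sp_start,
	.stop  = example_sp_stop,
};

static int __init example_init(void)
{
	/* vpe1 is the only SP VPE handled by this loader */
	return vpe_notify(1, &example_notify);
}
module_init(example_init);
MODULE_LICENSE("GPL");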
#ifdef CONFIG_MIPS_APSP_KSPD
static void kspd_sp_exit(int sp_id)
{
cleanup_tc(get_tc(sp_id));
}
#endif
static int __init vpe_module_init(void)
{
	struct vpe *v = NULL;
...@@ -1201,7 +1399,8 @@ static int __init vpe_module_init(void)
			return -ENODEV;
		}

		/* add the tc to the list of this vpe's tc's. */
		list_add(&t->tc, &v->tc);

		/* deactivate all but vpe0 */
		if (i != 0) {
...@@ -1222,10 +1421,12 @@ static int __init vpe_module_init(void)
					     ~(ST0_IM | ST0_IE | ST0_KSU))
					    | ST0_CU0);

			/*
			 * Set config to be the same as vpe0,
			 * particularly kseg0 coherency alg
			 */
			write_vpe_c0_config(read_c0_config());
		}
	}

	/* TC's */
...@@ -1234,23 +1435,28 @@ static int __init vpe_module_init(void)
		if (i != 0) {
			unsigned long tmp;

			settc(i);

			/* Any TC that is bound to VPE0 gets left as is - in case
			   we are running SMTC on VPE0. A TC that is bound to any
			   other VPE gets bound to VPE0, ideally I'd like to make
			   it homeless but it doesn't appear to let me bind a TC
			   to a non-existent VPE. Which is perfectly reasonable.

			   The (un)bound state is visible to an EJTAG probe so may
			   notify GDB...
			*/
			if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
				/* tc is bound >vpe0 */
				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);

				t->pvpe = get_vpe(0);	/* set the parent vpe */
			}

			tmp = read_tc_c0_tcstatus();

			/* mark not activated and not dynamically allocatable */
			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
			write_tc_c0_tcstatus(tmp);
...@@ -1262,6 +1468,9 @@ static int __init vpe_module_init(void)
	/* release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);
#ifdef CONFIG_MIPS_APSP_KSPD
kspd_events.kspd_sp_exit = kspd_sp_exit;
#endif
	return 0;
}
...@@ -1281,5 +1490,5 @@ static void __exit vpe_module_exit(void)
module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");
/*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
#ifndef _ASM_KSPD_H
#define _ASM_KSPD_H
struct kspd_notifications {
void (*kspd_sp_exit)(int sp_id);
struct list_head list;
};
#ifdef CONFIG_MIPS_APSP_KSPD
extern void kspd_notify(struct kspd_notifications *notify);
#else
static inline void kspd_notify(struct kspd_notifications *notify)
{
}
#endif
#endif
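
The header only declares the notification hook, so here is a minimal sketch (assuming CONFIG_MIPS_APSP_KSPD is enabled; the handler and helper names my_sp_exit/my_register_with_kspd are invented) of how a module would ask KSPD to report the SP-side "exit" syscall:

/* Sketch only: registering for the SP "exit" notification via KSPD. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <asm/kspd.h>

static void my_sp_exit(int sp_id)
{
	/* sp_id identifies the VPE whose program has exited */
	printk(KERN_INFO "SP program on vpe%d exited\n", sp_id);
}

static struct kspd_notifications my_kspd_notify = {
	.kspd_sp_exit = my_sp_exit,
};

static void my_register_with_kspd(void)
{
	/* with CONFIG_MIPS_APSP_KSPD disabled this resolves to the empty stub */
	kspd_notify(&my_kspd_notify);
}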
...@@ -234,7 +234,7 @@ static inline void __raw_emt(void)
	__asm__ __volatile__(
	"	.set	noreorder			\n"
	"	.set	mips32r2			\n"
	"	.word	0x41600be1		# emt	\n"
	"	ehb					\n"
	"	.set	mips0				\n"
	"	.set	reorder");
...@@ -364,6 +364,9 @@ do { \
#define read_vpe_c0_ebase()		mftc0(15,1)
#define write_vpe_c0_ebase(val)		mttc0(15, 1, val)
#define write_vpe_c0_compare(val)	mttc0(11, 0, val)
#define read_vpe_c0_badvaddr() mftc0(8, 0)
#define read_vpe_c0_epc() mftc0(14, 0)
#define write_vpe_c0_epc(val) mttc0(14, 0, val)
/* TC */
......
...@@ -3,32 +3,46 @@
 *
 */
#ifndef __ASM_RTLX_H
#define __ASM_RTLX_H

#define LX_NODE_BASE 10

#define MIPSCPU_INT_BASE 16
#define MIPS_CPU_RTLX_IRQ 0

#define RTLX_VERSION 2
#define RTLX_xID 0x12345600
#define RTLX_ID (RTLX_xID | RTLX_VERSION)
#define RTLX_CHANNELS 8

#define RTLX_CHANNEL_STDIO	0
#define RTLX_CHANNEL_DBG	1
#define RTLX_CHANNEL_SYSIO	2

extern int rtlx_open(int index, int can_sleep);
extern int rtlx_release(int index);
extern ssize_t rtlx_read(int index, void *buff, size_t count, int user);
extern ssize_t rtlx_write(int index, void *buffer, size_t count, int user);
extern unsigned int rtlx_read_poll(int index, int can_sleep);
extern unsigned int rtlx_write_poll(int index);

enum rtlx_state {
	RTLX_STATE_UNUSED,
	RTLX_STATE_INITIALISED,
	RTLX_STATE_REMOTE_READY,
	RTLX_STATE_OPENED
};

#define RTLX_BUFFER_SIZE 1024

/* each channel supports read and write.
   linux (vpe0) reads lx_buffer and writes rt_buffer
   SP (vpe1) reads rt_buffer and writes lx_buffer */
struct rtlx_channel {
	enum rtlx_state rt_state;
	enum rtlx_state lx_state;

	int buffer_size;
...@@ -38,15 +52,13 @@ struct rtlx_channel {
	int lx_write, lx_read;
	char *lx_buffer;
};

struct rtlx_info {
	unsigned long id;
	enum rtlx_state state;

	struct rtlx_channel channel[RTLX_CHANNELS];
};

#endif /* __ASM_RTLX_H */
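
The extern declarations above are the new in-kernel face of RTLX, which is what lets kspd talk to the SP side without going through the /dev/rtlx character devices. Below is a hedged sketch of the pattern (channel choice, blocking behaviour and return-value handling are simplified; example_send is an invented helper, not a function in this commit):

/* Sketch only: sending a kernel buffer to the SP side over the SYSIO channel. */
#include <linux/types.h>
#include <asm/rtlx.h>

static int example_send(void *buf, size_t len)
{
	int ret;

	ret = rtlx_open(RTLX_CHANNEL_SYSIO, 1);	/* 1 == may sleep */
	if (ret != 0)
		return ret;

	/* wait until the channel's write buffer has room for the message */
	while (rtlx_write_poll(RTLX_CHANNEL_SYSIO) < len)
		;

	/* final argument 0: buf is a kernel pointer, not a user pointer */
	rtlx_write(RTLX_CHANNEL_SYSIO, buf, len, 0);

	return rtlx_release(RTLX_CHANNEL_SYSIO);
}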
/*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
#ifndef _ASM_VPE_H
#define _ASM_VPE_H
struct vpe_notifications {
void (*start)(int vpe);
void (*stop)(int vpe);
struct list_head list;
};
extern int vpe_notify(int index, struct vpe_notifications *notify);
extern void *vpe_get_shared(int index);
extern int vpe_getuid(int index);
extern int vpe_getgid(int index);
extern char *vpe_getcwd(int index);
#endif /* _ASM_VPE_H */
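
vpe_get_shared() returns whatever the SP program exported through its vpe_shared symbol, or NULL until such a program has been loaded. In the RTLX case that pointer is an rtlx_info structure, so an AP-side driver would typically validate it before use; a small sketch follows, with the helper name example_get_info invented for illustration:

/* Sketch only: looking up and sanity-checking the SP program's shared area. */
#include <asm/vpe.h>
#include <asm/rtlx.h>

static struct rtlx_info *example_get_info(int index)
{
	struct rtlx_info *info = vpe_get_shared(index);

	if (info == NULL)		/* no program with a vpe_shared symbol yet */
		return NULL;

	if (info->id != RTLX_ID)	/* not an RTLX-aware SP program */
		return NULL;

	return info;
}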