Commit 07cc0c9e authored Jul 27, 2007 by Ralf Baechle
[MIPS] MT: Enable coexistence of AP/SP with VSMP and SMTC.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

parent c3a005f4
Showing 7 changed files with 203 additions and 169 deletions
arch/mips/Kconfig            +12  -12
arch/mips/kernel/kspd.c       +9  -10
arch/mips/kernel/mips-mt.c   +22   -0
arch/mips/kernel/rtlx.c      +17   -5
arch/mips/kernel/smtc.c       +0  -16
arch/mips/kernel/vpe.c      +137 -126
include/asm-mips/mips_mt.h    +6   -0
arch/mips/Kconfig
...
...
@@ -1377,17 +1377,6 @@ config MIPS_MT_SMTC
 	  This is a kernel model which is known a SMTC or lately has been
 	  marketesed into SMVP.
 
-config MIPS_VPE_LOADER
-	bool "VPE loader support."
-	depends on SYS_SUPPORTS_MULTITHREADING
-	select CPU_MIPSR2_IRQ_VI
-	select CPU_MIPSR2_IRQ_EI
-	select CPU_MIPSR2_SRS
-	select MIPS_MT
-	help
-	  Includes a loader for loading an elf relocatable object
-	  onto another VPE and running it.
-
 endchoice
 
 config MIPS_MT
...
...
@@ -1398,8 +1387,19 @@ config SYS_SUPPORTS_MULTITHREADING
 config MIPS_MT_FPAFF
 	bool "Dynamic FPU affinity for FP-intensive threads"
-	depends on MIPS_MT
 	default y
+	depends on MIPS_MT_SMP || MIPS_MT_SMTC
+
+config MIPS_VPE_LOADER
+	bool "VPE loader support."
+	depends on SYS_SUPPORTS_MULTITHREADING
+	select CPU_MIPSR2_IRQ_VI
+	select CPU_MIPSR2_IRQ_EI
+	select CPU_MIPSR2_SRS
+	select MIPS_MT
+	help
+	  Includes a loader for loading an elf relocatable object
+	  onto another VPE and running it.
 
 config MIPS_MT_SMTC_INSTANT_REPLAY
 	bool "Low-latency Dispatch of Deferred SMTC IPIs"
...
...
arch/mips/kernel/kspd.c
...
...
@@ -89,7 +89,7 @@ static int sp_stopping = 0;
 #define MTSP_O_EXCL	0x0800
 #define MTSP_O_BINARY	0x8000
 
-#define SP_VPE 1
+extern int tclimit;
 
 struct apsp_table {
 	int sp;
...
...
@@ -225,8 +225,8 @@ void sp_work_handle_request(void)
 	/* Run the syscall at the priviledge of the user who loaded the
 	   SP program */
 
-	if (vpe_getuid(SP_VPE))
-		sp_setfsuidgid(vpe_getuid(SP_VPE), vpe_getgid(SP_VPE));
+	if (vpe_getuid(tclimit))
+		sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
 
 	switch (sc.cmd) {
 	/* needs the flags argument translating from SDE kit to
...
...
@@ -245,7 +245,7 @@ void sp_work_handle_request(void)
 	case MTSP_SYSCALL_EXIT:
 		list_for_each_entry(n, &kspd_notifylist, list)
-			n->kspd_sp_exit(SP_VPE);
+			n->kspd_sp_exit(tclimit);
 		sp_stopping = 1;
 
 		printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
...
...
@@ -255,7 +255,7 @@ void sp_work_handle_request(void)
 	case MTSP_SYSCALL_OPEN:
 		generic.arg1 = translate_open_flags(generic.arg1);
 
-		vcwd = vpe_getcwd(SP_VPE);
+		vcwd = vpe_getcwd(tclimit);
 
 		/* change to the cwd of the process that loaded the SP program */
 		old_fs = get_fs();
...
...
@@ -283,7 +283,7 @@ void sp_work_handle_request(void)
 		break;
 	} /* switch */
 
-	if (vpe_getuid(SP_VPE))
+	if (vpe_getuid(tclimit))
 		sp_setfsuidgid(0, 0);
 
 	old_fs = get_fs();
...
...
@@ -364,10 +364,9 @@ static void startwork(int vpe)
 		}
 
 		INIT_WORK(&work, sp_work);
-		queue_work(workqueue, &work);
-	} else
-		queue_work(workqueue, &work);
+	}
 
+	queue_work(workqueue, &work);
 }
 
 static void stopwork(int vpe)
...
...
@@ -389,7 +388,7 @@ static int kspd_module_init(void)
 
 	notify.start = startwork;
 	notify.stop = stopwork;
-	vpe_notify(SP_VPE, &notify);
+	vpe_notify(tclimit, &notify);
 
 	return 0;
 }
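The hunks above retarget KSPD's start/stop callbacks from the hard-coded SP_VPE to the boot-time tclimit. A hedged sketch of that registration pattern, as another client might use it; the my_* names are illustrative, while struct vpe_notifications, vpe_notify() and tclimit are the interfaces kspd.c itself relies on (declared in <asm/vpe.h> and <asm/mips_mt.h> in this tree):

/*
 * Hedged sketch only: callback hooks for the AP/SP thread context.
 */
#include <linux/init.h>
#include <asm/vpe.h>

extern int tclimit;

static void my_sp_started(int vpe)
{
	/* the SP program on thread context 'vpe' has started running */
}

static void my_sp_stopped(int vpe)
{
	/* the SP program was stopped or is being replaced */
}

static struct vpe_notifications my_hooks = {
	.start = my_sp_started,
	.stop  = my_sp_stopped,
};

static int __init my_client_init(void)
{
	/* register against the first TC not used by Linux */
	vpe_notify(tclimit, &my_hooks);
	return 0;
}
device_initcall(my_client_init);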
...
...
arch/mips/kernel/mips-mt.c
...
...
@@ -21,6 +21,28 @@
 #include <asm/r4kcache.h>
 #include <asm/cacheflush.h>
 
+int vpelimit;
+
+static int __init maxvpes(char *str)
+{
+	get_option(&str, &vpelimit);
+
+	return 1;
+}
+
+__setup("maxvpes=", maxvpes);
+
+int tclimit;
+
+static int __init maxtcs(char *str)
+{
+	get_option(&str, &tclimit);
+
+	return 1;
+}
+
+__setup("maxtcs=", maxtcs);
+
 /*
  * Dump new MIPS MT state for the core. Does not leave TCs halted.
  * Takes an argument which taken to be a pre-call MVPControl value.
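The two handlers added above are where the "maxvpes=" and "maxtcs=" kernel arguments land in vpelimit and tclimit. A hedged sketch of the same __setup()/get_option() early-parameter pattern, using a hypothetical "apsp_debug=" option that is not part of this commit:

/*
 * Hedged sketch of the __setup()/get_option() pattern used by maxvpes=/maxtcs=.
 * The "apsp_debug=" option and the apsp_debug variable are illustrative only.
 */
#include <linux/init.h>
#include <linux/kernel.h>

static int apsp_debug;

static int __init apsp_debug_setup(char *str)
{
	/* parse the integer that follows "apsp_debug=" on the command line */
	get_option(&str, &apsp_debug);

	return 1;	/* non-zero: the option has been consumed */
}

__setup("apsp_debug=", apsp_debug_setup);

Booting with, say, "maxvpes=1 maxtcs=1" on a two-VPE core then restricts Linux to VPE 0/TC 0 and leaves the remaining VPE and TCs free for the AP/SP program.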
...
...
arch/mips/kernel/rtlx.c
...
...
@@ -40,12 +40,11 @@
 #include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/mips_mt.h>
 #include <asm/system.h>
 #include <asm/vpe.h>
 #include <asm/rtlx.h>
 
-#define RTLX_TARG_VPE 1
-
 static struct rtlx_info *rtlx;
 static int major;
 static char module_name[] = "rtlx";
...
...
@@ -165,10 +164,10 @@ int rtlx_open(int index, int can_sleep)
 	}
 
 	if (rtlx == NULL) {
-		if ((p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
+		if ((p = vpe_get_shared(tclimit)) == NULL) {
 			if (can_sleep) {
 				__wait_event_interruptible(channel_wqs[index].lx_queue,
-						(p = vpe_get_shared(RTLX_TARG_VPE)),
+						(p = vpe_get_shared(tclimit)),
 						ret);
 				if (ret)
 					goto out_fail;
...
...
@@ -477,6 +476,19 @@ static int rtlx_module_init(void)
 	struct device *dev;
 	int i, err;
 
+	if (!cpu_has_mipsmt) {
+		printk("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (tclimit == 0) {
+		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
+		       "initializing RTLX.\nPass maxtcs=<n> argument as kernel "
+		       "argument\n");
+
+		return -ENODEV;
+	}
+
 	major = register_chrdev(0, module_name, &rtlx_fops);
 	if (major < 0) {
 		printk(register_chrdev_failed);
...
...
@@ -501,7 +513,7 @@ static int rtlx_module_init(void)
 	/* set up notifiers */
 	notify.start = starting;
 	notify.stop = stopping;
-	vpe_notify(RTLX_TARG_VPE, &notify);
+	vpe_notify(tclimit, &notify);
 
 	if (cpu_has_vint)
 		set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
...
...
arch/mips/kernel/smtc.c
...
...
@@ -86,25 +86,11 @@ unsigned int smtc_status = 0;
 
 /* Boot command line configuration overrides */
 
-static int vpelimit = 0;
-static int tclimit = 0;
 static int ipibuffers = 0;
 static int nostlb = 0;
 static int asidmask = 0;
 unsigned long smtc_asid_mask = 0xff;
 
-static int __init maxvpes(char *str)
-{
-	get_option(&str, &vpelimit);
-	return 1;
-}
-
-static int __init maxtcs(char *str)
-{
-	get_option(&str, &tclimit);
-	return 1;
-}
-
 static int __init ipibufs(char *str)
 {
 	get_option(&str, &ipibuffers);
...
...
@@ -137,8 +123,6 @@ static int __init asidmask_set(char *str)
 	return 1;
 }
 
-__setup("maxvpes=", maxvpes);
-__setup("maxtcs=", maxtcs);
 __setup("ipibufs=", ipibufs);
 __setup("nostlb", stlb_disable);
 __setup("asidmask=", asidmask_set);
...
...
arch/mips/kernel/vpe.c
...
...
@@ -27,7 +27,6 @@
* To load and run, simply cat a SP 'program file' to /dev/vpe1.
* i.e cat spapp >/dev/vpe1.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
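The retained header comment describes the user-visible workflow: write an ELF relocatable object to /dev/vpe1 and it is loaded and started on the spare VPE. A hedged user-space sketch of that "cat spapp >/dev/vpe1" step; the input file name, buffer size and error handling are illustrative, only the device node comes from the comment:

/* Hedged sketch of "cat spapp > /dev/vpe1" as a small program. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in, out;

	in = open("spapp", O_RDONLY);		/* the SP ELF object */
	out = open("/dev/vpe1", O_WRONLY);	/* the VPE loader device */
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}

	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n) {
			perror("write");
			return 1;
		}

	close(in);
	close(out);	/* releasing the device triggers the ELF load and start */
	return 0;
}

Closing the device is the interesting part: as the later hunks in this file show, vpe_release() checks the ELF magic, calls vpe_elfload() and then vpe_run().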
...
...
@@ -54,6 +53,7 @@
 #include <asm/system.h>
 #include <asm/vpe.h>
 #include <asm/kspd.h>
+#include <asm/mips_mt.h>
 
 typedef void *vpe_handle;
...
...
@@ -132,14 +132,9 @@ struct tc {
 	enum tc_state state;
 	int index;
 
-	/* parent VPE */
-	struct vpe *pvpe;
-
-	/* The list of TC's with this VPE */
-	struct list_head tc;
-
-	/* The global list of tc's */
-	struct list_head list;
+	struct vpe *pvpe;	/* parent VPE */
+	struct list_head tc;	/* The list of TC's with this VPE */
+	struct list_head list;	/* The global list of tc's */
 };
 
 struct {
...
...
@@ -217,18 +212,17 @@ struct vpe *alloc_vpe(int minor)
 /* allocate a tc. At startup only tc0 is running, all other can be halted. */
 struct tc *alloc_tc(int index)
 {
-	struct tc *t;
-
-	if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
-		return NULL;
-	}
+	struct tc *tc;
 
-	INIT_LIST_HEAD(&t->tc);
-	list_add_tail(&t->list, &vpecontrol.tc_list);
+	if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
+		goto out;
 
-	t->index = index;
+	INIT_LIST_HEAD(&tc->tc);
+	tc->index = index;
+	list_add_tail(&tc->list, &vpecontrol.tc_list);
 
-	return t;
+out:
+	return tc;
 }
 
 /* clean up and free everything */
...
...
@@ -663,66 +657,48 @@ static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
 }
 #endif
 
-static void dump_tc(struct tc *t)
-{
-	unsigned long val;
-
-	settc(t->index);
-	printk(KERN_DEBUG "VPE loader: TC index %d targtc %ld "
-	       "TCStatus 0x%lx halt 0x%lx\n",
-	       t->index, read_c0_vpecontrol() & VPECONTROL_TARGTC,
-	       read_tc_c0_tcstatus(), read_tc_c0_tchalt());
-	printk(KERN_DEBUG " tcrestart 0x%lx\n", read_tc_c0_tcrestart());
-	printk(KERN_DEBUG " tcbind 0x%lx\n", read_tc_c0_tcbind());
-
-	val = read_c0_vpeconf0();
-	printk(KERN_DEBUG " VPEConf0 0x%lx MVP %ld\n", val,
-	       (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT);
-	printk(KERN_DEBUG " c0 status 0x%lx\n", read_vpe_c0_status());
-	printk(KERN_DEBUG " c0 cause 0x%lx\n", read_vpe_c0_cause());
-	printk(KERN_DEBUG " c0 badvaddr 0x%lx\n", read_vpe_c0_badvaddr());
-	printk(KERN_DEBUG " c0 epc 0x%lx\n", read_vpe_c0_epc());
-}
-
-static void dump_tclist(void)
-{
-	struct tc *t;
-
-	list_for_each_entry(t, &vpecontrol.tc_list, list) {
-		dump_tc(t);
-	}
-}
-
 /* We are prepared so configure and start the VPE... */
 static int vpe_run(struct vpe * v)
 {
+	unsigned long flags, val, dmt_flag;
 	struct vpe_notifications *n;
-	unsigned long val, dmt_flag;
+	unsigned int vpeflags;
 	struct tc *t;
 
 	/* check we are the Master VPE */
+	local_irq_save(flags);
 	val = read_c0_vpeconf0();
 	if (!(val & VPECONF0_MVP)) {
 		printk(KERN_WARNING
 		       "VPE loader: only Master VPE's are allowed to configure MT\n");
+		local_irq_restore(flags);
+
 		return -1;
 	}
 
-	/* disable MT (using dvpe) */
-	dvpe();
+	dmt_flag = dmt();
+	vpeflags = dvpe();
 
 	if (!list_empty(&v->tc)) {
 		if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
-			printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
-			       t->index);
+			evpe(vpeflags);
+			emt(dmt_flag);
+			local_irq_restore(flags);
+
+			printk(KERN_WARNING
+			       "VPE loader: TC %d is already in use.\n",
+			       t->index);
+
 			return -ENOEXEC;
 		}
 	} else {
-		printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
-		       v->minor);
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		printk(KERN_WARNING
+		       "VPE loader: No TC's associated with VPE %d\n",
+		       v->minor);
+
 		return -ENOEXEC;
 	}
...
...
@@ -733,21 +709,20 @@ static int vpe_run(struct vpe * v)
 	/* should check it is halted, and not activated */
 	if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
-		printk(KERN_WARNING "VPE loader: TC %d is already doing something!\n",
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
 		       t->index);
-		dump_tclist();
+
 		return -ENOEXEC;
 	}
 
-	/*
-	 * Disable multi-threaded execution whilst we activate, clear the
-	 * halt bit and bound the tc to the other VPE...
-	 */
-	dmt_flag = dmt();
-
 	/* Write the address we want it to start running from in the TCPC register. */
 	write_tc_c0_tcrestart((unsigned long)v->__start);
 	write_tc_c0_tccontext((unsigned long)0);
 
 	/*
 	 * Mark the TC as activated, not interrupt exempt and not dynamically
 	 * allocatable
...
...
@@ -763,15 +738,14 @@ static int vpe_run(struct vpe * v)
 	 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
 	 * DFLT_HEAP_SIZE when you compile your program
 	 */
 	mttgpr(7, physical_memsize);
-
 	/* set up VPE1 */
 	/*
 	 * bind the TC to VPE 1 as late as possible so we only have the final
 	 * VPE registers to set up, and so an EJTAG probe can trigger on it
 	 */
-	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor);
+	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
 
 	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
...
...
@@ -793,15 +767,16 @@ static int vpe_run(struct vpe * v)
 	/* take system out of configuration state */
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
 
-	/* now safe to re-enable multi-threading */
-	emt(dmt_flag);
-
-	/* set it running */
+#ifdef CONFIG_SMP
 	evpe(EVPE_ENABLE);
+#else
+	evpe(vpeflags);
+#endif
+	emt(dmt_flag);
+	local_irq_restore(flags);
 
-	list_for_each_entry(n, &v->notify, list) {
-		n->start(v->minor);
-	}
+	list_for_each_entry(n, &v->notify, list)
+		n->start(minor);
 
 	return 0;
 }
...
...
@@ -1023,23 +998,15 @@ static int vpe_elfload(struct vpe * v)
 	return 0;
 }
 
-void __used dump_vpe(struct vpe * v)
-{
-	struct tc *t;
-
-	settc(v->minor);
-
-	printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol());
-	printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0());
-
-	list_for_each_entry(t, &vpecontrol.tc_list, list)
-		dump_tc(t);
-}
-
 static void cleanup_tc(struct tc *tc)
 {
+	unsigned long flags;
+	unsigned int mtflags, vpflags;
 	int tmp;
 
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
+
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
...
...
@@ -1054,9 +1021,12 @@ static void cleanup_tc(struct tc *tc)
 	write_tc_c0_tchalt(TCHALT_H);
 
 	/* bind it to anything other than VPE1 */
 	write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
 
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
 }
 
 static int getcwd(char *buff, int size)
...
...
@@ -1077,36 +1047,32 @@ static int getcwd(char *buff, int size)
 /* checks VPE is unused and gets ready to load program  */
 static int vpe_open(struct inode *inode, struct file *filp)
 {
-	int minor, ret;
 	enum vpe_state state;
-	struct vpe *v;
 	struct vpe_notifications *not;
+	struct vpe *v;
+	int ret;
 
-	/* assume only 1 device at the mo. */
-	if ((minor = iminor(inode)) != 1) {
+	if (minor != iminor(inode)) {
+		/* assume only 1 device at the moment. */
 		printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
 		return -ENODEV;
 	}
 
-	if ((v = get_vpe(minor)) == NULL) {
+	if ((v = get_vpe(tclimit)) == NULL) {
 		printk(KERN_WARNING "VPE loader: unable to get vpe\n");
 		return -ENODEV;
 	}
 
 	state = xchg(&v->state, VPE_STATE_INUSE);
 	if (state != VPE_STATE_UNUSED) {
 		dvpe();
 
 		printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
 
-		dump_tc(get_tc(minor));
-
 		list_for_each_entry(not, &v->notify, list) {
-			not->stop(minor);
+			not->stop(tclimit);
 		}
 
 		release_progmem(v->load_addr);
-		cleanup_tc(get_tc(minor));
+		cleanup_tc(get_tc(tclimit));
 	}
 
 	/* this of-course trashes what was there before... */
...
...
@@ -1133,26 +1099,25 @@ static int vpe_open(struct inode *inode, struct file *filp)
 	v->shared_ptr = NULL;
 	v->__start = 0;
 
 	return 0;
 }
 
 static int vpe_release(struct inode *inode, struct file *filp)
 {
-	int minor, ret = 0;
 	struct vpe *v;
 	Elf_Ehdr *hdr;
+	int ret = 0;
 
-	minor = iminor(inode);
-	if ((v = get_vpe(minor)) == NULL)
+	v = get_vpe(tclimit);
+	if (v == NULL)
 		return -ENODEV;
 
 	// simple case of fire and forget, so tell the VPE to run...
 
 	hdr = (Elf_Ehdr *) v->pbuffer;
 	if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) {
-		if (vpe_elfload(v) >= 0)
+		if (vpe_elfload(v) >= 0) {
 			vpe_run(v);
-		else {
+		} else {
 			printk(KERN_WARNING "VPE loader: ELF load failed.\n");
 			ret = -ENOEXEC;
 		}
...
...
@@ -1179,12 +1144,14 @@ static int vpe_release(struct inode *inode, struct file *filp)
 static ssize_t vpe_write(struct file *file, const char __user * buffer,
 			 size_t count, loff_t * ppos)
 {
-	int minor;
 	size_t ret = count;
 	struct vpe *v;
 
-	minor = iminor(file->f_path.dentry->d_inode);
-	if ((v = get_vpe(minor)) == NULL)
+	if (iminor(file->f_path.dentry->d_inode) != minor)
+		return -ENODEV;
+
+	v = get_vpe(tclimit);
+	if (v == NULL)
 		return -ENODEV;
 
 	if (v->pbuffer == NULL) {
...
...
@@ -1370,17 +1337,34 @@ static struct device *vpe_dev;
 static int __init vpe_module_init(void)
 {
+	unsigned int mtflags, vpflags;
+	int hw_tcs, hw_vpes, tc, err = 0;
+	unsigned long flags, val;
 	struct vpe *v = NULL;
 	struct device *dev;
 	struct tc *t;
-	unsigned long val;
-	int i, err;
 
 	if (!cpu_has_mipsmt) {
 		printk("VPE loader: not a MIPS MT capable processor\n");
 		return -ENODEV;
 	}
 
+	if (vpelimit == 0) {
+		printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
+		       "initializing VPE loader.\nPass maxvpes=<n> argument as "
+		       "kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	if (tclimit == 0) {
+		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
+		       "initializing VPE loader.\nPass maxtcs=<n> argument as "
+		       "kernel argument\n");
+
+		return -ENODEV;
+	}
+
 	major = register_chrdev(0, module_name, &vpe_fops);
 	if (major < 0) {
 		printk("VPE loader: unable to register character device\n");
...
...
@@ -1388,40 +1372,61 @@ static int __init vpe_module_init(void)
 	}
 
 	dev = device_create(mt_class, NULL, MKDEV(major, minor),
-			    "tc%d", minor);
+			    "vpe%d", minor);
 	if (IS_ERR(dev)) {
 		err = PTR_ERR(dev);
 		goto out_chrdev;
 	}
 	vpe_dev = dev;
 
-	dmt();
-	dvpe();
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
 
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
 	/* dump_mtregs(); */
 
 	val = read_c0_mvpconf0();
-	for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) {
-		t = alloc_tc(i);
+	hw_tcs = (val & MVPCONF0_PTC) + 1;
+	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+	for (tc = tclimit; tc < hw_tcs; tc++) {
+		/*
+		 * Must re-enable multithreading temporarily or in case we
+		 * reschedule send IPIs or similar we might hang.
+		 */
+		clear_c0_mvpcontrol(MVPCONTROL_VPC);
+		evpe(vpflags);
+		emt(mtflags);
+		local_irq_restore(flags);
+		t = alloc_tc(tc);
+		if (!t) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		local_irq_save(flags);
+		mtflags = dmt();
+		vpflags = dvpe();
+		set_c0_mvpcontrol(MVPCONTROL_VPC);
 
 		/* VPE's */
-		if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) {
-			settc(i);
+		if (tc < hw_tcs) {
+			settc(tc);
 
-			if ((v = alloc_vpe(i)) == NULL) {
+			if ((v = alloc_vpe(tc)) == NULL) {
 				printk(KERN_WARNING "VPE: unable to allocate VPE\n");
-				return -ENODEV;
+
+				goto out_reenable;
 			}
 
 			/* add the tc to the list of this vpe's tc's. */
 			list_add(&t->tc, &v->tc);
 
 			/* deactivate all but vpe0 */
-			if (i != 0) {
+			if (tc >= tclimit) {
 				unsigned long tmp = read_vpe_c0_vpeconf0();
 
 				tmp &= ~VPECONF0_VPA;
...
...
@@ -1434,7 +1439,7 @@ static int __init vpe_module_init(void)
 			/* disable multi-threading with TC's */
 			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
 
-			if (i != 0) {
+			if (tc >= vpelimit) {
 				/*
 				 * Set config to be the same as vpe0,
 				 * particularly kseg0 coherency alg
...
...
@@ -1446,10 +1451,10 @@ static int __init vpe_module_init(void)
 		/* TC's */
 		t->pvpe = v;	/* set the parent vpe */
 
-		if (i != 0) {
+		if (tc >= tclimit) {
 			unsigned long tmp;
 
-			settc(i);
+			settc(tc);
 
 			/* Any TC that is bound to VPE0 gets left as is - in case
 			   we are running SMTC on VPE0. A TC that is bound to any
...
...
@@ -1479,9 +1484,14 @@ static int __init vpe_module_init(void)
 		}
 	}
 
+out_reenable:
 	/* release config state */
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
 
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
+
 #ifdef CONFIG_MIPS_APSP_KSPD
 	kspd_events.kspd_sp_exit = kspd_sp_exit;
 #endif
...
...
@@ -1490,6 +1500,7 @@ static int __init vpe_module_init(void)
 out_chrdev:
 	unregister_chrdev(major, module_name);
 
+out:
 	return err;
 }
...
...
include/asm-mips/mips_mt.h
...
...
@@ -8,6 +8,12 @@
 
 #include <linux/cpumask.h>
 
+/*
+ * How many VPEs and TCs is Linux allowed to use?  0 means no limit.
+ */
+extern int tclimit;
+extern int vpelimit;
+
 extern cpumask_t mt_fpu_cpumask;
 extern unsigned long mt_fpemul_threshold;
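With tclimit and vpelimit exported here, any AP/SP component can check at init time whether the administrator actually reserved VPEs and TCs. A hedged sketch of that guard, modelled on the checks this commit adds to rtlx_module_init() and vpe_module_init(); the function name is illustrative:

/*
 * Hedged sketch: consuming the limits exported above.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/mips_mt.h>

static int __init my_apsp_init(void)
{
	if (vpelimit == 0 || tclimit == 0) {
		printk(KERN_WARNING "AP/SP: no VPEs/TCs reserved; boot with "
		       "maxvpes=<n> and maxtcs=<n>\n");
		return -ENODEV;
	}

	/* VPEs/TCs numbered >= vpelimit/tclimit are free for the SP program */
	return 0;
}
device_initcall(my_apsp_init);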
...
...