Commit 6af866af authored by Li Zefan, committed by Linus Torvalds

cpuset: remove remaining pointers to cpumask_t

Impact: cleanups, use new cpumask API

Final trivial cleanups: mainly s/cpumask_t/struct cpumask/

Note there is a FIXME in generate_sched_domains(). A future patch will
change struct cpumask *doms to struct cpumask *doms[].
(I suppose Rusty will do this.)
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Mike Travis <travis@sgi.com>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 300ed6cb
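
The conversions in the diff below all follow one pattern: code that treated a cpumask as a fixed-size value (sizeof(cpumask_t), cpus_clear(*dp), whole-struct assignment) becomes code that works through a struct cpumask pointer (cpumask_size(), cpumask_clear(dp), cpumask_copy()). A minimal sketch of the pattern; the helper alloc_cleared_mask() is hypothetical and is not a function touched by this patch:

/*
 * Hypothetical illustration of the cpumask_t -> struct cpumask
 * conversion pattern; not code from this commit.
 */
#include <linux/cpumask.h>
#include <linux/slab.h>

static struct cpumask *alloc_cleared_mask(void)
{
	/*
	 * Old style was kmalloc(sizeof(cpumask_t), GFP_KERNEL), which
	 * always reserves room for NR_CPUS bits.  cpumask_size()
	 * reserves only what the running system needs, which is what
	 * makes variable-sized (offstack) cpumasks possible.
	 */
	struct cpumask *mask = kmalloc(cpumask_size(), GFP_KERNEL);

	if (!mask)
		return NULL;

	/* Old style: cpus_clear(*mask).  The new API takes the pointer. */
	cpumask_clear(mask);
	return mask;
}
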
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,8 +20,9 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 extern int cpuset_init_early(void);
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
-extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
+extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p,
+				       struct cpumask *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -86,12 +87,13 @@ static inline int cpuset_init_early(void) { return 0; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
-static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
+static inline void cpuset_cpus_allowed(struct task_struct *p,
+				       struct cpumask *mask)
 {
 	*mask = cpu_possible_map;
 }
 static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
-					      cpumask_t *mask)
+					      struct cpumask *mask)
 {
 	*mask = cpu_possible_map;
 }
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -289,7 +289,8 @@ static struct file_system_type cpuset_fs_type = {
  *
  * Call with callback_mutex held.
  */
-static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
+static void guarantee_online_cpus(const struct cpuset *cs,
+				  struct cpumask *pmask)
 {
 	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
 		cs = cs->parent;
@@ -610,7 +611,8 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * element of the partition (one sched domain) to be passed to
  * partition_sched_domains().
  */
-static int generate_sched_domains(cpumask_t **domains,
+/* FIXME: see the FIXME in partition_sched_domains() */
+static int generate_sched_domains(struct cpumask **domains,
 			struct sched_domain_attr **attributes)
 {
 	LIST_HEAD(q);		/* queue of cpusets to be scanned */
@@ -618,10 +620,10 @@ static int generate_sched_domains(cpumask_t **domains,
 	struct cpuset **csa;	/* array of all cpuset ptrs */
 	int csn;		/* how many cpuset ptrs in csa so far */
 	int i, j, k;		/* indices for partition finding loops */
-	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
+	struct cpumask *doms;	/* resulting partition; i.e. sched domains */
 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 	int ndoms = 0;		/* number of sched domains in result */
-	int nslot;		/* next empty doms[] cpumask_t slot */
+	int nslot;		/* next empty doms[] struct cpumask slot */
 
 	doms = NULL;
 	dattr = NULL;
@@ -629,7 +631,7 @@ static int generate_sched_domains(cpumask_t **domains,
 
 	/* Special case for the 99% of systems with one, full, sched domain */
 	if (is_sched_load_balance(&top_cpuset)) {
-		doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+		doms = kmalloc(cpumask_size(), GFP_KERNEL);
 		if (!doms)
 			goto done;
 
@@ -708,7 +710,7 @@ static int generate_sched_domains(cpumask_t **domains,
 	 * Now we know how many domains to create.
 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 	 */
-	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
+	doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL);
 	if (!doms)
 		goto done;
 
@@ -720,7 +722,7 @@ static int generate_sched_domains(cpumask_t **domains,
 
 	for (nslot = 0, i = 0; i < csn; i++) {
 		struct cpuset *a = csa[i];
-		cpumask_t *dp;
+		struct cpumask *dp;
 		int apn = a->pn;
 
 		if (apn < 0) {
@@ -743,7 +745,7 @@ static int generate_sched_domains(cpumask_t **domains,
 			continue;
 		}
 
-		cpus_clear(*dp);
+		cpumask_clear(dp);
 		if (dattr)
 			*(dattr + nslot) = SD_ATTR_INIT;
 		for (j = i; j < csn; j++) {
@@ -790,7 +792,7 @@ static int generate_sched_domains(cpumask_t **domains,
 static void do_rebuild_sched_domains(struct work_struct *unused)
 {
 	struct sched_domain_attr *attr;
-	cpumask_t *doms;
+	struct cpumask *doms;
 	int ndoms;
 
 	get_online_cpus();
@@ -2044,7 +2046,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 				unsigned long phase, void *unused_cpu)
 {
 	struct sched_domain_attr *attr;
-	cpumask_t *doms;
+	struct cpumask *doms;
 	int ndoms;
 
 	switch (phase) {
@@ -2114,7 +2116,7 @@ void __init cpuset_init_smp(void)
 /**
  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
- * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
+ * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
  *
  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
  * attached to the specified @tsk.  Guaranteed to return some non-empty
@@ -2122,7 +2124,7 @@ void __init cpuset_init_smp(void)
  * tasks cpuset.
  **/
-void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
+void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
 	mutex_lock(&callback_mutex);
 	cpuset_cpus_allowed_locked(tsk, pmask);
 	mutex_unlock(&callback_mutex);
@@ -2133,7 +2135,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
  * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
  * Must be called with callback_mutex held.
  **/
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
 {
 	task_lock(tsk);
 	guarantee_online_cpus(task_cs(tsk), pmask);
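
The FIXME noted in the commit message points at the follow-up work: turning the flat doms allocation into an array of per-domain masks, so each entry is sized for the running system rather than laid out contiguously. A rough, hypothetical sketch of that direction; alloc_doms() and its shape are assumptions, not code from any specific follow-up commit:

/*
 * Hypothetical sketch of the "struct cpumask *doms[]" direction hinted
 * at in the commit message; not taken from a real follow-up patch.
 */
#include <linux/cpumask.h>
#include <linux/slab.h>

static struct cpumask **alloc_doms(int ndoms)
{
	struct cpumask **doms;
	int i;

	/* Array of pointers, one per sched domain. */
	doms = kmalloc(ndoms * sizeof(*doms), GFP_KERNEL);
	if (!doms)
		return NULL;

	for (i = 0; i < ndoms; i++) {
		/*
		 * Each domain gets its own mask, so the layout no longer
		 * bakes cpumask_size() into the array stride.
		 */
		doms[i] = kmalloc(cpumask_size(), GFP_KERNEL);
		if (!doms[i])
			goto fail;
		cpumask_clear(doms[i]);
	}
	return doms;

fail:
	while (--i >= 0)
		kfree(doms[i]);
	kfree(doms);
	return NULL;
}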