/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/fs.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static kmem_cache_t *task_struct_cachep;

extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_semundo(struct task_struct *tsk);

/* The idle threads do not count.. */
int nr_threads;

int max_threads;
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int last_pid;

struct task_struct *pidhash[PIDHASH_SZ];

rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* outer */

void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

void __init fork_init(unsigned long mempages)
{
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct",
				  sizeof(struct task_struct),0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!task_struct_cachep)
		panic("fork_init(): cannot create task_struct SLAB cache");

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (THREAD_SIZE/PAGE_SIZE) / 8;

	init_task.rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
}

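/*
 * Allocate a new task_struct and thread_info pair and copy the parent's
 * contents into both.  The two structures are cross-linked and the new
 * task's usage count is set to 1.  Returns NULL if either allocation fails.
 */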
struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	ti = alloc_thread_info();
	if (!ti) return NULL;

	tsk = kmem_cache_alloc(task_struct_cachep,GFP_ATOMIC);
	if (!tsk) {
		free_thread_info(ti);
		return NULL;
	}

	*ti = *orig->thread_info;
	*tsk = *orig;
	tsk->thread_info = ti;
	ti->task = tsk;
	atomic_set(&tsk->usage,1);

	return tsk;
}

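/*
 * Release the thread_info and return the task_struct to its slab cache;
 * used once the last reference to the task has been dropped.
 */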
void __put_task_struct(struct task_struct *tsk)
{
	free_thread_info(tsk->thread_info);
	kmem_cache_free(task_struct_cachep,tsk);
}

/* Protects next_safe and last_pid. */
spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED;

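/*
 * Pick the next free pid.  last_pid is advanced and wrapped back to 300
 * once it would exceed 15 bits; whenever it catches up with next_safe the
 * task list is rescanned to skip values still in use as a pid, pgrp, tgid
 * or session id, and next_safe is recomputed so later calls can avoid the
 * scan.  Idle tasks (CLONE_IDLETASK) always get pid 0.
 */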
static int get_pid(unsigned long flags)
{
	static int next_safe = PID_MAX;
	struct task_struct *p;
	int pid;

	if (flags & CLONE_IDLETASK)
		return 0;

	spin_lock(&lastpid_lock);
	if((++last_pid) & 0xffff8000) {
		last_pid = 300;		/* Skip daemons etc. */
		goto inside;
	}
	if(last_pid >= next_safe) {
inside:
		next_safe = PID_MAX;
		read_lock(&tasklist_lock);
	repeat:
		for_each_task(p) {
			if(p->pid == last_pid	||
			   p->pgrp == last_pid	||
			   p->tgid == last_pid	||
			   p->session == last_pid) {
				if(++last_pid >= next_safe) {
					if(last_pid & 0xffff8000)
						last_pid = 300;
					next_safe = PID_MAX;
				}
				goto repeat;
			}
			if(p->pid > last_pid && next_safe > p->pid)
				next_safe = p->pid;
			if(p->pgrp > last_pid && next_safe > p->pgrp)
				next_safe = p->pgrp;
			if(p->session > last_pid && next_safe > p->session)
				next_safe = p->session;
		}
		read_unlock(&tasklist_lock);
	}
	pid = last_pid;
	spin_unlock(&lastpid_lock);

	return pid;
}

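/*
 * Duplicate the parent's vma list into the new mm.  Each vma is copied
 * (VM_DONTCOPY areas are skipped, VM_LOCKED is cleared), linked into the
 * mapping's i_shared list when file-backed, and its page tables are
 * copied with copy_page_range() under mm->page_table_lock.
 */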
static inline int dup_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt, *tmp, **pprev;
	int retval;

	flush_cache_mm(current->mm);
	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->map_count = 0;
	mm->rss = 0;
	mm->cpu_vm_mask = 0;
	mm->swap_address = 0;
	pprev = &mm->mmap;

	/*
	 * Add it to the mmlist after the parent.
	 * Doing it this way means that we can order the list,
	 * and fork() won't mess up the ordering significantly.
	 * Add it first so that swapoff can see any swap entries.
	 */
	spin_lock(&mmlist_lock);
	list_add(&mm->mmlist, &current->mm->mmlist);
	mmlist_nr++;
	spin_unlock(&mmlist_lock);

	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
		struct file *file;

		retval = -ENOMEM;
		if(mpnt->vm_flags & VM_DONTCOPY)
			continue;
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
      
			/* insert tmp into the share list, just after mpnt */
			spin_lock(&inode->i_mapping->i_shared_lock);
			list_add_tail(&tmp->shared, &mpnt->shared);
			spin_unlock(&inode->i_mapping->i_shared_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries:
		 * link in first so that swapoff can see swap entries.
		 */
		spin_lock(&mm->page_table_lock);
		*pprev = tmp;
		pprev = &tmp->vm_next;
		mm->map_count++;
		retval = copy_page_range(mm, current->mm, tmp);
		spin_unlock(&mm->page_table_lock);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto fail_nomem;
	}
	retval = 0;
	build_mmap_rb(mm);

fail_nomem:
	flush_tlb_mm(current->mm);
	return retval;
}

spinlock_t mmlist_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
int mmlist_nr;

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

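/*
 * Initialise a freshly allocated (or memcpy'd) mm_struct: set up the
 * reference counts, mmap_sem and page_table_lock, and allocate a page
 * directory.  On pgd allocation failure the mm is freed and NULL returned.
 */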
static struct mm_struct * mm_init(struct mm_struct * mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
	mm->pgd = pgd_alloc(mm);
	if (mm->pgd)
		return mm;
	free_mm(mm);
	return NULL;
}
	

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		return mm_init(mm);
	}
	return NULL;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
inline void __mmdrop(struct mm_struct *mm)
{
	if (mm == &init_mm) BUG();
	pgd_free(mm->pgd);
	destroy_context(mm);
	free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
		extern struct mm_struct *swap_mm;
		if (swap_mm == mm)
			swap_mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist);
		list_del(&mm->mmlist);
		mmlist_nr--;
		spin_unlock(&mmlist_lock);
		exit_mmap(mm);
		mmdrop(mm);
	}
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * whether on error or success.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(void)
{
	struct task_struct *tsk = current;
	struct completion *vfork_done = tsk->vfork_done;

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}
}

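/*
 * Set up the child's mm: kernel threads (no current->mm) get none,
 * CLONE_VM shares the parent's mm by bumping mm_users, and everything
 * else gets a full copy built via allocate_mm()/mm_init()/dup_mmap().
 */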
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->cmin_flt = tsk->cmaj_flt = 0;
	tsk->nswap = tsk->cnswap = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		/*
		 * There are cases where the PTL is held to ensure no
		 * new threads start up in user mode using an mm, which
		 * allows optimizing out ipis; the tlb_gather_mmu code
		 * is an example.
		 */
		spin_unlock_wait(&oldmm->page_table_lock);
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	/* Copy the current MM stuff.. */
	memcpy(mm, oldmm, sizeof(*mm));
	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk,mm))
		goto free_pt;

	down_write(&oldmm->mmap_sem);
	retval = dup_mmap(mm);
	up_write(&oldmm->mmap_sem);

	if (retval)
		goto free_pt;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

free_pt:
	mmput(mm);
fail_nomem:
	return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		fs->lock = RW_LOCK_UNLOCKED;
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}	
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

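/*
 * CLONE_FS shares the parent's fs_struct; otherwise the root, pwd and
 * altroot (plus their vfsmounts) are copied into a new fs_struct.
 */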
static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -1;
	return 0;
}

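/*
 * Walk the open-fd bitmap backwards to find the highest word that still
 * has a bit set, and return the corresponding fd count rounded up to a
 * whole word.  This bounds how much of the fd table needs copying.
 */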
static int count_open_files(struct files_struct *files, int size)
{
	int i;
	
	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (files->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}

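/*
 * CLONE_FILES shares the parent's files_struct; otherwise a new one is
 * allocated, the fdset and fd array are grown if the parent's exceed the
 * built-in defaults, every open struct file gains an extra reference,
 * and the unused tail of the new table is cleared.
 */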
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	struct file **old_fds, **new_fds;
	int open_files, nfds, size, i, error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	tsk->files = NULL;
	error = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf) 
		goto out;

	atomic_set(&newf->count, 1);

	newf->file_lock	    = RW_LOCK_UNLOCKED;
	newf->next_fd	    = 0;
	newf->max_fds	    = NR_OPEN_DEFAULT;
	newf->max_fdset	    = __FD_SETSIZE;
	newf->close_on_exec = &newf->close_on_exec_init;
	newf->open_fds	    = &newf->open_fds_init;
	newf->fd	    = &newf->fd_array[0];

	/* We don't yet have the oldf readlock, but even if the old
           fdset gets grown now, we'll only copy up to "size" fds */
	size = oldf->max_fdset;
	if (size > __FD_SETSIZE) {
		newf->max_fdset = 0;
		write_lock(&newf->file_lock);
		error = expand_fdset(newf, size-1);
		write_unlock(&newf->file_lock);
		if (error)
			goto out_release;
	}
	read_lock(&oldf->file_lock);

	open_files = count_open_files(oldf, size);

	/*
	 * Check whether we need to allocate a larger fd array.
	 * Note: we're not a clone task, so the open count won't
	 * change.
	 */
	nfds = NR_OPEN_DEFAULT;
	if (open_files > nfds) {
		read_unlock(&oldf->file_lock);
		newf->max_fds = 0;
		write_lock(&newf->file_lock);
		error = expand_fd_array(newf, open_files-1);
		write_unlock(&newf->file_lock);
		if (error) 
			goto out_release;
		nfds = newf->max_fds;
		read_lock(&oldf->file_lock);
	}

	old_fds = oldf->fd;
	new_fds = newf->fd;

	memcpy(newf->open_fds->fds_bits, oldf->open_fds->fds_bits, open_files/8);
	memcpy(newf->close_on_exec->fds_bits, oldf->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f)
			get_file(f);
		*new_fds++ = f;
	}
	read_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (newf->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned, thus could use an optimized version */ 
	memset(new_fds, 0, size); 

	if (newf->max_fdset > open_files) {
		int left = (newf->max_fdset-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));
		
		memset(&newf->open_fds->fds_bits[start], 0, left);
		memset(&newf->close_on_exec->fds_bits[start], 0, left);
	}

	tsk->files = newf;
	error = 0;
out:
	return error;

out_release:
	free_fdset (newf->close_on_exec, newf->max_fdset);
	free_fdset (newf->open_fds, newf->max_fdset);
	kmem_cache_free(files_cachep, newf);
	goto out;
}

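/*
 * CLONE_SIGHAND shares the parent's signal handlers; otherwise a new
 * signal_struct is allocated and the action table is copied over.
 */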
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sig->count);
		return 0;
	}
	sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
	tsk->sig = sig;
	if (!sig)
		return -1;
	spin_lock_init(&sig->siglock);
	atomic_set(&sig->count, 1);
	memcpy(tsk->sig->action, current->sig->action, sizeof(tsk->sig->action));
	return 0;
}

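/*
 * Derive the child's flags from the parent's: PF_SUPERPRIV is cleared,
 * PF_FORKNOEXEC is set, and ptrace state is dropped unless CLONE_PTRACE
 * was requested.
 */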
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~PF_SUPERPRIV;
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}

/*
 *  Ok, this is the main fork-routine. It copies the system process
 * information (task[nr]) and sets up the necessary registers. It also
 * copies the data segment in its entirety.  The "stack_start" and
 * "stack_top" arguments are simply passed along to the platform
 * specific copy_thread() routine.  Most platforms ignore stack_top.
 * For an example that's using stack_top, see
 * arch/ia64/kernel/process.c.
 */
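/*
 * As a rough illustration (not part of this file), an architecture's
 * fork/clone entry points in this era call do_fork() along these lines,
 * mapping the returned task_struct or error pointer back to a pid:
 *
 *	asmlinkage int sys_fork(struct pt_regs regs)
 *	{
 *		struct task_struct *p;
 *
 *		p = do_fork(SIGCHLD, regs.esp, &regs, 0);
 *		return IS_ERR(p) ? PTR_ERR(p) : p->pid;
 *	}
 *
 * The exact register names and stack argument differ per architecture;
 * see the arch-specific process.c files (e.g. arch/ia64/kernel/process.c
 * mentioned above) for the real callers.
 */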
struct task_struct *do_fork(unsigned long clone_flags,
			    unsigned long stack_start,
			    struct pt_regs *regs,
			    unsigned long stack_size)
{
	int retval;
	unsigned long flags;
	struct task_struct *p = NULL;
	struct completion vfork;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);

	/*
	 * Counter increases are protected by
	 * the kernel lock so nr_threads can't
	 * increase under us (but it may decrease).
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;
	
	get_exec_domain(p->thread_info->exec_domain);

	if (p->binfmt && p->binfmt->module)
		__MOD_INC_USE_COUNT(p->binfmt->module);

#ifdef CONFIG_PREEMPT
	/*
	 * schedule_tail drops this_rq()->lock so we compensate with a count
	 * of 1.  Also, we want to start with kernel preemption disabled.
	 */
	p->thread_info->preempt_count = 1;
#endif
	p->did_exec = 0;
	p->swappable = 0;
	p->state = TASK_UNINTERRUPTIBLE;

	copy_flags(clone_flags, p);
	p->pid = get_pid(clone_flags);
	p->proc_dentry = NULL;

	INIT_LIST_HEAD(&p->run_list);

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
	}
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p,TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->leader = 0;		/* session leadership doesn't inherit */
	p->tty_old_pgrp = 0;
	p->times.tms_utime = p->times.tms_stime = 0;
	p->times.tms_cutime = p->times.tms_cstime = 0;
#ifdef CONFIG_SMP
	{
		int i;

		/* ?? should we just memset this ?? */
		for(i = 0; i < smp_num_cpus; i++)
			p->per_cpu_utime[cpu_logical_map(i)] =
				p->per_cpu_stime[cpu_logical_map(i)] = 0;
		spin_lock_init(&p->sigmask_lock);
	}
#endif
	p->array = NULL;
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = jiffies;

	INIT_LIST_HEAD(&p->local_pages);

	retval = -ENOMEM;
	/* copy all the process information */
	if (copy_semundo(clone_flags, p))
		goto bad_fork_cleanup;
	if (copy_files(clone_flags, p))
		goto bad_fork_cleanup_semundo;
	if (copy_fs(clone_flags, p))
		goto bad_fork_cleanup_files;
	if (copy_sighand(clone_flags, p))
		goto bad_fork_cleanup_fs;
	if (copy_mm(clone_flags, p))
		goto bad_fork_cleanup_sighand;
	if (copy_namespace(clone_flags, p))
		goto bad_fork_cleanup_mm;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;
	
	/* Our parent execution domain becomes the current domain;
	   these must match for thread signalling to apply */
	   
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->swappable = 1;
	p->exit_signal = clone_flags & CSIGNAL;
	p->pdeath_signal = 0;

	/*
	 * Share the timeslice between parent and child, thus the
	 * total amount of pending timeslices in the system doesn't change,
	 * resulting in more scheduling fairness.
	 */
	__save_flags(flags);
	__cli();
	p->time_slice = (current->time_slice + 1) >> 1;
	current->time_slice >>= 1;
	if (!current->time_slice) {
		/*
	 	 * This case is rare, it happens when the parent has only
	 	 * a single jiffy left from its timeslice. Taking the
		 * runqueue lock is not a problem.
		 */
		current->time_slice = 1;
		scheduler_tick(0, 0);
	}
	p->sleep_timestamp = jiffies;
	__restore_flags(flags);

	/*
	 * Ok, add it to the run-queues and make it
	 * visible to the rest of the system.
	 *
	 * Let it rip!
	 */
	p->tgid = p->pid;
	INIT_LIST_HEAD(&p->thread_group);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	p->real_parent = current->real_parent;
	p->parent = current->parent;
	if (!(clone_flags & CLONE_PARENT)) {
		p->real_parent = current;
		if (!(p->ptrace & PT_PTRACED))
			p->parent = current;
	}

	if (clone_flags & CLONE_THREAD) {
		p->tgid = current->tgid;
		list_add(&p->thread_group, &current->thread_group);
	}

	SET_LINKS(p);
	hash_pid(p);
	nr_threads++;
	write_unlock_irq(&tasklist_lock);

	if (p->ptrace & PT_PTRACED)
		send_sig(SIGSTOP, p, 1);

	wake_up_forked_process(p);		/* do this last */
	++total_forks;
	if (clone_flags & CLONE_VFORK)
		wait_for_completion(&vfork);
	else
		/*
		 * Let the child process run first, to avoid most of the
		 * COW overhead when the child exec()s afterwards.
		 */
		set_need_resched();
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_mm:
	exit_mm(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_semundo(p);
bad_fork_cleanup:
	put_exec_domain(p->thread_info->exec_domain);
	if (p->binfmt && p->binfmt->module)
		__MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count:
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	put_task_struct(p);
	goto fork_out;
}

/* SLAB cache for signal_struct structures (tsk->sig) */
kmem_cache_t *sigact_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
kmem_cache_t *mm_cachep;

void __init proc_caches_init(void)
{
	sigact_cachep = kmem_cache_create("signal_act",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!sigact_cachep)
		panic("Cannot create signal action SLAB cache");

	files_cachep = kmem_cache_create("files_cache", 
			 sizeof(struct files_struct), 0, 
			 SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!files_cachep) 
		panic("Cannot create files SLAB cache");

	fs_cachep = kmem_cache_create("fs_cache", 
			 sizeof(struct fs_struct), 0, 
			 SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!fs_cachep) 
		panic("Cannot create fs_struct SLAB cache");
 
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!vm_area_cachep)
		panic("vma_init: Cannot alloc vm_area_struct SLAB cache");

	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!mm_cachep)
		panic("vma_init: Cannot alloc mm_struct SLAB cache");
}