Process creation: do_fork() -> copy_process()
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
*
* It copies the registers, and all the appropriate
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *child_tidptr,
struct pid *pid,
int trace)
{
int retval;
struct task_struct *p;
int cgroup_callbacks_done = 0;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) //the child may not get its own mount namespace (CLONE_NEWNS) while at the same time sharing the parent's root and working directory (CLONE_FS)
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
/*
* Shared signal handlers imply shared VM. By way of the above,
* thread groups also imply shared VM. Blocking this case allows
* for various simplifications in other code.
*/
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
/*
* Siblings of global init remain as zombies on exit since they are
* not reaped by their parent (swapper). To solve this and to avoid
* multi-rooted process trees, prevent global and container-inits
* from creating siblings.
*/
if ((clone_flags & CLONE_PARENT) &&
current->signal->flags & SIGNAL_UNKILLABLE)
return ERR_PTR(-EINVAL);
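/*
* A minimal user-space illustration (a stand-alone test program, not part of fork.c) of the
* sanity checks above: asking for CLONE_THREAD without CLONE_SIGHAND (and CLONE_VM) is
* rejected by copy_process() with -EINVAL. The helper name child_fn and the 64 KB stack
* size are arbitrary choices for this example.
#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int child_fn(void *arg)
{
	return 0;	// never reached: the clone() below is rejected by the kernel
}

int main(void)
{
	size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);	// child stack (grows downwards on x86)

	// CLONE_THREAD without CLONE_SIGHAND trips the second check above
	if (clone(child_fn, stack + stack_size, CLONE_THREAD, NULL) == -1)
		printf("clone: %s\n", strerror(errno));	// "Invalid argument"
	free(stack);
	return 0;
}
*/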
retval = security_task_create(clone_flags); //LSM hook (security/security.c); with no security module loaded it falls through to the default capability implementation, which simply returns 0 (see p. 311 of the referenced book)
if (retval)
goto fork_out;
retval = -ENOMEM; //out of memory
p = dup_task_struct(current); //current is the parent of the process being created, so the parent's task_struct is what gets copied
/*
* dup_task_struct() allocates a new task_struct and a new thread_info/kernel stack,
* copies the parent's contents into both, and points the new thread_info back at the
* new task_struct.
*/
if (!p)
goto fork_out;
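/*
* Condensed sketch of what dup_task_struct() does (error handling and accounting omitted;
* the exact helper names vary between kernel versions):
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	tsk = alloc_task_struct();		// slab object for the new task_struct
	ti = alloc_thread_info(tsk);		// the new kernel stack / thread_info
	arch_dup_task_struct(tsk, orig);	// essentially *tsk = *orig
	tsk->stack = ti;
	setup_thread_stack(tsk, orig);		// copy the parent's thread_info, point it back at tsk
	atomic_set(&tsk->usage, 2);		// one reference for the caller, one that lives until release_task()
	return tsk;
}
*/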
ftrace_graph_init_task(p); /* Allocate a return stack for newly created task */
rt_mutex_init_task(p); //initialize the RT-mutex (priority inheritance) fields of the new task; see Documentation/rt-mutex.txt and Documentation/pi-futex.txt
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); //hard interrupts must be enabled at this point, otherwise warn
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); //soft interrupts must be enabled at this point, otherwise warn
#endif
retval = -EAGAIN;
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) { //the number of processes owned by the current user must stay below its RLIMIT_NPROC limit
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->real_cred->user != INIT_USER)
goto bad_fork_free;
}
retval = copy_creds(p, clone_flags); //copy and set up the credentials for the new task
/*
* For CLONE_THREAD the child normally just takes a reference on the parent's cred
* structure; otherwise the credentials are duplicated. The per-user process count
* is also incremented here.
*/
if (retval < 0)
goto bad_fork_free;
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
/*
* The number of tasks in the system (nr_threads) must stay below the global limit
* (max_threads). max_threads is computed at boot time from the amount of RAM, so that
* the kernel stacks / thread_info structures can consume at most a fixed fraction of
* physical memory; it can be changed at run time via /proc/sys/kernel/threads-max.
* nr_threads counts every task in the system except the per-CPU idle tasks.
* Reference: http://hi.baidu.com/zengzhaonong/blog/item/6106d61795f09009c83d6d34.html
*/
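/*
* How max_threads gets its default value: roughly, fork_init() in kernel/fork.c sizes it
* so that all kernel stacks together can use at most 1/8 of physical memory:
*	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
*	if (max_threads < 20)
*		max_threads = 20;	// need at least 20 threads to boot the system
* With 4 KB pages and 8 KB stacks this works out to mempages / 16.
*/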
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
/*
* Take a reference on the module that implements this task's execution domain,
* so that it cannot be unloaded while the new task still uses it.
*/
p->did_exec = 0; //the child has not called exec() yet; it still runs the (copied) program of its parent
/*
* did_exec starts at 0 in the new task_struct and is set once the task calls exec()
* to replace the inherited program with a new one.
*/
delayacct_tsk_init(p); /* Must remain after dup_task_struct(): reinitialize in case the parent's non-NULL pointer was dup'ed */
copy_flags(clone_flags, p); //set p's flags.
/*
* copy_flags() sets up p->flags as follows:
*static void copy_flags(unsigned long clone_flags, struct task_struct *p)
*{
* unsigned long new_flags = p->flags;
*
* new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
* new_flags |= PF_FORKNOEXEC;
* new_flags |= PF_STARTING;
* p->flags = new_flags;
* clear_freeze_flag(p); //cancel any 'freeze' request inherited from the parent
*}
* That is: the child has not used superuser privileges (PF_SUPERPRIV cleared), it is not a
* workqueue worker (PF_WQ_WORKER cleared), it has forked but not yet exec'ed (PF_FORKNOEXEC),
* and it is still in the middle of being created (PF_STARTING).
*/
INIT_LIST_HEAD(&p->children); //empty list head for this task's own children
INIT_LIST_HEAD(&p->sibling); //node that will link this task into its parent's children list
rcu_copy_process(p);
p->vfork_done = NULL; //pointer to the completion that vfork() waits on; set later by do_fork() if CLONE_VFORK is used
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
/*
* Initialize the spinlock that protects several task_struct fields and the list of
* pending signals: the child starts with no pending signals, since they are not
* inherited across fork.
*/
p->utime = cputime_zero; //CPU time spent in user mode
p->stime = cputime_zero; //CPU time spent in kernel mode
p->gtime = cputime_zero; //CPU time spent running a guest (virtual CPU)
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
p->prev_utime = cputime_zero;
p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif
p->default_timer_slack_ns = current->timer_slack_ns;
task_io_accounting_init(&p->ioac); //zero the structure that records this task's I/O statistics
acct_clear_integrals(p);
/*
* Reset the process-accounting fields of the new task_struct to 0: the (stime+utime)
* timestamp of the last accounting update and the accumulated RSS/VM usage integrals.
*/
posix_cpu_timers_init(p); //Initialize POSIX timer handling for a single task.
do_posix_clock_monotonic_gettime(&p->start_time);
/*
* start_time records when the task was created, measured on the monotonic clock
* (time since boot, not wall-clock time since 1970).
*/
p->real_start_time = p->start_time; //real_start_time will additionally account for time spent in suspend (converted to the boot-based clock on the next line)
monotonic_to_bootbased(&p->real_start_time);
p->io_context = NULL;
p->audit_context = NULL;
if (clone_flags & CLONE_THREAD)
threadgroup_fork_read_lock(current);//The threadgroup_fork_lock prevents threads from forking with CLONE_THREAD while held for writing.
cgroup_fork(p); //attach the newly forked task to its parent's cgroup
/*
* The css_set pointer was already copied from the parent by dup_task_struct(), but cgroup_fork() redoes it under proper locking; the kernel's own comment explains why:
* A pointer to the shared css_set was automatically copied in
* fork.c by dup_task_struct(). However, we ignore that copy, since
* it was not made under the protection of RCU or cgroup_mutex, so
* might no longer be a valid cgroup pointer. cgroup_attach_task() might
* have already changed current->cgroups, allowing the previously
* referenced cgroup group to be removed and freed.
*/
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
goto bad_fork_cleanup_cgroup;
}
mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
p->hardirqs_enabled = 1;
#else
p->hardirqs_enabled = 0;
#endif
p->hardirq_enable_ip = 0;
p->hardirq_enable_event = 0;
p->hardirq_disable_ip = _THIS_IP_;
p->hardirq_disable_event = 0;
p->softirqs_enabled = 1;
p->softirq_enable_ip = _THIS_IP_;
p->softirq_enable_event = 0;
p->softirq_disable_ip = 0;
p->softirq_disable_event = 0;
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
p->lockdep_recursion = 0;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
p->memcg_batch.do_batch = 0;
p->memcg_batch.memcg = NULL;
#endif
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p); // Initialize the scheduling-related fields of the new process, set up data structures (this is rather straightforward), and determine the dynamic priority of the process
/*
*void sched_fork(struct task_struct *p)
*{
* unsigned long flags;
* int cpu = get_cpu(); // disable preemption and return the id of the CPU we are running on
*
* __sched_fork(p); // perform the scheduler-related setup for the newly forked task p (p is forked by current):
/*
static void __sched_fork(struct task_struct *p)
{
p->on_rq = 0; //on_rq denotes whether the entity is currently scheduled on a run queue or not.
p->se.on_rq = 0;
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
INIT_LIST_HEAD(&p->se.group_node);
// initialize the remaining fields of the scheduling entity (see p. 93 of the referenced Linux book)
........
}
*/
/*
* We mark the process as running here. This guarantees that
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
p->state = TASK_RUNNING; // mark the child runnable, but do not put it on any runqueue yet
/*
* Revert to default priority/policy on fork if requested.
*/
if (unlikely(p->sched_reset_on_fork)) {
if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
p->policy = SCHED_NORMAL;
p->normal_prio = p->static_prio;
}
if (PRIO_TO_NICE(p->static_prio) < 0) {
p->static_prio = NICE_TO_PRIO(0);
p->normal_prio = p->static_prio;
set_load_weight(p);
}
/*
* We don't need the reset flag anymore after the fork. It has
* fulfilled its duty:
*/
p->sched_reset_on_fork = 0;
}
/*
* Make sure we do not leak PI(Priority Inheritance) boosting priority to the child.
*/
p->prio = current->normal_prio;
/*
* The three priority fields of task_struct (static_prio / normal_prio / prio) are related;
* see p. 94 of the referenced book. The child takes the parent's normal_prio so that a
* temporary priority-inheritance boost of the parent does not leak into the child.
*/
if (!rt_prio(p->prio))
p->sched_class = &fair_sched_class;
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
/*
* For the scheduling classes (struct sched_class) see p. 89 of the referenced book.
*/
/*
* The child is not yet in the pid-hash so no cgroup attach races,
* and the cgroup is pinned to this child due to cgroup_fork()
* is ran before sched_fork().
*
* Silence PROVE_RCU.
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
set_task_cpu(p, cpu); // assign the child to a CPU; at this point it is simply the CPU the parent is currently running on
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
if (likely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
p->on_cpu = 0; // the child is not executing on any CPU yet
#endif
#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1; // the child starts with kernel preemption disabled; the count is dropped when it is first scheduled (finish_task_switch/schedule_tail)
#endif
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
#endif
put_cpu();
}
*/
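/*
* The sched_reset_on_fork branch above corresponds to the SCHED_RESET_ON_FORK flag that
* user space can OR into the scheduling policy (available since 2.6.32). A minimal
* stand-alone sketch, not from the kernel source (actually succeeding requires CAP_SYS_NICE):
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK 0x40000000
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	// make the caller SCHED_FIFO, but ask the kernel to reset any realtime
	// policy and negative nice value in every child it forks
	if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp) == -1)
		perror("sched_setscheduler");
	return 0;
}
*/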
retval = perf_event_init_task(p); //initialize the perf-events context of the child (inheriting counters from the parent where required)
if (retval)
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p))) //allocate an audit context if system-call auditing is enabled (see p. 19 of the referenced book)
goto bad_fork_cleanup_policy;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p))) //with CLONE_SYSVSEM the child shares the parent's System V semaphore undo list
goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p))) //with CLONE_FILES the child shares the parent's files_struct; otherwise a new files structure is allocated and the parent's contents are copied into it
goto bad_fork_cleanup_semundo;
/*
If CLONE_FILES is set, parent and child share the same files_struct and only its reference
count is incremented; otherwise a new files_struct is allocated and the parent's table of
open files is copied into it. Either way the child initially sees the same open files as
its parent.
*/
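/*
* The core of copy_files(), roughly (error handling trimmed):
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf = current->files, *newf;
	int error = 0;

	if (!oldf)
		goto out;
	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);	// share: just take another reference
		goto out;
	}
	newf = dup_fd(oldf, &error);		// copy the parent's open-file table
	if (!newf)
		goto out;
	tsk->files = newf;
	error = 0;
out:
	return error;
}
*/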
if ((retval = copy_fs(clone_flags, p)))
goto bad_fork_cleanup_files;
/*
copy_fs() works like copy_files(): with CLONE_FS the fs_struct (root directory, current
working directory, umask) is shared, otherwise it is duplicated for the child.
*/
if ((retval = copy_sighand(clone_flags, p)))
goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
goto bad_fork_cleanup_sighand;
/*
copy_sighand() shares the table of signal handlers with the parent if CLONE_SIGHAND is set,
otherwise it copies it; copy_signal() sets up the signal_struct that is shared by all the
threads of a thread group (shared when CLONE_THREAD is set).
*/
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
/*
* copy_mm() either shares the parent's mm (CLONE_VM) or duplicates it for the child.
* The distinction between mm and active_mm matters for kernel threads: a kernel thread has
* mm = NULL and borrows the previously running task's address space as active_mm
* ("lazy TLB"); for an ordinary process mm and active_mm point to the same mm_struct
* (both fields live in task_struct).
*/
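/*
* The heart of copy_mm(), roughly (initialisation and error handling trimmed):
*	tsk->mm = tsk->active_mm = NULL;
*	oldmm = current->mm;
*	if (!oldmm)
*		return 0;			// kernel thread: nothing to copy
*	if (clone_flags & CLONE_VM) {
*		atomic_inc(&oldmm->mm_users);
*		mm = oldmm;			// share the parent's address space
*	} else {
*		mm = dup_mm(tsk);		// duplicate VMAs and page tables (copy-on-write)
*	}
*	tsk->mm = mm;
*	tsk->active_mm = mm;
*/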
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_mm;
/*
* For namespaces see section 2.3.2 of the referenced book.
*/
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
retval = copy_thread(clone_flags, stack_start, stack_size, p, regs); //set up the child's kernel stack and pt_regs from the parent's regs; among other things the child's return register (ax) is forced to 0, which is why fork()/clone() return 0 in the child
/*
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
int err;
struct pt_regs *childregs;
struct task_struct *me = current;
childregs = (struct pt_regs *)task_pt_regs(p); //childregs points to the pt_regs area at the top of the child's kernel stack (the same 8 KB area whose bottom holds thread_info)
/*
#define KSTK_TOP(info) \
({ \
unsigned long *__ptr = (unsigned long *)(info); \
(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})
#define task_pt_regs(task) \
({ \
struct pt_regs *__regs__; \
__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
__regs__ - 1; \
})
So childregs ends up at (top of the 8 KB kernel stack - 8 - sizeof(struct pt_regs)); the 8 bytes at the very top of the stack are reserved padding.
*/
*childregs = *regs; // start the child with an exact copy of the parent's user-mode register state (regs was passed down from sys_fork()/sys_clone()); this is how the classic UNIX semantics of fork() returning at the same point in both processes are implemented
childregs->ax = 0; // force ax to 0 in the child's pt_regs: when the child first returns to user space it sees 0 as the return value of fork(), while the parent receives the child's PID
if (user_mode(regs))
childregs->sp = sp;
// for a caller in user mode, sp is the stack_start that do_fork() passed down; for a plain
// fork()/vfork() this is simply the parent's current user stack pointer
else
childregs->sp = (unsigned long)childregs;
// a kernel thread has no user stack, so its sp is pointed at the pt_regs area on the kernel stack
p->thread.sp = (unsigned long) childregs; // the child's saved kernel stack pointer starts right at its pt_regs
p->thread.sp0 = (unsigned long) (childregs+1); // sp0 is the top of the kernel stack, i.e. the address just above the pt_regs area
p->thread.usersp = me->thread.usersp;
set_tsk_thread_flag(p, TIF_FORK); // set TIF_FORK in the child's thread_info flags: it marks a freshly forked task and is used by ret_from_fork
p->thread.io_bitmap_ptr = NULL;
// Why save gs/fs/es/ds? These segment registers can carry per-thread state (on x86, fs/gs
// are used for thread-local storage), so the child records the parent's current selectors.
savesegment(gs, p->thread.gsindex); // save the currently loaded gs selector into the child's thread struct
p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs; // if no selector is loaded, the gs base lives in an MSR, so inherit the parent's value
savesegment(fs, p->thread.fsindex);
p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
savesegment(es, p->thread.es);
savesegment(ds, p->thread.ds);
err = -ENOMEM;
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
if (!p->thread.io_bitmap_ptr) {
p->thread.io_bitmap_max = 0;
return -ENOMEM;
}
memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
IO_BITMAP_BYTES);
set_tsk_thread_flag(p, TIF_IO_BITMAP);
}
// If CLONE_SETTLS is set, install a new TLS (thread-local storage) area for the child thread.
// For TLS see http://en.wikipedia.org/wiki/Thread-local_storage
if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
if (test_thread_flag(TIF_IA32))
err = do_set_thread_area(p, -1,
(struct user_desc __user *)childregs->si, 0);
else
#endif
err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
if (err)
goto out;
}
err = 0;
out:
if (err && p->thread.io_bitmap_ptr) {
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
}
return err;
}
*/
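/*
* What childregs->ax = 0 means in practice: a stand-alone user-space program (not part of
* fork.c) sees fork() return 0 in the child and the child's pid in the parent:
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		// child: ax was zeroed in copy_thread(), so fork() "returns" 0 here
		printf("child:  fork() = 0, my pid = %d\n", getpid());
	} else if (pid > 0) {
		// parent: gets the pid of the freshly created child
		printf("parent: fork() = %d\n", pid);
		waitpid(pid, NULL, 0);
	}
	return 0;
}
*/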
if (retval)
goto bad_fork_cleanup_io;
if (pid != &init_struct_pid)
{
retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns); //allocate a struct pid in the child's pid namespace (and in every ancestor namespace)
if (!pid)
goto bad_fork_cleanup_io;
}
p->pid = pid_nr(pid); //pid_nr() returns the global pid number, i.e. the value of this struct pid as seen from the initial pid namespace (see the sketch below)
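/*
* How pid_nr() obtains the global number: a struct pid carries one upid per pid-namespace
* level, and numbers[0] is always the value in the initial namespace. Roughly, from
* include/linux/pid.h of this era (fields abridged):
*	struct upid {
*		int nr;				// the pid value in one namespace
*		struct pid_namespace *ns;
*		...
*	};
*	struct pid {
*		atomic_t count;
*		unsigned int level;		// how many namespace levels this pid is visible in
*		struct hlist_head tasks[PIDTYPE_MAX];
*		struct upid numbers[1];		// one entry per namespace level
*	};
*	static inline pid_t pid_nr(struct pid *pid)
*	{
*		return pid ? pid->numbers[0].nr : 0;	// numbers[0] = global (init) namespace
*	}
*/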
p->tgid = p->pid; //by default the thread group id equals the pid: the new task is a thread-group leader
if (clone_flags & CLONE_THREAD) //a new thread joins the caller's thread group, so it inherits the caller's tgid
p->tgid = current->tgid;
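/*
* The pid/tgid split is what makes getpid() identical and gettid() distinct across the
* threads of one process. A stand-alone illustration (not part of fork.c), compile with -pthread:
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static void *worker(void *arg)
{
	// same tgid as main (what getpid() reports), but its own pid (the kernel tid)
	printf("thread: getpid()=%d gettid()=%ld\n", getpid(), (long)syscall(SYS_gettid));
	return NULL;
}

int main(void)
{
	pthread_t t;
	printf("main:   getpid()=%d gettid()=%ld\n", getpid(), (long)syscall(SYS_gettid));
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}
*/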
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; //with CLONE_CHILD_SETTID the child's TID is written to this user-space address
/* Clear TID on mm_release()? */
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; //with CLONE_CHILD_CLEARTID the TID is cleared (and a futex wake issued) when the child releases its mm; pthread_join() relies on this
#ifdef CONFIG_BLOCK
p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
/* sigaltstack should be cleared when sharing the same VM */
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
p->sas_ss_sp = p->sas_ss_size = 0; //the alternate signal stack is not inherited when the child shares the parent's address space (except for vfork), otherwise parent and child would end up using the same alternate stack
/* Syscall tracing and stepping should be turned off in the child regardless of CLONE_PTRACE. */
user_disable_single_step(p);
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); //clear TIF_SYSCALL_TRACE so that ret_from_fork does not report a syscall exit for the new child to a tracer
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
clear_all_latency_tracing(p);
/* ok, now we should be set up.. */
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL); //exit_signal is the signal sent to the parent when this task exits: -1 for an ordinary thread (only the group as a whole reports), otherwise the low byte of clone_flags (normally SIGCHLD)
p->pdeath_signal = 0; //pdeath_signal (the signal delivered to this task when its parent dies) is not inherited
p->exit_state = 0;
/*
* Ok, make it visible to the rest of the system.
* We don't wake it up yet.
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
/*
* Now that the task is set up, run cgroup callbacks if necessary.
* We need to run them before the task is visible on the tasklist.
*/
cgroup_fork_callbacks(p); //about cgroups, please refer to Documentation/cgroups/cgroups.txt or http://blog.chinaunix.net/space.php?uid=20543183&do=blog&id=1930840&page=1#comment
cgroup_callbacks_done = 1;
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
{
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
}
else
{
p->real_parent = current; //the normal case: the caller becomes the child's real parent
p->parent_exec_id = current->self_exec_id;
}
spin_lock(¤t->sighand->siglock);
/* * Process group and session signals need to be delivered to just the * parent before the fork or both the parent and the child after the * fork. Restart if a signal comes in before we add the new process to * it's process group. * A fatal signal pending means that current will exit, so the new * thread can't slip out of an OOM kill (or normal SIGKILL). */
recalc_sigpending();
if (signal_pending(current))
{
spin_unlock(¤t->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_free_pid;
}
if (clone_flags & CLONE_THREAD)
{
current->signal->nr_threads++;
atomic_inc(¤t->signal->live);
atomic_inc(¤t->signal->sigcnt);
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
if (likely(p->pid))
{
tracehook_finish_clone(p, clone_flags, trace); //new child created and being attached
if (thread_group_leader(p))
{
if (is_child_reaper(pid))
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
p->signal->tty = tty_kref_get(current->signal->tty);
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail(&p->sibling, &p->real_parent->children);
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__this_cpu_inc(process_counts);
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
spin_unlock(¤t->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
if (clone_flags & CLONE_THREAD)
threadgroup_fork_read_unlock(current);
perf_event_fork(p);
return p;
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
/*
* The error paths (the bad_fork_* labels) undo, in reverse order, everything that was
* set up above; the remaining labels are omitted here.
*/
}
With that, copy_process() is finally finished! The new task is completely set up, but it has not started running yet.
Actually waking it up is left to the caller, do_fork(), as sketched below.
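For completeness, this is roughly what the caller does with the result; a condensed sketch of the tail of do_fork() in kernel/fork.c (tracing, ptrace and error handling details omitted):
	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	if (!IS_ERR(p)) {
		struct completion vfork;

		nr = task_pid_vnr(p);			// the pid value returned to the parent
		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}
		wake_up_new_task(p);			// the actual "kick-off": put the child on a runqueue
		if (clone_flags & CLONE_VFORK)
			wait_for_completion(&vfork);	// vfork: block until the child exec()s or exits
	}
	return nr;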