当前位置: 操作系统 > Unix > 正文

kernel学习:copy_process

时间:2016-02-07

在do_fork中调用了copy_process,该函数极其重要。该函数创建进程描述符和子进程需要的其他数据结构。它定义在linux2.6.xxx/kernel/fork.c。

只对关键部分进行了注释如下:

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */

/* This implementation is from kernel 2.6.38. */
static struct task_struct *copy_process(unsigned long clone_flags,
                    unsigned long stack_start,
                    struct pt_regs *regs,
                    unsigned long stack_size,
                    int __user *child_tidptr,
                    struct pid *pid,
                    int trace)
{
    int retval;
    struct task_struct *p;  /* will hold the new process descriptor */
    int cgroup_callbacks_done = 0;

    /* CLONE_NEWNS and CLONE_FS conflict and must not be set together. */
    if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
        return ERR_PTR(-EINVAL);

    /*
     * Thread groups must share signals as well, and detached threads
     * can only be started up within the thread group.
     */
    /* CLONE_THREAD requires CLONE_SIGHAND; reject the combination otherwise. */
    if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
        return ERR_PTR(-EINVAL);

    /*
     * Shared signal handlers imply shared VM. By way of the above,
     * thread groups also imply shared VM. Blocking this case allows
     * for various simplifications in other code.
     */
    /* CLONE_SIGHAND requires CLONE_VM; reject the combination otherwise. */
    if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
        return ERR_PTR(-EINVAL);

    /*
     * Siblings of global init remain as zombies on exit since they are
     * not reaped by their parent (swapper). To solve this and to avoid
     * multi-rooted process trees, prevent global and container-inits
     * from creating siblings.
     */
    if ((clone_flags & CLONE_PARENT) &&
                current->signal->flags & SIGNAL_UNKILLABLE)
        return ERR_PTR(-EINVAL);

    /*
     * Hook into the security framework (LSM). With CONFIG_SECURITY
     * disabled this is an empty stub that always succeeds.
     */
    retval = security_task_create(clone_flags);
    if (retval)
        goto fork_out;

    retval = -ENOMEM;
    /*
     * Duplicate the current process descriptor for the child.
     * dup_task_struct() allocates a new kernel stack plus a new
     * task_struct and thread_info, then copies the parent's
     * task_struct and thread_info into them. At this point the
     * child's descriptor is an exact copy of the parent's.
     */
    p = dup_task_struct(current);
    if (!p)
        goto fork_out;

    ftrace_graph_init_task(p);

    rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
    DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
    DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif

    /* Check the per-user process limit (RLIMIT_NPROC). */
    retval = -EAGAIN;
    if (atomic_read(&p->real_cred->user->processes) >=
            task_rlimit(p, RLIMIT_NPROC)) {
        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
            p->real_cred->user != INIT_USER)
            goto bad_fork_free;
    }

    retval = copy_creds(p, clone_flags);
    if (retval < 0)
        goto bad_fork_free;

    /*
     * If multiple threads are within copy_process(), then this check
     * triggers too late. This doesn't hurt, the check is only there
     * to stop root fork bombs.
     */
    /*
     * Bail out (releasing what was already acquired) if the system-wide
     * thread count limit would be exceeded. max_threads is computed in
     * fork_init() and scales with available memory.
     */
    retval = -EAGAIN;
    if (nr_threads >= max_threads)
        goto bad_fork_cleanup_count;


    /*
     * From here on: initialize the child's descriptor and copy the
     * parent's resources into it.
     */

    /* Take a reference on the exec-domain's module so it cannot unload. */
    if (!try_module_get(task_thread_info(p)->exec_domain->module))
        goto bad_fork_cleanup_count;
    /* The child has not called execve() yet. */
    p->did_exec = 0;
    delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
    /* Reset the flags copied from the parent to child-appropriate values. */
    copy_flags(clone_flags, p);
    INIT_LIST_HEAD(&p->children);
    INIT_LIST_HEAD(&p->sibling);
    rcu_copy_process(p);
    p->vfork_done = NULL;
    spin_lock_init(&p->alloc_lock);

    init_sigpending(&p->pending);

    p->utime = cputime_zero;
    p->stime = cputime_zero;
    p->gtime = cputime_zero;
    p->utimescaled = cputime_zero;
    p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
    p->prev_utime = cputime_zero;
    p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
    memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

    p->default_timer_slack_ns = current->timer_slack_ns;

    task_io_accounting_init(&p->ioac);
    acct_clear_integrals(p);

    posix_cpu_timers_init(p);

    p->lock_depth = -1;      /* -1 = no lock */
    do_posix_clock_monotonic_gettime(&p->start_time);
    p->real_start_time = p->start_time;
    monotonic_to_bootbased(&p->real_start_time);
    p->io_context = NULL;
    p->audit_context = NULL;
    cgroup_fork(p);
#ifdef CONFIG_NUMA
    p->mempolicy = mpol_dup(p->mempolicy);
    if (IS_ERR(p->mempolicy)) {
        retval = PTR_ERR(p->mempolicy);
        p->mempolicy = NULL;
        goto bad_fork_cleanup_cgroup;
    }
    mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
    p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
    p->hardirqs_enabled = 1;
#else
    p->hardirqs_enabled = 0;
#endif
    p->hardirq_enable_ip = 0;
    p->hardirq_enable_event = 0;
    p->hardirq_disable_ip = _THIS_IP_;
    p->hardirq_disable_event = 0;
    p->softirqs_enabled = 1;
    p->softirq_enable_ip = _THIS_IP_;
    p->softirq_enable_event = 0;
    p->softirq_disable_ip = 0;
    p->softirq_disable_event = 0;
    p->hardirq_context = 0;
    p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
    p->lockdep_depth = 0; /* no locks held yet */
    p->curr_chain_key = 0;
    p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
    p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
    p->memcg_batch.do_batch = 0;
    p->memcg_batch.memcg = NULL;
#endif

    /* Perform scheduler related setup. Assign this task to a CPU. */
    sched_fork(p, clone_flags);

    retval = perf_event_init_task(p);
    if (retval)
        goto bad_fork_cleanup_policy;

    if ((retval = audit_alloc(p)))
        goto bad_fork_cleanup_policy;
    /* copy all the process information */
    if ((retval = copy_semundo(clone_flags, p)))
        goto bad_fork_cleanup_audit;
    if ((retval = copy_files(clone_flags, p)))
        goto bad_fork_cleanup_semundo;
    if ((retval = copy_fs(clone_flags, p)))
        goto bad_fork_cleanup_files;
    if ((retval = copy_sighand(clone_flags, p)))
        goto bad_fork_cleanup_fs;
    if ((retval = copy_signal(clone_flags, p)))
        goto bad_fork_cleanup_sighand;
    if ((retval = copy_mm(clone_flags, p)))
        goto bad_fork_cleanup_signal;
    if ((retval = copy_namespaces(clone_flags, p)))
        goto bad_fork_cleanup_mm;
    if ((retval = copy_io(clone_flags, p)))
        goto bad_fork_cleanup_namespaces;
    retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
    if (retval)
        goto bad_fork_cleanup_io;

    if (pid != &init_struct_pid) {
        retval = -ENOMEM;
        pid = alloc_pid(p->nsproxy->pid_ns);
        if (!pid)
            goto bad_fork_cleanup_io;

        if (clone_flags & CLONE_NEWPID) {
            retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
            if (retval < 0)
                goto bad_fork_free_pid;
        }
    }

    p->pid = pid_nr(pid);
    p->tgid = p->pid;
    if (clone_flags & CLONE_THREAD)
        p->tgid = current->tgid;

    if (current->nsproxy != p->nsproxy) {
        retval = ns_cgroup_clone(p, pid);
        if (retval)
            goto bad_fork_free_pid;
    }

    p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
    /*
     * Clear TID on mm_release()?
     */
    p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr :
NULL;
#ifdef CONFIG_FUTEX
    p->robust_list = NULL;
#ifdef CONFIG_COMPAT
    p->compat_robust_list = NULL;
#endif
    INIT_LIST_HEAD(&p->pi_state_list);
    p->pi_state_cache = NULL;
#endif
    /*
     * sigaltstack should be cleared when sharing the same VM
     */
    if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
        p->sas_ss_sp = p->sas_ss_size = 0;

    /*
     * Syscall tracing and stepping should be turned off in the
     * child regardless of CLONE_PTRACE.
     */
    user_disable_single_step(p);
    clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
    clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
    clear_all_latency_tracing(p);

    /* ok, now we should be set up.. */
    p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
    p->pdeath_signal = 0;
    p->exit_state = 0;

    /*
     * Ok, make it visible to the rest of the system.
     * We dont wake it up yet.
     */
    p->group_leader = p;
    INIT_LIST_HEAD(&p->thread_group);

    /* Now that the task is set up, run cgroup callbacks if
     * necessary. We need to run them before the task is visible
     * on the tasklist. */
    cgroup_fork_callbacks(p);
    cgroup_callbacks_done = 1;

    /* Need tasklist lock for parent etc handling! */
    write_lock_irq(&tasklist_lock);

    /* CLONE_PARENT re-uses the old parent */
    if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
        p->real_parent = current->real_parent;
        p->parent_exec_id = current->parent_exec_id;
    } else {
        p->real_parent = current;
        p->parent_exec_id = current->self_exec_id;
    }

    spin_lock(&current->sighand->siglock);

    /*
     * Process group and session signals need to be delivered to just the
     * parent before the fork or both the parent and the child after the
     * fork. Restart if a signal comes in before we add the new process to
     * it's process group.
     * A fatal signal pending means that current will exit, so the new
     * thread can't slip out of an OOM kill (or normal SIGKILL).
     */
    recalc_sigpending();
    if (signal_pending(current)) {
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        retval = -ERESTARTNOINTR;
        goto bad_fork_free_pid;
    }

    if (clone_flags & CLONE_THREAD) {
        current->signal->nr_threads++;
        atomic_inc(&current->signal->live);
        atomic_inc(&current->signal->sigcnt);
        p->group_leader = current->group_leader;
        list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
    }

    if (likely(p->pid)) {
        tracehook_finish_clone(p, clone_flags, trace);

        if (thread_group_leader(p)) {
            if (clone_flags & CLONE_NEWPID)
                p->nsproxy->pid_ns->child_reaper = p;

            p->signal->leader_pid = pid;
            p->signal->tty = tty_kref_get(current->signal->tty);
            attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
            attach_pid(p, PIDTYPE_SID, task_session(current));
            list_add_tail(&p->sibling, &p->real_parent->children);
            list_add_tail_rcu(&p->tasks, &init_task.tasks);
            __get_cpu_var(process_counts)++;
        }
        attach_pid(p, PIDTYPE_PID, pid);
        nr_threads++;
    }

    total_forks++;
    spin_unlock(&current->sighand->siglock);
    write_unlock_irq(&tasklist_lock);
    proc_fork_connector(p);
    cgroup_post_fork(p);
    perf_event_fork(p);
    /* Return a pointer to the child's process descriptor. */
    return p;

bad_fork_free_pid:
    if (pid != &init_struct_pid)
        free_pid(pid);
bad_fork_cleanup_io:
    if (p->io_context)
        exit_io_context(p);
bad_fork_cleanup_namespaces:
    exit_task_namespaces(p);
bad_fork_cleanup_mm:
    if (p->mm) {
        task_lock(p);
        if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
            atomic_dec(&p->mm->oom_disable_count);
        task_unlock(p);
        mmput(p->mm);
    }
bad_fork_cleanup_signal:
    if (!(clone_flags & CLONE_THREAD))
        free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
    __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
    exit_fs(p); /* blocking */
bad_fork_cleanup_files:
    exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
    exit_sem(p);
bad_fork_cleanup_audit:
    audit_free(p);
bad_fork_cleanup_policy:
    perf_event_free_task(p);
#ifdef CONFIG_NUMA
    mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
    cgroup_exit(p, cgroup_callbacks_done);
    delayacct_tsk_free(p);
    module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
    atomic_dec(&p->cred->user->processes);
    exit_creds(p);
bad_fork_free:
    free_task(p);
fork_out:
    return ERR_PTR(retval);
}