The process scheduling API set_cpus_allowed_ptr


int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
The first argument, struct task_struct *p, is the task whose CPU affinity is being changed; the second, new_mask, is the set of CPUs on which the task will be allowed to run.
For example, the kernel's main init code (init/main.c) calls it as follows:
	/*
	 * init can run on any cpu.
	 */
	set_cpus_allowed_ptr(current, cpu_all_mask);
This allows the init task to run on any CPU.
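As another illustration, a module that wants its own kernel thread confined to a single CPU can call the same API before waking the thread. The following is a minimal sketch, not code from the kernel tree being analyzed; the thread name "pin_example" and the choice of CPU 0 are arbitrary:

#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/sched.h>

static int worker_fn(void *data)
{
	/* Idle until told to stop; the loop body is irrelevant here. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int start_pinned_worker(void)
{
	struct task_struct *tsk;

	tsk = kthread_create(worker_fn, NULL, "pin_example");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	/* Allow the new thread to run only on CPU 0 (arbitrary choice). */
	set_cpus_allowed_ptr(tsk, cpumask_of(0));
	wake_up_process(tsk);
	return 0;
}

(kthread_bind() is the more common idiom for strict per-CPU kernel threads; it also sets PF_NO_SETAFFINITY, which we will meet below.) set_cpus_allowed_ptr() returns 0 on success and a negative errno on failure.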
The implementation:
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
It simply forwards to __set_cpus_allowed_ptr() with the check argument set to false.
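By contrast, the user-facing sched_setaffinity() system call path passes true, which arms the PF_NO_SETAFFINITY check seen below. In kernels of this generation it looks roughly like this (abridged):

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	...
	retval = __set_cpus_allowed_ptr(p, new_mask, true);
	...
}

Now to the body of __set_cpus_allowed_ptr():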
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	const struct cpumask *cpu_valid_mask = cpu_active_mask;
	unsigned int dest_cpu;
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;
# Take the run queue (rq) lock of the CPU the task is on; task_rq_lock() acquires both p->pi_lock and the rq's lock.
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);
# The PF_KTHREAD flag marks a kernel thread: a task that runs entirely in kernel space and has no user-space address space.
# Kernel threads are allowed to run on any online CPU, even one that is online but not active (as happens during CPU hotplug).
	if (p->flags & PF_KTHREAD) {
		/*
		 * Kernel threads are allowed on online && !active CPUs
		 */
		cpu_valid_mask = cpu_online_mask;
	}

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
# If the caller requested checking (check == true) and this thread's affinity must not be changed (PF_NO_SETAFFINITY), fail with -EINVAL.
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}
# If the new mask is identical to the thread's current allowed-CPU mask, there is nothing to do; return straight away.
	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
		ret = -EINVAL;
		goto out;
	}
# Install new_mask as the task's cpus_allowed mask.
	do_set_cpus_allowed(p, new_mask);
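# do_set_cpus_allowed() (roughly): if the task is queued it is dequeued first, then
# p->sched_class->set_cpus_allowed() updates p->cpus_allowed and p->nr_cpus_allowed,
# and finally the task is enqueued again so the scheduler sees the new mask.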

	if (p->flags & PF_KTHREAD) {
		/*
		 * For kernel threads that do indeed end up on online &&
		 * !active we want to ensure they are strict per-CPU threads.
		 */
		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
			!cpumask_intersects(new_mask, cpu_active_mask) &&
			p->nr_cpus_allowed != 1);
	}

	/* Can the task run on the task's current CPU? If so, we're done */
# If the CPU the task is currently on is in the new mask, nothing more needs to be done.
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;
# Otherwise the task's current CPU is not in new_mask: pick a destination CPU from the intersection of cpu_valid_mask and new_mask.
# If the task is currently running, or is mid-wakeup (TASK_WAKING), it must be migrated right away; a task that is asleep will simply be placed on an allowed CPU when it next wakes.
	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
# In this case the task is on a CPU right now, so drop the rq lock and have stop_one_cpu() run migration_cpu_stop() in the stopper thread of that CPU, which preempts the task and moves it to dest_cpu.
		task_rq_unlock(rq, p, &rf);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
# In this case the task is on the run queue but not actually running, so it can be moved to the destination CPU's run queue directly. (A task that is fully asleep is not queued at all and needs no migration now; the new mask takes effect at wakeup.)
		rq = move_queued_task(rq, &rf, p, dest_cpu);
	}
out:
	task_rq_unlock(rq, p, &rf);

	return ret;
}
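For the running/waking case above, the real work happens in migration_cpu_stop(), which stop_one_cpu() executes in stopper-thread context on the task's CPU and waits for; this is why the rq lock has to be dropped first. An abridged version from the same kernel generation (exact details vary slightly between releases):

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();
	struct rq_flags rf;

	local_irq_disable();
	raw_spin_lock(&p->pi_lock);
	rq_lock(rq, &rf);
	/* Re-check: the task may have moved or slept since the lock was dropped. */
	if (task_rq(p) == rq) {
		if (task_on_rq_queued(p))
			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
		else
			p->wake_cpu = arg->dest_cpu;
	}
	rq_unlock(rq, &rf);
	raw_spin_unlock(&p->pi_lock);
	local_irq_enable();
	return 0;
}

Because the stopper thread is the highest-priority entity on its CPU, the target task is guaranteed to have been preempted by the time this runs: it is then either still queued (and is migrated directly) or has gone to sleep (and only its wake_cpu needs to be redirected).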