Discussing the Internal Implementation of Wait Queues (Part 2)


Discussing the Internal Implementation of Wait Queues (Part 1)
http://blog.csdn.net/yyttiao/article/details/7875871
The previous article covered how entries are added to a wait queue and how a task goes to sleep on it. This time: how does the wakeup happen?
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)


#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
Wakeup is done mainly through the functions above, and they are all essentially the same. Today we will mainly analyze wake_up_interruptible, the counterpart of the wait side covered in the previous article.
This article mostly follows the flow and the code comments; there is not much extra explanation.
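Before diving into the internals, here is a minimal sketch of how the two sides pair up in a driver (this is my own illustration, not code from the kernel; my_wq, data_ready, my_consumer and my_producer are made-up names):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int data_ready;

/* sleeping side: block until data_ready becomes true */
static int my_consumer(void)
{
	/* returns nonzero if a signal interrupted the sleep */
	if (wait_event_interruptible(my_wq, data_ready))
		return -ERESTARTSYS;
	data_ready = 0;
	return 0;
}

/* waking side: make the condition true first, then wake the sleepers */
static void my_producer(void)
{
	data_ready = 1;
	wake_up_interruptible(&my_wq);
}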
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, void *key)
{
	unsigned long flags;
	/* lock the wait_queue_head_t, disabling local interrupts and saving
	 * the flags, since wakeups may also come from IRQ context */
	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
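A side note on the spin_lock_irqsave() above: because __wake_up() disables interrupts while holding q->lock, wake_up_interruptible() and friends are safe to call from interrupt context, which is a very common pattern. A hedged sketch (my_irq_wq and my_isr are illustrative names):

#include <linux/interrupt.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_irq_wq);

static irqreturn_t my_isr(int irq, void *dev_id)
{
	/* ... acknowledge the hardware, fill a buffer ... */
	wake_up_interruptible(&my_irq_wq);	/* safe in hard-IRQ context */
	return IRQ_HANDLED;
}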
/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
				int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_t *curr, *next;


	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;
		/* curr->func is normally default_wake_function; if it woke the
		 * task and this entry is exclusive, count it against nr_exclusive */
		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}
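Note how nr_exclusive works here: entries without WQ_FLAG_EXCLUSIVE are always woken, while each exclusive entry that wakes successfully decrements nr_exclusive. So wake_up() (nr_exclusive == 1) wakes all non-exclusive waiters plus exactly one exclusive waiter, and wake_up_all() (nr_exclusive == 0) wakes everyone. A task opts into exclusive waiting roughly like this (a sketch; accept_wq and got_request() are made-up names):

DEFINE_WAIT(wait);

for (;;) {
	/* sets WQ_FLAG_EXCLUSIVE and queues at the tail of accept_wq,
	 * so one wake_up() releases only one of many such waiters
	 * (avoiding a thundering herd, e.g. on a listening socket) */
	prepare_to_wait_exclusive(&accept_wq, &wait, TASK_INTERRUPTIBLE);
	if (got_request())
		break;
	schedule();
}
finish_wait(&accept_wq, &wait);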
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return try_to_wake_up(curr->private, mode, wake_flags);
}
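Where does curr->func come from? In kernels of this era, init_waitqueue_entry() installs default_wake_function and stores the task in ->private, which is exactly the pointer default_wake_function hands to try_to_wake_up(); the DEFINE_WAIT() used by the wait_event family installs autoremove_wake_function() instead, which chains to default_wake_function() and removes the entry once the task is actually woken:

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}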


/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Returns %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int try_to_wake_up(struct task_struct *p, unsigned int state,
			  int wake_flags)
{
	int cpu, orig_cpu, this_cpu, success = 0;
	unsigned long flags;
	unsigned long en_flags = ENQUEUE_WAKEUP;
	struct rq *rq;
	/* disable preemption and get the current CPU number */
	this_cpu = get_cpu();
	/* write memory barrier */
	smp_wmb();
	/* find the runqueue the task is on and lock it */
	rq = task_rq_lock(p, &flags);
	/* the task's state does not match the wakeup mask; nothing to do */
	if (!(p->state & state))
		goto out;


	if (p->se.on_rq)
		goto out_running;
	/* which CPU is the task on? */
	cpu = task_cpu(p);
	/* save the original cpu */
	orig_cpu = cpu;


	/* SMP support: on SMP the task being woken may be running on
	 * another CPU, or a better CPU may be chosen for it, so the
	 * task may need to be migrated before it is enqueued.
	 */
#ifdef CONFIG_SMP
	if (unlikely(task_running(rq, p)))
		goto out_activate;

	/*
	 * In order to handle concurrent wakeups and release the rq->lock
	 * we put the task in TASK_WAKING state.
	 *
	 * First fix up the nr_uninterruptible count:
	 */
	if (task_contributes_to_load(p)) {
		if (likely(cpu_online(orig_cpu)))
			rq->nr_uninterruptible--;
		else
			this_rq()->nr_uninterruptible--;
	}
	p->state = TASK_WAKING;


	if (p->sched_class->task_waking) {
		p->sched_class->task_waking(rq, p);
		en_flags |= ENQUEUE_WAKING;
	}

	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
	if (cpu != orig_cpu)
		set_task_cpu(p, cpu);
	__task_rq_unlock(rq);


	rq = cpu_rq(cpu);
	raw_spin_lock(&rq->lock);

	/*
	 * We migrated the task without holding either rq->lock, however
	 * since the task is not on the task list itself, nobody else
	 * will try and migrate the task, hence the rq should match the
	 * cpu we just moved it to.
	 */
	WARN_ON(task_cpu(p) != cpu);
	WARN_ON(p->state != TASK_WAKING);


#ifdef CONFIG_SCHEDSTATS
	schedstat_inc(rq, ttwu_count);
	if (cpu == this_cpu)
		schedstat_inc(rq, ttwu_local);
	else {
		struct sched_domain *sd;
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
	}
#endif /* CONFIG_SCHEDSTATS */


out_activate:
#endif /* CONFIG_SMP */
	/* put p on the runqueue rq:
	 * ttwu_activate() calls activate_task() to enqueue it
	 */
	ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
		      cpu == this_cpu, en_flags);
	success = 1;
out_running:
	/* set the task state back to TASK_RUNNING, i.e. the
	 *     p->state = TASK_RUNNING;
	 * mentioned in the header comment, and check whether the newly
	 * woken task should preempt what is currently running on the CPU
	 */
	ttwu_post_activation(p, rq, wake_flags, success);
out:
	task_rq_unlock(rq, &flags);
	put_cpu();

	return success;
}
Summary: for wait queues, the essential thing is the switching of the task state, which determines whether schedule() can pick the task up and run it. The way the state is set to satisfy these conditions is the whole principle behind the wait queue mechanism.
wait_event sets the task to a non-TASK_RUNNING state; wake_up sets it back to TASK_RUNNING.
In practice wait_event is often not called directly: as in the code from the first article, the condition is checked first, and sometimes you have to consider whether to block at all; if there is no need to block, wait_event is unnecessary. Of course there are other ways to do it, varying from person to person; a bare-bones sketch follows.
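As an illustration of that principle (my own sketch, not kernel code; condition and the task pointer p are placeholders), here is the minimum a waiter and a waker need without the wait_event() wrapper:

/* waiter side: leave TASK_RUNNING *before* checking the condition,
 * so a wakeup that lands in between is not lost */
set_current_state(TASK_INTERRUPTIBLE);
if (!condition)
	schedule();			/* not TASK_RUNNING, so we really sleep */
__set_current_state(TASK_RUNNING);

/* waker side: make the condition true, then flip the task back;
 * wake_up_process(p) is just try_to_wake_up(p, TASK_NORMAL, 0) */
condition = 1;
wake_up_process(p);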
Later, after covering schedule(), I will analyze in detail how this executes in the SMP case.
Thanks for reading.