find_energy_efficient_cpu

348 reads · 1-minute read

Call path: select_task_rq_fair -> find_energy_efficient_cpu.

select_task_rq_fair is reached from:

  1. try_to_wake_up (e.g. via wake_up_process -> try_to_wake_up)
  2. wake_up_new_task
  3. sched_exec
/*
 * find_energy_efficient_cpu - pick the most energy-efficient CPU for a
 * waking task, using the Energy Model (EAS wake-up path).
 * @p:        the task being woken
 * @prev_cpu: the CPU @p last ran on
 * @sync:     non-zero for a synchronous wakeup (waker expected to sleep soon)
 *
 * Walks every performance domain reachable from the lowest
 * sd_asym_cpucapacity domain spanning this CPU and @prev_cpu, estimates via
 * compute_energy() the system energy with @p placed on each domain's best
 * candidate (the CPU with the most spare capacity), and returns the CPU
 * giving the lowest energy delta over the "base" (task-less) energy.
 * Latency-sensitive (uclamp) tasks skip the energy comparison and instead
 * get the best idle CPU, or the non-idle CPU with the most spare capacity.
 *
 * Returns the chosen CPU, or -1 to make the caller fall back to the regular
 * wake-up path (no perf domains, root domain overutilized, or no suitable
 * sched domain found).
 */
static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
{
	/* ULONG_MAX sentinels mean "no energy estimate computed yet". */
	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	/* Fallbacks for the latency-sensitive path: prev_cpu / "no idle CPU". */
	int max_spare_cap_cpu_ls = prev_cpu, best_idle_cpu = -1;
	unsigned long max_spare_cap_ls = 0, target_cap;
	unsigned long cpu_cap, util, base_energy = 0;
	bool boosted, latency_sensitive = false;
	/* Shallowest idle-state exit latency seen so far (tie-breaker). */
	unsigned int min_exit_lat = UINT_MAX;
	int cpu, best_energy_cpu = prev_cpu;
	struct cpuidle_state *idle;
	struct sched_domain *sd;
	struct perf_domain *pd;
	int new_cpu = INT_MAX;

	/* Refresh @p's load/util averages so the estimates below are current. */
	sync_entity_load_avg(&p->se);
	/*
	 * Android vendor hook: a module may choose the CPU itself. INT_MAX
	 * means "not handled"; any other value is returned as-is.
	 */
	trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu);
	if (new_cpu != INT_MAX)
		return new_cpu;

	rcu_read_lock();
	pd = rcu_dereference(rd->pd);
	/* EAS needs perf domains and only runs while not overutilized. */
	if (!pd || READ_ONCE(rd->overutilized))
		goto fail;

	/*
	 * Sync-wakeup fast path: if this CPU is running only the waker, is
	 * allowed for @p and has enough capacity for it, place @p here
	 * without doing any energy computation.
	 */
	cpu = smp_processor_id();
	if (sync && cpu_rq(cpu)->nr_running == 1 &&
	    cpumask_test_cpu(cpu, p->cpus_ptr) &&
	    task_fits_capacity(p, capacity_of(cpu))) {
		rcu_read_unlock();
		return cpu;
	}

	/*
	 * Energy-aware wake-up happens on the lowest sched_domain starting
	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
	 */
	sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
	while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
		sd = sd->parent;
	if (!sd)
		goto fail;

	/*
	 * A task with zero estimated utilization barely affects energy:
	 * bail out and keep prev_cpu (best_energy_cpu still holds it and
	 * prev_delta is still ULONG_MAX, so the tail returns it).
	 */
	if (!task_util_est(p))
		goto unlock;

	latency_sensitive = uclamp_latency_sensitive(p);
	boosted = uclamp_boosted(p);
	/*
	 * Idle-CPU capacity target for latency-sensitive tasks: boosted
	 * tasks hunt for the biggest CPU (target grows from 0), others for
	 * the smallest (target shrinks from ULONG_MAX).
	 */
	target_cap = boosted ? 0 : ULONG_MAX;

	for (; pd; pd = pd->next) {
		unsigned long cur_delta, spare_cap, max_spare_cap = 0;
		unsigned long base_energy_pd;
		int max_spare_cap_cpu = -1;

		/* Compute the 'base' energy of the pd, without @p */
		base_energy_pd = compute_energy(p, -1, pd);
		base_energy += base_energy_pd;

		for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
			if (!cpumask_test_cpu(cpu, p->cpus_ptr))
				continue;

			/* Utilization of this CPU with @p enqueued on it. */
			util = cpu_util_next(cpu, p, cpu);
			cpu_cap = capacity_of(cpu);
			spare_cap = cpu_cap;
			lsub_positive(&spare_cap, util);

			/*
			 * Skip CPUs that cannot satisfy the capacity request.
			 * IOW, placing the task there would make the CPU
			 * overutilized. Take uclamp into account to see how
			 * much capacity we can get out of the CPU; this is
			 * aligned with schedutil_cpu_util().
			 */
			util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
			if (!fits_capacity(util, cpu_cap))
				continue;

			/* Always use prev_cpu as a candidate. */
			if (!latency_sensitive && cpu == prev_cpu) {
				prev_delta = compute_energy(p, prev_cpu, pd);
				prev_delta -= base_energy_pd;
				best_delta = min(best_delta, prev_delta);
			}

			/*
			 * Find the CPU with the maximum spare capacity in
			 * the performance domain
			 */
			if (spare_cap > max_spare_cap) {
				max_spare_cap = spare_cap;
				max_spare_cap_cpu = cpu;
			}

			/* The rest of the loop only serves latency-sensitive tasks. */
			if (!latency_sensitive)
				continue;

			if (idle_cpu(cpu)) {
				/* Compare original (uncapped) capacities. */
				cpu_cap = capacity_orig_of(cpu);
				/* Boosted: keep only bigger-or-equal CPUs. */
				if (boosted && cpu_cap < target_cap)
					continue;
				/* Non-boosted: keep only smaller-or-equal CPUs. */
				if (!boosted && cpu_cap > target_cap)
					continue;
				/*
				 * Among equal-capacity idle CPUs, prefer the
				 * one in the shallowest idle state (lowest
				 * wake-up exit latency).
				 */
				idle = idle_get_state(cpu_rq(cpu));
				if (idle && idle->exit_latency > min_exit_lat &&
						cpu_cap == target_cap)
					continue;

				if (idle)
					min_exit_lat = idle->exit_latency;
				target_cap = cpu_cap;
				best_idle_cpu = cpu;
			} else if (spare_cap > max_spare_cap_ls) {
				/* No idle CPU yet: track the roomiest busy one. */
				max_spare_cap_ls = spare_cap;
				max_spare_cap_cpu_ls = cpu;
			}
		}

		/* Evaluate the energy impact of using this CPU. */
		if (!latency_sensitive && max_spare_cap_cpu >= 0 &&
						max_spare_cap_cpu != prev_cpu) {
			cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
			cur_delta -= base_energy_pd;
			if (cur_delta < best_delta) {
				best_delta = cur_delta;
				best_energy_cpu = max_spare_cap_cpu;
			}
		}
	}
unlock:
	rcu_read_unlock();

	/* Latency-sensitive: best idle CPU, else roomiest busy CPU/prev_cpu. */
	if (latency_sensitive)
		return best_idle_cpu >= 0 ? best_idle_cpu : max_spare_cap_cpu_ls;

	/*
	 * Pick the best CPU if prev_cpu cannot be used, or if it saves at
	 * least 6% of the energy used by prev_cpu.
	 */
	if (prev_delta == ULONG_MAX)
		return best_energy_cpu;

	/* ">> 4" is 1/16 (~6%) of the total estimated energy with @p placed. */
	if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 4))
		return best_energy_cpu;

	return prev_cpu;

fail:
	rcu_read_unlock();

	/* -1: let the caller fall back to the non-EAS wake-up path. */
	return -1;
}