/* $NetBSD$ */

/*
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * ...
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * ...
 */

__KERNEL_RCSID(0, "$NetBSD$");

#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/lockdebug.h>
#include <sys/intr.h>

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* Number of hardclock ticks per sched_tick() */
static int rrticks __read_mostly;
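
/*
 * A minimal sketch of how rrticks relates to the 100ms quantum described
 * below, assuming it is initialized from hz (the hardclock frequency in
 * ticks per second) during scheduler setup:
 */
#if 0
	rrticks = hz / 10;	/* e.g. hz = 100 -> 10 ticks per 100ms slice */
#endif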

/*
 * Force switch among equal priority processes every 100ms.
 */

void
sched_tick(struct cpu_info *ci)
{
	/* ... */
	if (spc->spc_flags & SPCF_SHOULDYIELD) {
		/*
		 * Process is stuck in kernel somewhere, probably
		 * due to buggy or inefficient code. Force a
		 * kernel preemption.
		 */
		cpu_need_resched(ci, RESCHED_KPREEMPT);
	}
	/* ... */
}

#define ESTCPULIM(e) min((e), ESTCPU_MAX)

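/*
 * A usage sketch for ESTCPULIM; ESTCPU_ACCUM as the name of the per-tick
 * increment is an assumption here, the point being that l_estcpu saturates
 * at ESTCPU_MAX instead of growing without bound:
 *
 *	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
 */
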
/*
 * The main parameter used by this algorithm is 'l_estcpu'. It is an estimate
 * of the recent CPU utilization of the thread.
 *
 * l_estcpu is:
 *  - increased each time the hardclock ticks and the thread is found to
 *    be executing, in sched_schedclock() called from hardclock()
 *  - decreased (filtered) on each sched tick, in sched_pstats_hook()
 * If the lwp is sleeping for more than a second, we don't touch l_estcpu: it
 * will be updated in sched_setrunnable() when the lwp wakes up, in burst mode
 * (ie, we decrease it n times).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * -----------------------------------------------------------------------------
 *
 * Here we describe how l_estcpu is decreased.
 *
 * Constants for digital decay (filter):
 *     90% of l_estcpu usage in (5 * loadavg) seconds
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds. That is, we
 * want to compute a value of decay such that the following loop:
 *     for (i = 0; i < (5 * loadavg); i++)
 *         l_estcpu *= decay;
 * will result in
 *     l_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *     decay ** (5 * loadavg) ~= .1
 *
 * And finally, the corresponding value of decay we're using is:
 *     decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * -----------------------------------------------------------------------------
 *
 * Now, let's prove that the value of decay stated above will always fulfill
 * the equation:
 *     decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *     b = 2 * loadavg
 * then
 *     decay = b / (b + 1)
 *
 * We now need to prove two things:
 *     1) Given [factor ** (5 * loadavg) =~ .1], prove [factor == b/(b+1)].
 *     2) Given [b/(b+1) ** power =~ .1], prove [power == (5 * loadavg)].
 *
 * Facts:
 *   * For x real: exp(x) = 0! + x**1/1! + x**2/2! + ...
 *     Therefore, for x close to zero, exp(x) =~ 1 + x.
 *     In turn, for b large enough, exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *
 *   * For b large enough, (b-1)/b =~ b/(b+1).
 *
 *   * For x belonging to [-1;1[, ln(1-x) = - x - x**2/2 - x**3/3 - ...
 *     Therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *
 *   * ln(0.1) =~ -2.30
 *
 * Proof of (1):
 *     factor ** (5 * loadavg) =~ 0.1
 *  => ln(factor) =~ -2.30 / (5 * loadavg)
 *  => factor =~ exp(-1 / ((5 / 2.30) * loadavg))
 *            =~ exp(-1 / (2 * loadavg))
 *            =~ exp(-1 / b)
 *            =~ (b - 1) / b
 *            =~ b / (b + 1)
 *            =~ (2 * loadavg) / ((2 * loadavg) + 1)
 *
 * Proof of (2):
 *     (b / (b + 1)) ** power =~ .1
 *  => power * ln(b / (b + 1)) =~ -2.30
 *  => power * (-1 / (b + 1)) =~ -2.30
 *  => power =~ 2.30 * (b + 1)
 *  => power =~ 4.60 * loadavg + 2.30
 *  => power =~ 5 * loadavg
 *
 * Conclusion: decay = (2 * loadavg) / (2 * loadavg + 1)
 */

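/*
 * A quick numeric check of the derivation above (a standalone sketch, not
 * kernel code): for loadavg = 2, decay = 4/5 = 0.8 and 0.8 ** 10 =~ 0.107,
 * so ~90% of l_estcpu is indeed forgotten after 10 = 5 * loadavg steps.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	for (int loadavg = 1; loadavg <= 4; loadavg++) {
		double decay = (2.0 * loadavg) / (2.0 * loadavg + 1);
		/* steps needed to decay to 10%: ln(0.1) / ln(decay) */
		double power = log(0.1) / log(decay);

		printf("loadavg=%d decay=%.4f power=%.2f (5*loadavg=%d)\n",
		    loadavg, decay, power, 5 * loadavg);
	}
	return 0;
}
#endif
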
/* See calculations above */
#define loadfactor(loadavg)	(2 * (loadavg))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{
	/* ... */

#if !defined(_LP64)
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

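/*
 * The expression above is the fixed-point form of decay = b / (b + 1):
 * loadfac carries b scaled by FSCALE, so
 *	estcpu * loadfac / (loadfac + FSCALE) =~ estcpu * b / (b + 1).
 * A standalone sketch, assuming FSHIFT = 11 as in <sys/param.h> and a
 * 32-bit fixpt_t:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define FSHIFT	11
#define FSCALE	(1 << FSHIFT)

typedef uint32_t fixpt_t;

static fixpt_t
decay_cpu_sketch(fixpt_t loadfac, fixpt_t estcpu)
{
	/* widen to 64 bits so the multiply cannot overflow */
	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

int
main(void)
{
	fixpt_t loadfac = 4 * FSCALE;	/* loadavg = 2 -> b = 4 */

	/* expect 1000 * 4/5 = 800 */
	printf("%u\n", (unsigned)decay_cpu_sketch(loadfac, 1000));
	return 0;
}
#endif
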
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	/*
	 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
	 * if we slept for at least seven times the loadfactor, we will decay
	 * l_estcpu to less than (1 << ESTCPU_SHIFT), and therefore we can
	 * return zero directly.
	 *
	 * Note that our ESTCPU_MAX is actually much smaller than
	 * (255 << ESTCPU_SHIFT).
	 */
	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}
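
	/*
	 * To see why seven times suffices: each decay step multiplies
	 * l_estcpu by b/(b+1) =~ exp(-1/b), so n = 7 * b steps multiply
	 * it by =~ exp(-7) =~ 0.0009, and 255 * 0.0009 =~ 0.23 < 1.
	 */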
	/* ... */
}

void
sched_pstats_hook(struct lwp *l, int batch)
{
	/* ... */
			return;
		}
	}

	loadfac = loadfactor(averunnable.ldavg[0]);
	l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
	resetpriority(l);
}

/*
 * Recalculate the priority of an LWP after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
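	/*
	 * A sketch of the burst-mode decay this function performs, assuming
	 * l_slptime counts the seconds the LWP has been sleeping (the actual
	 * function may do additional locking and bookkeeping):
	 */
#if 0
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	/* decay l_estcpu once per second slept, in a single batch */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
#endif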
	/* ... */
}

static void
resetpriority(struct lwp *l)
{
	/* ... */
}

/*
 * ...
 * is running (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy. The basic principle is
 * that the system will 90% forget that the process used a lot of CPU time
 * in (5 * loadavg) seconds. This causes the system to favor processes which
 * haven't run much recently, and to round-robin among other processes.
 */

void
sched_schedclock(struct lwp *l)
{
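	/*
	 * A sketch of the linear increment described above, assuming
	 * ESTCPU_ACCUM names the per-tick increment and that the usual
	 * lwp_lock()/lwp_unlock() pair protects the priority update:
	 */
#if 0
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);	/* recompute the priority from l_estcpu */
	lwp_unlock(l);
#endif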