From 6b91ee436af4dea51b99748094d0ecdffd369504 Mon Sep 17 00:00:00 2001
From: Sepherosa Ziehau
Date: Tue, 14 Feb 2017 23:45:01 +0800
Subject: [PATCH] kern: Add cpucounter which returns 64bit monotonic counter.

It will be used to:
- Simplify per-cpu raw time extraction.
- ALTQ machclk.
- Per packet timestamp for CoDel.

As of this commit, dummy cpucounter, which falls back to cputimer, and
TSC cpucounter are implemented.
---
 sys/kern/kern_cputimer.c      | 58 ++++++++++++++++++++++++++++++++++++++++++
 sys/platform/pc64/isa/clock.c | 43 +++++++++++++++++++++++++++++++-
 sys/sys/systimer.h            | 38 ++++++++++++++++++++++++++++
 3 files changed, 138 insertions(+), 1 deletion(-)

diff --git a/sys/kern/kern_cputimer.c b/sys/kern/kern_cputimer.c
index 3e0b716e48..db65322025 100644
--- a/sys/kern/kern_cputimer.c
+++ b/sys/kern/kern_cputimer.c
@@ -48,6 +48,8 @@
 extern void pcpu_timer_process(void);
 extern void pcpu_timer_process_frame(struct intrframe *);
 
+static uint64_t dummy_cpucounter_count(void);
+
 static sysclock_t dummy_cputimer_count(void);
 
 static struct cputimer dummy_cputimer = {
@@ -65,9 +67,20 @@ static struct cputimer dummy_cputimer = {
 	.freq64_nsec = (1000000000LL << 32) / 1000000
 };
 
+static struct cpucounter dummy_cpucounter = {
+	.freq = 1000000ULL,
+	.count = dummy_cpucounter_count,
+	.flags = CPUCOUNTER_FLAG_MPSYNC,
+	.prio = CPUCOUNTER_PRIO_DUMMY,
+	.type = CPUCOUNTER_DUMMY
+};
+
 struct cputimer *sys_cputimer = &dummy_cputimer;
 SLIST_HEAD(, cputimer) cputimerhead = SLIST_HEAD_INITIALIZER(&cputimerhead);
 
+static SLIST_HEAD(, cpucounter) cpucounterhead =
+    SLIST_HEAD_INITIALIZER(cpucounterhead);
+
 static int cputimer_intr_ps_reqs;
 static struct lwkt_serialize cputimer_intr_ps_slize =
     LWKT_SERIALIZE_INITIALIZER;
@@ -609,3 +622,48 @@ pcpu_timer_process_frame(struct intrframe *frame)
 {
 	pcpu_timer_process_oncpu(mycpu, frame);
 }
+
+static uint64_t
+dummy_cpucounter_count(void)
+{
+	struct timeval tv;
+
+	microuptime(&tv);
+	return ((tv.tv_sec * 1000000ULL) + tv.tv_usec);
+}
+
+const struct cpucounter *
+cpucounter_find_pcpu(void)
+{
+	const struct cpucounter *cc, *ret;
+
+	ret = &dummy_cpucounter;
+	SLIST_FOREACH(cc, &cpucounterhead, link) {
+		if (cc->prio > ret->prio)
+			ret = cc;
+	}
+	return (ret);
+}
+
+const struct cpucounter *
+cpucounter_find(void)
+{
+	const struct cpucounter *cc, *ret;
+
+	ret = &dummy_cpucounter;
+	SLIST_FOREACH(cc, &cpucounterhead, link) {
+		if ((cc->flags & CPUCOUNTER_FLAG_MPSYNC) &&
+		    cc->prio > ret->prio)
+			ret = cc;
+	}
+	KASSERT(ret->flags & CPUCOUNTER_FLAG_MPSYNC,
+	    ("cpucounter %u is not MPsync", ret->type));
+	return (ret);
+}
+
+void
+cpucounter_register(struct cpucounter *cc)
+{
+
+	SLIST_INSERT_HEAD(&cpucounterhead, cc, link);
+}
diff --git a/sys/platform/pc64/isa/clock.c b/sys/platform/pc64/isa/clock.c
index 4c3d6439d2..435a27f96c 100644
--- a/sys/platform/pc64/isa/clock.c
+++ b/sys/platform/pc64/isa/clock.c
@@ -163,6 +163,14 @@ static struct cputimer tsc_cputimer = {
 	.freq = 0			/* determined later */
 };
 
+static struct cpucounter tsc_cpucounter = {
+	.freq = 0,			/* determined later */
+	.count = NULL,			/* determined later */
+	.flags = 0,			/* adjusted later */
+	.prio = CPUCOUNTER_PRIO_TSC,
+	.type = CPUCOUNTER_TSC
+};
+
 static void i8254_intr_reload(struct cputimer_intr *, sysclock_t);
 static void i8254_intr_config(struct cputimer_intr *, const struct cputimer *);
 static void i8254_intr_initclock(struct cputimer_intr *, boolean_t);
@@ -1405,14 +1413,35 @@ tsc_cputimer_count_mfence(void)
 	return tsc_cputimer_count();
 }
 
+static uint64_t
+tsc_cpucounter_count_lfence(void)
+{
+
+	cpu_lfence();
+	return (rdtsc());
+}
+
+static uint64_t
+tsc_cpucounter_count_mfence(void)
+{
+
+	cpu_mfence();
+	return (rdtsc());
+}
+
 static void
 tsc_cputimer_register(void)
 {
 	uint64_t freq;
 	int enable = 1;
 
-	if (!tsc_mpsync)
+	if (!tsc_mpsync) {
+		if (tsc_invariant) {
+			/* Per-cpu cpucounter still works. */
+			goto regcnt;
+		}
 		return;
+	}
 
 	TUNABLE_INT_FETCH("hw.tsc_cputimer_enable", &enable);
 	if (!enable)
@@ -1435,6 +1464,18 @@ tsc_cputimer_register(void)
 
 	cputimer_register(&tsc_cputimer);
 	cputimer_select(&tsc_cputimer, 0);
+
+	tsc_cpucounter.flags |= CPUCOUNTER_FLAG_MPSYNC;
+regcnt:
+	tsc_cpucounter.freq = tsc_frequency;
+	if (cpu_vendor_id == CPU_VENDOR_INTEL) {
+		tsc_cpucounter.count =
+		    tsc_cpucounter_count_lfence;
+	} else {
+		tsc_cpucounter.count =
+		    tsc_cpucounter_count_mfence;	/* safe bet */
+	}
+	cpucounter_register(&tsc_cpucounter);
 }
 SYSINIT(tsc_cputimer_reg, SI_BOOT2_POST_SMP, SI_ORDER_FIRST,
 	tsc_cputimer_register, NULL);
diff --git a/sys/sys/systimer.h b/sys/sys/systimer.h
index dbbb80bdf3..85c50883de 100644
--- a/sys/sys/systimer.h
+++ b/sys/sys/systimer.h
@@ -270,6 +270,44 @@ int cputimer_intr_select_caps(uint32_t);
 int cputimer_intr_powersave_addreq(void);
 void cputimer_intr_powersave_remreq(void);
 
+/*
+ * The cpucounter interface.
+ *
+ * REQUIREMENT FOR CPUCOUNTER IMPLEMENTATION:
+ *
+ * - The values returned by count() must be MP synchronized, if
+ *   CPUCOUNTER_FLAG_MPSYNC is set on 'flags'.
+ * - The values returned by count() must be stable under all situation,
+ *   e.g. when the platform enters power saving mode.
+ * - The values returned by count() must be monotonically increasing.
+ */
+struct cpucounter {
+	uint64_t	freq;
+	uint64_t	(*count)(void);
+	uint16_t	flags;		/* CPUCOUNTER_FLAG_ */
+	uint16_t	prio;		/* CPUCOUNTER_PRIO_ */
+	uint16_t	type;		/* CPUCOUNTER_ */
+	uint16_t	reserved;
+	SLIST_ENTRY(cpucounter) link;
+} __cachealign;
+
+#define CPUCOUNTER_FLAG_MPSYNC	0x0001
+
+#define CPUCOUNTER_DUMMY	0
+#define CPUCOUNTER_TSC		1
+#define CPUCOUNTER_VMM		2
+#define CPUCOUNTER_VMM1		3
+#define CPUCOUNTER_VMM2		4
+
+#define CPUCOUNTER_PRIO_DUMMY	0
+#define CPUCOUNTER_PRIO_TSC	50
+#define CPUCOUNTER_PRIO_VMM	100
+#define CPUCOUNTER_PRIO_VMM_HI	150
+
+void cpucounter_register(struct cpucounter *);
+const struct cpucounter *cpucounter_find_pcpu(void);
+const struct cpucounter *cpucounter_find(void);
+
 #endif	/* _KERNEL || _KERNEL_STRUCTURES */
 
 #endif	/* !_SYS_SYSTIMER_H_ */
-- 
2.11.4.GIT
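
A minimal usage sketch (not part of the patch itself), showing how a consumer
such as the ALTQ machclk or CoDel per-packet timestamping mentioned in the
commit message might use the new API. Only cpucounter_find(), the count()
callback, and the freq field come from the patch; the names example_cc,
example_init, example_elapsed_us and example_measure are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/systimer.h>

static const struct cpucounter *example_cc;

/*
 * Cache the MP-synchronized cpucounter once; cpucounter_find() falls back
 * to the dummy (microuptime-based, 1MHz) counter if nothing better has
 * registered itself.
 */
static void
example_init(void)
{
	example_cc = cpucounter_find();
}

/*
 * Convert a count() delta to microseconds using the counter frequency
 * (counts per second).  The intermediate product can overflow for very
 * large deltas; this is fine for short intervals such as per-packet
 * timestamps.
 */
static uint64_t
example_elapsed_us(uint64_t start, uint64_t end)
{
	return ((end - start) * 1000000ULL / example_cc->freq);
}

/* Typical measurement pattern. */
static void
example_measure(void)
{
	uint64_t t0, t1;

	t0 = example_cc->count();
	/* ... code being timed ... */
	t1 = example_cc->count();
	kprintf("elapsed: %ju us\n",
	    (uintmax_t)example_elapsed_us(t0, t1));
}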