From 73b1bfb1db1221c0954ef1a7a18f89760f3b0806 Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Sat, 7 Jan 2017 23:05:59 -0800
Subject: [PATCH] kernel - Remove unused pmap_list

* pmap_list was used in the (now removed) 32-bit pmap code because
  expansion of the kernel_pmap potentially required updating the PT
  pages in all existing user pmaps.  The 64-bit pmap code does not
  need to do this so the list can be removed.

* Removes a global spin-lock from the pmap creation and release path.
  Not a major factor for performance but a nice cleanup.
---
 sys/platform/pc64/x86_64/pmap.c        | 19 ++++++-------------
 sys/platform/vkernel64/platform/pmap.c | 17 +----------------
 2 files changed, 7 insertions(+), 29 deletions(-)

diff --git a/sys/platform/pc64/x86_64/pmap.c b/sys/platform/pc64/x86_64/pmap.c
index 88da12c98e..4ff921c290 100644
--- a/sys/platform/pc64/x86_64/pmap.c
+++ b/sys/platform/pc64/x86_64/pmap.c
@@ -152,7 +152,6 @@ static int protection_codes[PROTECTION_CODES_SIZE];
 
 struct pmap kernel_pmap;
 
-static TAILQ_HEAD(,pmap)	pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
 
 MALLOC_DEFINE(M_OBJPMAP, "objpmap",
 	      "pmaps associated with VM objects");
@@ -1695,8 +1694,7 @@ pmap_pinit_defaults(struct pmap *pmap)
 	pmap->suword32 = std_suword32;
 }
 /*
- * Initialize pmap0/vmspace0.  This pmap is not added to pmap_list because
- * it, and IdlePTD, represents the template used to update all other pmaps.
+ * Initialize pmap0/vmspace0.
  *
  * On architectures where the kernel pmap is not integrated into the user
  * process pmap, this pmap represents the process pmap, not the kernel pmap.
@@ -1860,17 +1858,11 @@ pmap_puninit(pmap_t pmap)
 }
 
 /*
- * Wire in kernel global address entries.  To avoid a race condition
- * between pmap initialization and pmap_growkernel, this procedure
- * adds the pmap to the master list (which growkernel scans to update),
- * then copies the template.
+ * This function is now unused (used to add the pmap to the pmap_list)
  */
 void
 pmap_pinit2(struct pmap *pmap)
 {
-	spin_lock(&pmap_spin);
-	TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
-	spin_unlock(&pmap_spin);
 }
 
 /*
@@ -2376,9 +2368,10 @@ pmap_release(struct pmap *pmap)
 		("pmap still active! %016jx",
 		(uintmax_t)CPUMASK_LOWMASK(pmap->pm_active)));
 
-	spin_lock(&pmap_spin);
-	TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
-	spin_unlock(&pmap_spin);
+	/*
+	 * There is no longer a pmap_list, if there were we would remove the
+	 * pmap from it here.
+	 */
 
 	/*
 	 * Pull pv's off the RB tree in order from low to high and release
diff --git a/sys/platform/vkernel64/platform/pmap.c b/sys/platform/vkernel64/platform/pmap.c
index 9a2fee1b1d..8ca0cd515a 100644
--- a/sys/platform/vkernel64/platform/pmap.c
+++ b/sys/platform/vkernel64/platform/pmap.c
@@ -136,7 +136,6 @@ static pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va);
 static int protection_codes[8];
 
 struct pmap kernel_pmap;
-static TAILQ_HEAD(,pmap)	pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);
 
 static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
 
@@ -1286,21 +1285,11 @@ pmap_puninit(pmap_t pmap)
 }
 
 /*
- * Wire in kernel global address entries.  To avoid a race condition
- * between pmap initialization and pmap_growkernel, this procedure
- * adds the pmap to the master list (which growkernel scans to update),
- * then copies the template.
- *
- * In a virtual kernel there are no kernel global address entries.
- *
- * No requirements.
+ * This function is now unused (used to add the pmap to the pmap_list)
  */
 void
 pmap_pinit2(struct pmap *pmap)
 {
-	spin_lock(&pmap_spin);
-	TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
-	spin_unlock(&pmap_spin);
 }
 
 /*
@@ -1628,10 +1617,6 @@ pmap_release(struct pmap *pmap)
 		pmap,
 		(uintmax_t)CPUMASK_LOWMASK(pmap->pm_active)));
 
-	spin_lock(&pmap_spin);
-	TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
-	spin_unlock(&pmap_spin);
-
 	vm_object_hold(object);
 	do {
 		info.error = 0;
-- 
2.11.4.GIT
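For readers who have not seen the old 32-bit code this commit refers to, the
following is a minimal, self-contained C sketch of the pattern being removed:
a global list of pmaps protected by a single lock, kept only so that a
growkernel-style pass can walk every pmap.  It is not DragonFly kernel code;
the demo_* names are made up, a pthread mutex stands in for the kernel's
pmap_spin spinlock, and the userspace <sys/queue.h> TAILQ macros play the
role of the kernel list.

/*
 * Illustrative sketch only (not DragonFly code): a global registration
 * list of pmaps, analogous to the removed pmap_list/pmap_spin pair.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>

struct demo_pmap {
	int			dp_id;		/* stand-in for real pmap state */
	TAILQ_ENTRY(demo_pmap)	dp_node;	/* analogous to pm_pmnode */
};

static TAILQ_HEAD(, demo_pmap) demo_pmap_list =
    TAILQ_HEAD_INITIALIZER(demo_pmap_list);
static pthread_mutex_t demo_pmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogous to the old pmap_pinit2(): register a freshly created pmap. */
static void
demo_pinit2(struct demo_pmap *pmap)
{
	pthread_mutex_lock(&demo_pmap_lock);
	TAILQ_INSERT_TAIL(&demo_pmap_list, pmap, dp_node);
	pthread_mutex_unlock(&demo_pmap_lock);
}

/* Analogous to the old pmap_release() path: unregister the pmap. */
static void
demo_release(struct demo_pmap *pmap)
{
	pthread_mutex_lock(&demo_pmap_lock);
	TAILQ_REMOVE(&demo_pmap_list, pmap, dp_node);
	pthread_mutex_unlock(&demo_pmap_lock);
}

/*
 * The reason the list existed at all: the 32-bit pmap_growkernel() had to
 * visit every registered pmap and update its kernel PT pages after the
 * kernel_pmap grew.  This walk is what the 64-bit pmap no longer needs.
 */
static void
demo_growkernel(void)
{
	struct demo_pmap *p;

	pthread_mutex_lock(&demo_pmap_lock);
	TAILQ_FOREACH(p, &demo_pmap_list, dp_node)
		printf("would update PT pages of pmap %d\n", p->dp_id);
	pthread_mutex_unlock(&demo_pmap_lock);
}

int
main(void)
{
	struct demo_pmap a = { .dp_id = 1 }, b = { .dp_id = 2 };

	demo_pinit2(&a);
	demo_pinit2(&b);
	demo_growkernel();
	demo_release(&b);
	demo_release(&a);
	return 0;
}

Because the 64-bit pmap never needs that whole-list walk, registering and
unregistering each pmap (and the global lock acquisitions that come with it)
is pure overhead in the pmap create/release path, which is exactly what the
hunks above delete.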