From 2a4073c2bb288193f5e7a0d57e9cf2f9786dddc3 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Fri, 20 Jan 2012 14:34:19 -0800
Subject: [PATCH] SHM_UNLOCK: fix long unpreemptible section

commit 85046579bde15e532983438f86b36856e358f417 upstream.

scan_mapping_unevictable_pages() is used to make SysV SHM_LOCKed pages
evictable again once the shared memory is unlocked.  It does this with
pagevec_lookup()s across the whole object (which might occupy most of
memory), and takes 300ms to unlock 7GB here.  A cond_resched() every
PAGEVEC_SIZE pages would be good.

However, KOSAKI-san points out that this is called under shmem.c's
info->lock, and it's also under shm.c's shm_lock(), both spinlocks.
There is no strong reason for that: we need to take these pages off
the unevictable list soonish, but those locks are not required for it.

So move the call to scan_mapping_unevictable_pages() from shmem.c's
unlock handling up to shm.c's unlock handling.  Remove the recently
added barrier, not needed now we have spin_unlock() before the scan.

Use get_file(), with subsequent fput(), to make sure we have a
reference to mapping throughout scan_mapping_unevictable_pages():
that's something that was previously guaranteed by the shm_lock().

Remove shmctl's lru_add_drain_all(): we don't fault in pages at
SHM_LOCK time, and we lazily discover them to be Unevictable later, so
it serves no purpose for SHM_LOCK; and serves no purpose for
SHM_UNLOCK, since pages still on pagevec are not marked Unevictable.

The original code avoided redundant rescans by checking VM_LOCKED flag
at its level: now avoid them by checking shp's SHM_LOCKED.

The original code called scan_mapping_unevictable_pages() on a locked
area at shm_destroy() time: perhaps we once had accounting cross-checks
which required that, but not now, so skip the overhead and just let
inode eviction deal with them.

Put check_move_unevictable_page() and scan_mapping_unevictable_pages()
under CONFIG_SHMEM (with stub for the TINY case when ramfs is used),
more as comment than to save space; comment them used for SHM_UNLOCK.
Signed-off-by: Hugh Dickins
Reviewed-by: KOSAKI Motohiro
Cc: Minchan Kim
Cc: Rik van Riel
Cc: Shaohua Li
Cc: Eric Dumazet
Cc: Johannes Weiner
Cc: Michel Lespinasse
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
Signed-off-by: Greg Kroah-Hartman
---
 ipc/shm.c   | 37 ++++++++++++++++++++++---------------
 mm/shmem.c  |  7 -------
 mm/vmscan.c | 12 +++++++++++-
 3 files changed, 33 insertions(+), 23 deletions(-)

diff --git a/ipc/shm.c b/ipc/shm.c
index 02ecf2c078f..854ab58e5f6 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 	case SHM_LOCK:
 	case SHM_UNLOCK:
 	{
-		struct file *uninitialized_var(shm_file);
-
-		lru_add_drain_all();  /* drain pagevecs to lru lists */
+		struct file *shm_file;
 
 		shp = shm_lock_check(ns, shmid);
 		if (IS_ERR(shp)) {
@@ -895,22 +893,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 		err = security_shm_shmctl(shp, cmd);
 		if (err)
 			goto out_unlock;
-
-		if(cmd==SHM_LOCK) {
+
+		shm_file = shp->shm_file;
+		if (is_file_hugepages(shm_file))
+			goto out_unlock;
+
+		if (cmd == SHM_LOCK) {
 			struct user_struct *user = current_user();
-			if (!is_file_hugepages(shp->shm_file)) {
-				err = shmem_lock(shp->shm_file, 1, user);
-				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
-					shp->shm_perm.mode |= SHM_LOCKED;
-					shp->mlock_user = user;
-				}
+			err = shmem_lock(shm_file, 1, user);
+			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
+				shp->shm_perm.mode |= SHM_LOCKED;
+				shp->mlock_user = user;
 			}
-		} else if (!is_file_hugepages(shp->shm_file)) {
-			shmem_lock(shp->shm_file, 0, shp->mlock_user);
-			shp->shm_perm.mode &= ~SHM_LOCKED;
-			shp->mlock_user = NULL;
+			goto out_unlock;
 		}
+
+		/* SHM_UNLOCK */
+		if (!(shp->shm_perm.mode & SHM_LOCKED))
+			goto out_unlock;
+		shmem_lock(shm_file, 0, shp->mlock_user);
+		shp->shm_perm.mode &= ~SHM_LOCKED;
+		shp->mlock_user = NULL;
+		get_file(shm_file);
 		shm_unlock(shp);
+		scan_mapping_unevictable_pages(shm_file->f_mapping);
+		fput(shm_file);
 		goto out;
 	}
 	case IPC_RMID:
diff --git a/mm/shmem.c b/mm/shmem.c
index d6722506d2d..cc6d40b2892 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1068,13 +1068,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 		user_shm_unlock(inode->i_size, user);
 		info->flags &= ~VM_LOCKED;
 		mapping_clear_unevictable(file->f_mapping);
-		/*
-		 * Ensure that a racing putback_lru_page() can see
-		 * the pages of this mapping are evictable when we
-		 * skip them due to !PageLRU during the scan.
-		 */
-		smp_mb__after_clear_bit();
-		scan_mapping_unevictable_pages(file->f_mapping);
 	}
 	retval = 0;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f54a05b7a61..824676a4ca7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3353,6 +3353,7 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
  * @page: page to check evictability and move to appropriate lru list
@@ -3363,6 +3364,8 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
  *
  * Restrictions: zone->lru_lock must be held, page must be on LRU and must
  * have PageUnevictable set.
+ *
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
 static void check_move_unevictable_page(struct page *page, struct zone *zone)
 {
@@ -3396,6 +3399,8 @@ retry:
  *
  * Scan all pages in mapping.  Check unevictable pages for
  * evictability and move them to the appropriate zone lru list.
+ *
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
 void scan_mapping_unevictable_pages(struct address_space *mapping)
 {
@@ -3441,9 +3446,14 @@ void scan_mapping_unevictable_pages(struct address_space *mapping)
 		pagevec_release(&pvec);
 
 		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+		cond_resched();
 	}
-
 }
+#else
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+}
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {
-- 
2.11.4.GIT
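
[Editor's note, not part of the patch] As a reading aid, this is roughly how
the SHM_UNLOCK branch of shmctl() reads once the ipc/shm.c hunks above are
applied; the SHM_LOCK branch and the existing out_unlock/out labels are
elided, and the comments are the editor's, not the author's:

	/* SHM_UNLOCK path, reconstructed from the hunks above */
	shm_file = shp->shm_file;
	if (is_file_hugepages(shm_file))
		goto out_unlock;
	/* ... SHM_LOCK branch elided ... */
	if (!(shp->shm_perm.mode & SHM_LOCKED))
		goto out_unlock;	/* not SHM_LOCKed: nothing to rescan */
	shmem_lock(shm_file, 0, shp->mlock_user);
	shp->shm_perm.mode &= ~SHM_LOCKED;
	shp->mlock_user = NULL;
	get_file(shm_file);		/* keep the mapping alive across the scan */
	shm_unlock(shp);		/* drop the spinlock before the long scan */
	scan_mapping_unevictable_pages(shm_file->f_mapping);
	fput(shm_file);
	goto out;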