Index: kern/kern_exec.c
===================================================================
--- kern/kern_exec.c	(revision 254308)
+++ kern/kern_exec.c	(working copy)
@@ -950,7 +950,7 @@ exec_map_first_page(imgp)
 				break;
 			} else {
 				ma[i] = vm_page_alloc(object, i,
-				    VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
+				    VM_ALLOC_NORMAL);
 				if (ma[i] == NULL)
 					break;
 			}
Index: fs/tmpfs/tmpfs_subr.c
===================================================================
--- fs/tmpfs/tmpfs_subr.c	(revision 254308)
+++ fs/tmpfs/tmpfs_subr.c	(working copy)
@@ -104,8 +104,7 @@ tmpfs_mem_avail(void)
 {
 	vm_ooffset_t avail;
 
-	avail = swap_pager_avail + cnt.v_free_count + cnt.v_cache_count -
-	    tmpfs_pages_reserved;
+	avail = swap_pager_avail + cnt.v_free_count - tmpfs_pages_reserved;
 	if (__predict_false(avail < 0))
 		avail = 0;
 	return (avail);
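The tmpfs hunk is the simplest instance of a pattern this patch applies
everywhere: cnt.v_cache_count is dropped from every "available memory"
computation, because pages on the new PQ_CACHE queue are no longer
directly allocatable.  A worked example, with illustrative numbers that
are not taken from the patch: suppose swap_pager_avail = 100,
v_free_count = 50, v_cache_count = 200 and tmpfs_pages_reserved = 25.

	before:	avail = 100 + 50 + 200 - 25 = 325 pages
	after:	avail = 100 + 50 - 25       = 125 pages

The same substitution recurs below in vm_meter.c, swap_pager.c,
vm_page.c, vnode_pager.c and vmmeter.h: a cached page must now be
reclaimed by the pageout daemon before it can back a new allocation, so
it no longer counts as immediately available.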
Index: vm/vm_pageout.c
===================================================================
--- vm/vm_pageout.c	(revision 254387)
+++ vm/vm_pageout.c	(working copy)
@@ -897,6 +897,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 	int act_delta;
 	int vnodes_skipped = 0;
 	int maxlaunder;
+	int pqueue;
 	boolean_t queues_locked;
 
 	/*
@@ -921,6 +922,8 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 	 * number of pages from the inactive count that should be
 	 * discounted in setting the target for the active queue scan.
 	 */
+	pqueue = PQ_CACHE;
+again:
 	addl_page_shortage = atomic_readandclear_int(&vm_pageout_deficit);
 
 	/*
@@ -951,7 +954,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 	 * is not used to form decisions for the inactive queue, only for the
 	 * active queue.
 	 */
-	pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
+	pq = &vmd->vmd_pagequeues[pqueue];
 	maxscan = pq->pq_cnt;
 	vm_pagequeue_lock(pq);
 	queues_locked = TRUE;
@@ -960,7 +963,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 	     m = next) {
 		vm_pagequeue_assert_locked(pq);
 		KASSERT(queues_locked, ("unlocked queues"));
-		KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m));
+		KASSERT(m->queue == pqueue, ("Incorrect queue %p", m));
 
 		PCPU_INC(cnt.v_pdpages);
 		next = TAILQ_NEXT(m, plinks.q);
@@ -1083,20 +1086,13 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 		if (m->dirty == 0 && object->ref_count != 0)
 			pmap_remove_all(m);
 
-		if (m->valid == 0) {
+		if (m->valid == 0 || m->dirty == 0) {
 			/*
 			 * Invalid pages can be easily freed
 			 */
 			vm_page_free(m);
 			PCPU_INC(cnt.v_dfree);
 			--page_shortage;
-		} else if (m->dirty == 0) {
-			/*
-			 * Clean pages can be placed onto the cache queue.
-			 * This effectively frees them.
-			 */
-			vm_page_cache(m);
-			--page_shortage;
 		} else if ((m->flags & PG_WINATCFLS) == 0 && pass < 2) {
 			/*
 			 * Dirty pages need to be paged out, but flushing
@@ -1205,7 +1201,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 			 * above.  The page might have been freed and
 			 * reused for another vnode.
 			 */
-			if (m->queue != PQ_INACTIVE ||
+			if (m->queue != pqueue ||
 			    m->object != object ||
 			    TAILQ_NEXT(m, plinks.q) != &vmd->vmd_marker) {
 				vm_page_unlock(m);
@@ -1281,6 +1277,10 @@ relock_queues:
 		TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
 	}
 	vm_pagequeue_unlock(pq);
+	if (pqueue == PQ_CACHE) {
+		pqueue = PQ_INACTIVE;
+		goto again;
+	}
 
 	/*
 	 * Compute the number of pages we want to try to move from the
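Taken together, these vm_pageout.c hunks run the same scan body twice:
first over PQ_CACHE, then over PQ_INACTIVE.  A minimal control-flow
sketch, with the scan body reduced to a hypothetical scan_one_queue()
helper (not a kernel function):

	int pqueue;

	pqueue = PQ_CACHE;			/* clean, cheap pages first */
again:
	scan_one_queue(&vmd->vmd_pagequeues[pqueue]);
	if (pqueue == PQ_CACHE) {
		pqueue = PQ_INACTIVE;		/* then the usual inactive scan */
		goto again;
	}

Because pages are placed on PQ_CACHE only when clean, the widened test
"m->valid == 0 || m->dirty == 0" lets the first pass free them outright,
where the old code would have handed them to vm_page_cache().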
Index: vm/vm_radix.c
===================================================================
--- vm/vm_radix.c	(revision 254308)
+++ vm/vm_radix.c	(working copy)
@@ -341,8 +341,6 @@ vm_radix_insert(struct vm_radix *rtree, vm_page_t
 	index = page->pindex;
 
-restart:
-
 	/*
 	 * The owner of record for root is not really important because it
 	 * will never be used.
 	 */
@@ -361,31 +359,10 @@ vm_radix_insert(struct vm_radix *rtree, vm_page_t
 			    __func__, (uintmax_t)index);
 		clev = vm_radix_keydiff(m->pindex, index);
 
-		/*
-		 * During node allocation the trie that is being
-		 * walked can be modified because of recursing radix
-		 * trie operations.
-		 * If this is the case, the recursing functions signal
-		 * such situation and the insert operation must
-		 * start from scratch again.
-		 * The freed radix node will then be in the UMA
-		 * caches very likely to avoid the same situation
-		 * to happen.
-		 */
-		rtree->rt_flags |= RT_INSERT_INPROG;
 		tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1),
 		    2, clev);
-		rtree->rt_flags &= ~RT_INSERT_INPROG;
-		if (tmp == NULL) {
-			rtree->rt_flags &= ~RT_TRIE_MODIFIED;
+		if (tmp == NULL)
 			return (ENOMEM);
-		}
-		if ((rtree->rt_flags & RT_TRIE_MODIFIED) != 0) {
-			rtree->rt_flags &= ~RT_TRIE_MODIFIED;
-			tmp->rn_count = 0;
-			vm_radix_node_put(tmp);
-			goto restart;
-		}
 		*parentp = tmp;
 		vm_radix_addpage(tmp, index, clev, page);
 		vm_radix_addpage(tmp, m->pindex, clev, m);
@@ -411,19 +388,9 @@ vm_radix_insert(struct vm_radix *rtree, vm_page_t
 		clev = vm_radix_keydiff(newind, index);
 
 		/* See the comments above. */
-		rtree->rt_flags |= RT_INSERT_INPROG;
 		tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1),
 		    2, clev);
-		rtree->rt_flags &= ~RT_INSERT_INPROG;
-		if (tmp == NULL) {
-			rtree->rt_flags &= ~RT_TRIE_MODIFIED;
+		if (tmp == NULL)
 			return (ENOMEM);
-		}
-		if ((rtree->rt_flags & RT_TRIE_MODIFIED) != 0) {
-			rtree->rt_flags &= ~RT_TRIE_MODIFIED;
-			tmp->rn_count = 0;
-			vm_radix_node_put(tmp);
-			goto restart;
-		}
 		*parentp = tmp;
 		vm_radix_addpage(tmp, index, clev, page);
 		slot = vm_radix_slot(newind, clev);
@@ -693,20 +660,6 @@ vm_radix_remove(struct vm_radix *rtree, vm_pindex_
 	vm_page_t m;
 	int i, slot;
 
-	/*
-	 * Detect if a page is going to be removed from a trie which is
-	 * already undergoing another trie operation.
-	 * Right now this is only possible for vm_radix_remove() recursing
-	 * into vm_radix_insert().
-	 * If this is the case, the caller must be notified about this
-	 * situation.  It will also takecare to update the RT_TRIE_MODIFIED
-	 * accordingly.
-	 * The RT_TRIE_MODIFIED bit is set here because the remove operation
-	 * will always succeed.
-	 */
-	if ((rtree->rt_flags & RT_INSERT_INPROG) != 0)
-		rtree->rt_flags |= RT_TRIE_MODIFIED;
-
 	rnode = vm_radix_getroot(rtree);
 	if (vm_radix_isleaf(rnode)) {
 		m = vm_radix_topage(rnode);
@@ -761,9 +714,6 @@ vm_radix_reclaim_allnodes(struct vm_radix *rtree)
 {
 	struct vm_radix_node *root;
 
-	KASSERT((rtree->rt_flags & RT_INSERT_INPROG) == 0,
-	    ("vm_radix_reclaim_allnodes: unexpected trie recursion"));
-
 	root = vm_radix_getroot(rtree);
 	if (root == NULL)
 		return;
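The deleted rt_flags protocol existed, per the removed comments, because
node allocation inside vm_radix_insert() could re-enter the VM and end
up running vm_radix_remove() against the same trie, which forced the
insert to detect the modification and restart.  The only trie that could
be mutated that way was, presumably, the per-object cache trie, which
this patch eliminates (see vm_object.c and vm_page.c below); the slow
path therefore collapses to a plain error return:

	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
	if (tmp == NULL)
		return (ENOMEM);	/* no restart protocol left to unwind */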
Index: vm/vm_meter.c
===================================================================
--- vm/vm_meter.c	(revision 254308)
+++ vm/vm_meter.c	(working copy)
@@ -231,7 +231,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 		}
 	}
 	mtx_unlock(&vm_object_list_mtx);
-	total.t_free = cnt.v_free_count + cnt.v_cache_count;
+	total.t_free = cnt.v_free_count;
 	return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
 }
 
Index: vm/vm_phys.c
===================================================================
--- vm/vm_phys.c	(revision 254308)
+++ vm/vm_phys.c	(working copy)
@@ -821,7 +821,7 @@ vm_phys_zero_pages_idle(void)
 	for (;;) {
 		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
 			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
-				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
+				if ((m_tmp->flags & PG_ZERO) == 0) {
 					vm_phys_unfree_page(m_tmp);
 					vm_phys_freecnt_adj(m, -1);
 					mtx_unlock(&vm_page_queue_free_mtx);
Index: vm/swap_pager.c
===================================================================
--- vm/swap_pager.c	(revision 254308)
+++ vm/swap_pager.c	(working copy)
@@ -2316,8 +2316,7 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
 	 * of data we will have to page back in, plus an epsilon so
 	 * the system doesn't become critically low on swap space.
 	 */
-	if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
-	    nblks + nswap_lowat) {
+	if (cnt.v_free_count + swap_pager_avail < nblks + nswap_lowat) {
 		return (ENOMEM);
 	}
 
Index: vm/vm_mmap.c
===================================================================
--- vm/vm_mmap.c	(revision 254308)
+++ vm/vm_mmap.c	(working copy)
@@ -914,9 +914,6 @@ RestartScan:
 				pindex = OFF_TO_IDX(current->offset +
 				    (addr - current->start));
 				m = vm_page_lookup(object, pindex);
-				if (m == NULL &&
-				    vm_page_is_cached(object, pindex))
-					mincoreinfo = MINCORE_INCORE;
 				if (m != NULL && m->valid == 0)
 					m = NULL;
 				if (m != NULL)
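The mincore(2) hunk shows why many call sites simply lose code: a cached
page used to live outside its object's resident set, so mincore needed
the extra vm_page_is_cached() probe to report it as in core.  A cached
page now stays resident, and the ordinary lookup already covers it;
condensed from the surviving context above:

	m = vm_page_lookup(object, pindex);	/* finds PQ_CACHE pages too */
	if (m != NULL && m->valid != 0)
		mincoreinfo = MINCORE_INCORE;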
Index: vm/vm_object.c
===================================================================
--- vm/vm_object.c	(revision 254308)
+++ vm/vm_object.c	(working copy)
@@ -201,12 +201,9 @@ vm_object_zinit(void *mem, int size, int flags)
 
 	/* These are true for any object that has been freed */
 	object->rtree.rt_root = 0;
-	object->rtree.rt_flags = 0;
 	object->paging_in_progress = 0;
 	object->resident_page_count = 0;
 	object->shadow_count = 0;
-	object->cache.rt_root = 0;
-	object->cache.rt_flags = 0;
 	return (0);
 }
 
@@ -779,9 +776,6 @@ vm_object_terminate(vm_object_t object)
 	if (__predict_false(!LIST_EMPTY(&object->rvq)))
 		vm_reserv_break_all(object);
 #endif
-	if (__predict_false(!vm_object_cache_is_empty(object)))
-		vm_page_cache_free(object, 0, 0);
-
 	/*
 	 * Let the pager know object is dead.
 	 */
@@ -1118,13 +1112,6 @@ shadowlookup:
 		} else if ((tobject->flags & OBJ_UNMANAGED) != 0)
 			goto unlock_tobject;
 		m = vm_page_lookup(tobject, tpindex);
-		if (m == NULL && advise == MADV_WILLNEED) {
-			/*
-			 * If the page is cached, reactivate it.
-			 */
-			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
-			    VM_ALLOC_NOBUSY);
-		}
 		if (m == NULL) {
 			/*
 			 * There may be swap even if there is no backing page
@@ -1389,19 +1376,6 @@ retry:
 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
 		TAILQ_FOREACH(m, &new_object->memq, listq)
 			vm_page_xunbusy(m);
-
-		/*
-		 * Transfer any cached pages from orig_object to new_object.
-		 * If swap_pager_copy() found swapped out pages within the
-		 * specified range of orig_object, then it changed
-		 * new_object's type to OBJT_SWAP when it transferred those
-		 * pages to new_object.  Otherwise, new_object's type
-		 * should still be OBJT_DEFAULT and orig_object should not
-		 * contain any cached pages within the specified range.
-		 */
-		if (__predict_false(!vm_object_cache_is_empty(orig_object)))
-			vm_page_cache_transfer(orig_object, offidxstart,
-			    new_object);
 	}
 	VM_OBJECT_WUNLOCK(orig_object);
 	VM_OBJECT_WUNLOCK(new_object);
@@ -1754,13 +1728,6 @@ vm_object_collapse(vm_object_t object)
 				    backing_object,
 				    object,
 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
-
-				/*
-				 * Free any cached pages from backing_object.
-				 */
-				if (__predict_false(
-				    !vm_object_cache_is_empty(backing_object)))
-					vm_page_cache_free(backing_object, 0, 0);
 			}
 			/*
 			 * Object now shadows whatever backing_object did.
@@ -1958,8 +1925,7 @@ next:
 	}
 	vm_object_pip_wakeup(object);
 skipmemq:
-	if (__predict_false(!vm_object_cache_is_empty(object)))
-		vm_page_cache_free(object, start, end);
+	return;
 }
 
 /*
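The four vm_object.c removals are one simplification viewed from
different angles: an object no longer carries a second radix trie of
cached pages, so termination, madvise(MADV_WILLNEED), splitting and
collapsing have nothing extra to free, reactivate, or transfer.  Object
teardown, for example, reduces to freeing the resident pages, which now
include any cached ones; a hedged sketch, not the literal
vm_object_terminate() body:

	/* PQ_CACHE pages remain on the object's memq like any resident page. */
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		vm_page_lock(p);
		vm_page_free(p);
		vm_page_unlock(p);
	}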
Index: vm/vm_reserv.c
===================================================================
--- vm/vm_reserv.c	(revision 254308)
+++ vm/vm_reserv.c	(working copy)
@@ -466,7 +466,7 @@ found:
 		return (NULL);
 	/* Handle vm_page_rename(m, new_object, ...). */
 	for (i = 0; i < npages; i++)
-		if ((rv->pages[index + i].flags & (PG_CACHED | PG_FREE)) == 0)
+		if ((rv->pages[index + i].flags & PG_FREE) == 0)
 			return (NULL);
 	for (i = 0; i < npages; i++)
 		vm_reserv_populate(rv);
@@ -585,7 +585,7 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex
 found:
 	m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
 	/* Handle vm_page_rename(m, new_object, ...). */
-	if ((m->flags & (PG_CACHED | PG_FREE)) == 0)
+	if ((m->flags & PG_FREE) == 0)
 		return (NULL);
 	vm_reserv_populate(rv);
 	return (m);
@@ -611,7 +611,7 @@ vm_reserv_break_all(vm_object_t object)
 		LIST_REMOVE(rv, objq);
 		rv->object = NULL;
 		for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
-			if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
+			if ((rv->pages[i].flags & PG_FREE) != 0)
 				vm_phys_free_pages(&rv->pages[i], 0);
 			else
 				rv->popcnt--;
@@ -639,9 +639,6 @@ vm_reserv_free_page(vm_page_t m)
 	rv = vm_reserv_from_page(m);
 	if (rv->object == NULL)
 		return (FALSE);
-	if ((m->flags & PG_CACHED) != 0 && m->pool != VM_FREEPOOL_CACHE)
-		vm_phys_set_pool(VM_FREEPOOL_CACHE, rv->pages,
-		    VM_LEVEL_0_ORDER);
 	vm_reserv_depopulate(rv);
 	return (TRUE);
 }
@@ -686,62 +683,6 @@ vm_reserv_level_iffullpop(vm_page_t m)
 }
 
 /*
- * Prepare for the reactivation of a cached page.
- *
- * First, suppose that the given page "m" was allocated individually, i.e., not
- * as part of a reservation, and cached.  Then, suppose a reservation
- * containing "m" is allocated by the same object.  Although "m" and the
- * reservation belong to the same object, "m"'s pindex may not match the
- * reservation's.
- *
- * The free page queue must be locked.
- */
-boolean_t
-vm_reserv_reactivate_page(vm_page_t m)
-{
-	vm_reserv_t rv;
-	int i, m_index;
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	rv = vm_reserv_from_page(m);
-	if (rv->object == NULL)
-		return (FALSE);
-	KASSERT((m->flags & PG_CACHED) != 0,
-	    ("vm_reserv_uncache_page: page %p is not cached", m));
-	if (m->object == rv->object &&
-	    m->pindex - rv->pindex == VM_RESERV_INDEX(m->object, m->pindex))
-		vm_reserv_populate(rv);
-	else {
-		KASSERT(rv->inpartpopq,
-		    ("vm_reserv_uncache_page: reserv %p's inpartpopq is FALSE",
-		    rv));
-		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
-		rv->inpartpopq = FALSE;
-		LIST_REMOVE(rv, objq);
-		rv->object = NULL;
-		/* Don't vm_phys_free_pages(m, 0). */
-		m_index = m - rv->pages;
-		for (i = 0; i < m_index; i++) {
-			if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
-				vm_phys_free_pages(&rv->pages[i], 0);
-			else
-				rv->popcnt--;
-		}
-		for (i++; i < VM_LEVEL_0_NPAGES; i++) {
-			if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
-				vm_phys_free_pages(&rv->pages[i], 0);
-			else
-				rv->popcnt--;
-		}
-		KASSERT(rv->popcnt == 0,
-		    ("vm_reserv_uncache_page: reserv %p's popcnt is corrupted",
-		    rv));
-		vm_reserv_broken++;
-	}
-	return (TRUE);
-}
-
-/*
  * Breaks the given partially-populated reservation, releasing its cached and
  * free pages to the physical memory allocator.
 *
@@ -762,7 +703,7 @@ vm_reserv_reclaim(vm_reserv_t rv)
 	LIST_REMOVE(rv, objq);
 	rv->object = NULL;
 	for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
-		if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
+		if ((rv->pages[i].flags & PG_FREE) != 0)
 			vm_phys_free_pages(&rv->pages[i], 0);
 		else
 			rv->popcnt--;
@@ -821,7 +762,7 @@ vm_reserv_reclaim_contig(u_long npages, vm_paddr_t
 		}
 		pa_length = 0;
 		for (i = 0; i < VM_LEVEL_0_NPAGES; i++)
-			if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) {
+			if ((rv->pages[i].flags & PG_FREE) != 0) {
 				pa_length += PAGE_SIZE;
 				if (pa_length == PAGE_SIZE) {
 					pa = VM_PAGE_TO_PHYS(&rv->pages[i]);
Index: vm/vm_reserv.h
===================================================================
--- vm/vm_reserv.h	(revision 254308)
+++ vm/vm_reserv.h	(working copy)
@@ -54,7 +54,6 @@ void		vm_reserv_break_all(vm_object_t object);
 boolean_t	vm_reserv_free_page(vm_page_t m);
 void		vm_reserv_init(void);
 int		vm_reserv_level_iffullpop(vm_page_t m);
-boolean_t	vm_reserv_reactivate_page(vm_page_t m);
 boolean_t	vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low,
 		    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 boolean_t	vm_reserv_reclaim_inactive(void);
Index: vm/_vm_radix.h
===================================================================
--- vm/_vm_radix.h	(revision 254308)
+++ vm/_vm_radix.h	(working copy)
@@ -36,12 +36,8 @@
 */
 struct vm_radix {
 	uintptr_t	rt_root;
-	uint8_t		rt_flags;
 };
 
-#define	RT_INSERT_INPROG	0x01
-#define	RT_TRIE_MODIFIED	0x02
-
 #ifdef _KERNEL
 
 static __inline boolean_t
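In vm_reserv.c the recurring edit is mechanical: with PG_CACHED gone, a
page belonging to a reservation is reclaimable exactly when it is free,
so every (PG_CACHED | PG_FREE) test collapses to PG_FREE, and
vm_reserv_reactivate_page() loses its reason to exist.  The surviving
reclaim-loop pattern, taken from the hunks above with comments added:

	for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
		if ((rv->pages[i].flags & PG_FREE) != 0)
			vm_phys_free_pages(&rv->pages[i], 0);	/* truly free */
		else
			rv->popcnt--;	/* allocated: stays with its object */
	}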
Index: vm/vm_fault.c
===================================================================
--- vm/vm_fault.c	(revision 254308)
+++ vm/vm_fault.c	(working copy)
@@ -1001,8 +1001,10 @@ vm_fault_cache_behind(const struct faultstate *fs,
 			vm_page_aflag_clear(m, PGA_REFERENCED);
 			if (m->dirty != 0)
 				vm_page_deactivate(m);
+			else if (m->queue != PQ_CACHE)
+				vm_page_cache(m);
 			else
-				vm_page_cache(m);
+				m_prev = NULL;
 		}
 		vm_page_unlock(m);
 	}
@@ -1458,8 +1460,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marr
 		for (i = 0, tpindex = pindex - 1; tpindex >= startpindex &&
 		    tpindex < pindex; i++, tpindex--) {
 
-			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
-			    VM_ALLOC_IFNOTCACHED);
+			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
 			if (rtm == NULL) {
 				/*
 				 * Shift the allocated pages to the
@@ -1497,8 +1498,7 @@ vm_fault_additional_pages(m, rbehind, rahead, marr
 
 		for (; tpindex < endpindex; i++, tpindex++) {
 
-			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
-			    VM_ALLOC_IFNOTCACHED);
+			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
 			if (rtm == NULL) {
 				break;
 			}
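Since a clean page can now already be sitting on PQ_CACHE while still
resident in its object, vm_fault_cache_behind() must distinguish three
cases instead of two.  The new branch, restated with comments (m_prev is
the loop's bookkeeping variable from the surrounding code):

	if (m->dirty != 0)
		vm_page_deactivate(m);		/* dirty: launder first */
	else if (m->queue != PQ_CACHE)
		vm_page_cache(m);		/* clean, not yet cached */
	else
		m_prev = NULL;			/* already on PQ_CACHE */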
Index: vm/vm_page.c
===================================================================
--- vm/vm_page.c	(revision 254308)
+++ vm/vm_page.c	(working copy)
@@ -144,8 +144,7 @@ SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFL
 
 static uma_zone_t fakepg_zone;
 
-static struct vnode *vm_page_alloc_init(vm_page_t m);
-static void vm_page_cache_turn_free(vm_page_t m);
+static void vm_page_alloc_init(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(int queue, vm_page_t m);
 static void vm_page_init_fakepg(void *dummy);
@@ -259,6 +258,10 @@ vm_page_domain_init(struct vm_domain *vmd)
 	    "vm active pagequeue";
 	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
 	    &cnt.v_active_count;
+	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_CACHE].pq_name) =
+	    "vm cache pagequeue";
+	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_CACHE].pq_vcnt) =
+	    &cnt.v_cache_count;
 	vmd->vmd_page_count = 0;
 	vmd->vmd_free_count = 0;
 	vmd->vmd_segs = 0;
@@ -916,8 +919,6 @@ vm_page_dirty_KBI(vm_page_t m)
 {
 
 	/* These assertions refer to this operation by its public name. */
-	KASSERT((m->flags & PG_CACHED) == 0,
-	    ("vm_page_dirty: page in cache!"));
 	KASSERT(!VM_PAGE_IS_FREE(m),
 	    ("vm_page_dirty: page is free!"));
 	KASSERT(m->valid == VM_PAGE_BITS_ALL,
@@ -1280,142 +1281,6 @@ vm_page_rename(vm_page_t m, vm_object_t new_object
 }
 
 /*
- * Convert all of the given object's cached pages that have a
- * pindex within the given range into free pages.  If the value
- * zero is given for "end", then the range's upper bound is
- * infinity.  If the given object is backed by a vnode and it
- * transitions from having one or more cached pages to none, the
- * vnode's hold count is reduced.
- */
-void
-vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
-{
-	vm_page_t m;
-	boolean_t empty;
-
-	mtx_lock(&vm_page_queue_free_mtx);
-	if (__predict_false(vm_radix_is_empty(&object->cache))) {
-		mtx_unlock(&vm_page_queue_free_mtx);
-		return;
-	}
-	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
-		if (end != 0 && m->pindex >= end)
-			break;
-		vm_radix_remove(&object->cache, m->pindex);
-		vm_page_cache_turn_free(m);
-	}
-	empty = vm_radix_is_empty(&object->cache);
-	mtx_unlock(&vm_page_queue_free_mtx);
-	if (object->type == OBJT_VNODE && empty)
-		vdrop(object->handle);
-}
-
-/*
- * Returns the cached page that is associated with the given
- * object and offset.  If, however, none exists, returns NULL.
- *
- * The free page queue must be locked.
- */
-static inline vm_page_t
-vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	return (vm_radix_lookup(&object->cache, pindex));
-}
-
-/*
- * Remove the given cached page from its containing object's
- * collection of cached pages.
- *
- * The free page queue must be locked.
- */
-static void
-vm_page_cache_remove(vm_page_t m)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	KASSERT((m->flags & PG_CACHED) != 0,
-	    ("vm_page_cache_remove: page %p is not cached", m));
-	vm_radix_remove(&m->object->cache, m->pindex);
-	m->object = NULL;
-	cnt.v_cache_count--;
-}
-
-/*
- * Transfer all of the cached pages with offset greater than or
- * equal to 'offidxstart' from the original object's cache to the
- * new object's cache.  However, any cached pages with offset
- * greater than or equal to the new object's size are kept in the
- * original object.  Initially, the new object's cache must be
- * empty.  Offset 'offidxstart' in the original object must
- * correspond to offset zero in the new object.
- *
- * The new object must be locked.
- */
-void
-vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
-    vm_object_t new_object)
-{
-	vm_page_t m;
-
-	/*
-	 * Insertion into an object's collection of cached pages
-	 * requires the object to be locked.  In contrast, removal does
-	 * not.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(new_object);
-	KASSERT(vm_radix_is_empty(&new_object->cache),
-	    ("vm_page_cache_transfer: object %p has cached pages",
-	    new_object));
-	mtx_lock(&vm_page_queue_free_mtx);
-	while ((m = vm_radix_lookup_ge(&orig_object->cache,
-	    offidxstart)) != NULL) {
-		/*
-		 * Transfer all of the pages with offset greater than or
-		 * equal to 'offidxstart' from the original object's
-		 * cache to the new object's cache.
-		 */
-		if ((m->pindex - offidxstart) >= new_object->size)
-			break;
-		vm_radix_remove(&orig_object->cache, m->pindex);
-		/* Update the page's object and offset. */
-		m->object = new_object;
-		m->pindex -= offidxstart;
-		if (vm_radix_insert(&new_object->cache, m))
-			vm_page_cache_turn_free(m);
-	}
-	mtx_unlock(&vm_page_queue_free_mtx);
-}
-
-/*
- * Returns TRUE if a cached page is associated with the given object and
- * offset, and FALSE otherwise.
- *
- * The object must be locked.
- */
-boolean_t
-vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
-{
-	vm_page_t m;
-
-	/*
-	 * Insertion into an object's collection of cached pages requires the
-	 * object to be locked.  Therefore, if the object is locked and the
-	 * object's collection is empty, there is no need to acquire the free
-	 * page queues lock in order to prove that the specified page doesn't
-	 * exist.
-	 */
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	if (__predict_true(vm_object_cache_is_empty(object)))
-		return (FALSE);
-	mtx_lock(&vm_page_queue_free_mtx);
-	m = vm_page_cache_lookup(object, pindex);
-	mtx_unlock(&vm_page_queue_free_mtx);
-	return (m != NULL);
-}
-
-/*
  * vm_page_alloc:
 *
  *	Allocate and return a page that is associated with the specified
@@ -1431,9 +1296,6 @@ vm_page_rename(vm_page_t m, vm_object_t new_object
  *	optional allocation flags:
  *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
  *				intends to allocate
- *	VM_ALLOC_IFCACHED	return page only if it is cached
- *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
- *				is cached
  *	VM_ALLOC_NOBUSY		do not exclusive busy the page
  *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
  *	VM_ALLOC_NOOBJ		page is not associated with an object and
@@ -1447,8 +1309,6 @@ vm_page_rename(vm_page_t m, vm_object_t new_object
 vm_page_t
 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
-	struct vnode *vp = NULL;
-	vm_object_t m_object;
 	vm_page_t m, mpred;
 	int flags, req_class;
 
@@ -1475,47 +1335,24 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pind
 		KASSERT(mpred == NULL || mpred->pindex != pindex,
 		    ("vm_page_alloc: pindex already allocated"));
 	}
-
 	/*
 	 * The page allocation request can came from consumers which already
 	 * hold the free page queue mutex, like vm_page_insert() in
 	 * vm_page_cache().
 	 */
 	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
-	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
+	if (cnt.v_free_count > cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
-	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
+	    cnt.v_free_count > cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
-	    cnt.v_free_count + cnt.v_cache_count > 0)) {
-		/*
-		 * Allocate from the free queue if the number of free pages
-		 * exceeds the minimum for the request class.
-		 */
-		if (object != NULL &&
-		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
-			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
-				mtx_unlock(&vm_page_queue_free_mtx);
-				return (NULL);
-			}
-			if (vm_phys_unfree_page(m))
-				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
+	    cnt.v_free_count > 0)) {
+		m = NULL;
 #if VM_NRESERVLEVEL > 0
-			else if (!vm_reserv_reactivate_page(m))
-#else
-			else
+		if (object != NULL && (object->flags &
+		    (OBJ_COLORED | OBJ_FICTITIOUS)) == OBJ_COLORED)
+			m = vm_reserv_alloc_page(object, pindex, mpred);
+		if (m == NULL) {
 #endif
-			panic("vm_page_alloc: cache page %p is missing"
-			    " from the free queue", m);
-		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
-			mtx_unlock(&vm_page_queue_free_mtx);
-			return (NULL);
-#if VM_NRESERVLEVEL > 0
-		} else if (object == NULL || (object->flags & (OBJ_COLORED |
-		    OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
-		    vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
-#else
-		} else {
-#endif
 			m = vm_phys_alloc_pages(object != NULL ?
 			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
 #if VM_NRESERVLEVEL > 0
@@ -1524,8 +1361,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pind
 			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
 			}
-#endif
 		}
+#endif
 	} else {
 		/*
 		 * Not allocatable, give up.
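The admission check at the top of vm_page_alloc() is worth isolating,
since the same three-tier test recurs in vm_page_alloc_contig() and
vm_page_alloc_freelist() below.  A standalone restatement (hypothetical
helper; the kernel open-codes it):

	static int
	vm_page_alloc_admit(int req_class)
	{

		/* Only truly free pages count now; PQ_CACHE pages do not. */
		if (cnt.v_free_count > cnt.v_free_reserved)
			return (1);	/* normal requests succeed */
		if (req_class == VM_ALLOC_SYSTEM &&
		    cnt.v_free_count > cnt.v_interrupt_free_min)
			return (1);	/* system requests dig deeper */
		if (req_class == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)
			return (1);	/* interrupt requests may take the last page */
		return (0);
	}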
@@ -1551,30 +1388,14 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pind
 	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
 	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
 	    pmap_page_get_memattr(m)));
-	if ((m->flags & PG_CACHED) != 0) {
-		KASSERT((m->flags & PG_ZERO) == 0,
-		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
-		KASSERT(m->valid != 0,
-		    ("vm_page_alloc: cached page %p is invalid", m));
-		if (m->object == object && m->pindex == pindex)
-			cnt.v_reactivated++;
-		else
-			m->valid = 0;
-		m_object = m->object;
-		vm_page_cache_remove(m);
-		if (m_object->type == OBJT_VNODE &&
-		    vm_object_cache_is_empty(m_object))
-			vp = m_object->handle;
-	} else {
-		KASSERT(VM_PAGE_IS_FREE(m),
-		    ("vm_page_alloc: page %p is not free", m));
-		KASSERT(m->valid == 0,
-		    ("vm_page_alloc: free page %p is valid", m));
-		vm_phys_freecnt_adj(m, -1);
-	}
+	KASSERT(VM_PAGE_IS_FREE(m),
+	    ("vm_page_alloc: page %p is not free", m));
+	KASSERT(m->valid == 0,
+	    ("vm_page_alloc: free page %p is valid", m));
+	vm_phys_freecnt_adj(m, -1);
 
 	/*
-	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
+	 * Only the PG_ZERO flag is inherited.  The PG_FREE flag
 	 * must be cleared before the free page queues lock is released.
 	 */
 	flags = 0;
@@ -1607,9 +1428,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pind
 
 	if (object != NULL) {
 		if (vm_page_insert_after(m, object, pindex, mpred)) {
-			/* See the comment below about hold count. */
-			if (vp != NULL)
-				vdrop(vp);
 			pagedaemon_wakeup();
 			m->object = NULL;
 			vm_page_free(m);
@@ -1624,15 +1442,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pind
 		m->pindex = pindex;
 
 	/*
-	 * The following call to vdrop() must come after the above call
-	 * to vm_page_insert() in case both affect the same object and
-	 * vnode.  Otherwise, the affected vnode's hold count could
-	 * temporarily become zero.
-	 */
-	if (vp != NULL)
-		vdrop(vp);
-
-	/*
 	 * Don't wakeup too often - wakeup the pageout daemon when
 	 * we would be nearly out of memory.
 	 */
@@ -1642,16 +1451,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pind
 	return (m);
 }
 
-static void
-vm_page_alloc_contig_vdrop(struct spglist *lst)
-{
-
-	while (!SLIST_EMPTY(lst)) {
-		vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
-		SLIST_REMOVE_HEAD(lst, plinks.s.ss);
-	}
-}
-
 /*
  * vm_page_alloc_contig:
 *
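All of the vdrop() plumbing removed here and in the following hunks
existed because a vnode was held while its object owned cached pages:
allocating an object's last cached page obliged vm_page_alloc() to
release that hold, and only after insertion, so the hold count never
touched zero in between.  With PG_CACHED gone, the allocator only ever
dequeues truly free pages, there is no vnode to release, and the contig
path shrinks to:

	for (m = m_ret; m < &m_ret[npages]; m++)
		vm_page_alloc_init(m);	/* returns void now; nothing to vdrop() */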
@@ -1695,8 +1494,6 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex
     u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr)
 {
-	struct vnode *drop;
-	struct spglist deferred_vdrop_list;
 	vm_page_t m, m_tmp, m_ret;
 	u_int flags, oflags;
 	int req_class;
@@ -1722,13 +1519,12 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex
 	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 		req_class = VM_ALLOC_SYSTEM;
 
-	SLIST_INIT(&deferred_vdrop_list);
 	mtx_lock(&vm_page_queue_free_mtx);
-	if (cnt.v_free_count + cnt.v_cache_count >= npages +
+	if (cnt.v_free_count >= npages +
 	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
-	    cnt.v_free_count + cnt.v_cache_count >= npages +
+	    cnt.v_free_count >= npages +
 	    cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
-	    cnt.v_free_count + cnt.v_cache_count >= npages)) {
+	    cnt.v_free_count >= npages)) {
 #if VM_NRESERVLEVEL > 0
 retry:
 		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
@@ -1744,21 +1540,8 @@ retry:
 			return (NULL);
 		}
 		if (m_ret != NULL)
-			for (m = m_ret; m < &m_ret[npages]; m++) {
-				drop = vm_page_alloc_init(m);
-				if (drop != NULL) {
-					/*
-					 * Enqueue the vnode for deferred vdrop().
-					 *
-					 * Once the pages are removed from the free
-					 * page list, "pageq" can be safely abused to
-					 * construct a short-lived list of vnodes.
-					 */
-					m->plinks.s.pv = drop;
-					SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
-					    plinks.s.ss);
-				}
-			}
+			for (m = m_ret; m < &m_ret[npages]; m++)
+				vm_page_alloc_init(m);
 		else {
 #if VM_NRESERVLEVEL > 0
 			if (vm_reserv_reclaim_contig(npages, low, high, alignment,
@@ -1802,8 +1585,6 @@ retry:
 		m->oflags = oflags;
 		if (object != NULL) {
 			if (vm_page_insert(m, object, pindex)) {
-				vm_page_alloc_contig_vdrop(
-				    &deferred_vdrop_list);
 				if (vm_paging_needed())
 					pagedaemon_wakeup();
 				for (m_tmp = m, m = m_ret;
@@ -1820,7 +1601,6 @@ retry:
 			pmap_page_set_memattr(m, memattr);
 		pindex++;
 	}
-	vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
 	if (vm_paging_needed())
 		pagedaemon_wakeup();
 	return (m_ret);
@@ -1828,17 +1608,14 @@ retry:
 
 /*
  * Initialize a page that has been freshly dequeued from a freelist.
- * The caller has to drop the vnode returned, if it is not NULL.
 *
  * This function may only be used to initialize unmanaged pages.
 *
 * To be called with vm_page_queue_free_mtx held.
 */
-static struct vnode *
+static void
 vm_page_alloc_init(vm_page_t m)
 {
-	struct vnode *drop;
-	vm_object_t m_object;
 
 	KASSERT(m->queue == PQ_NONE,
 	    ("vm_page_alloc_init: page %p has unexpected queue %d",
@@ -1855,28 +1632,15 @@ vm_page_alloc_init(vm_page_t m)
 	    ("vm_page_alloc_init: page %p has unexpected memattr %d", m,
 	    pmap_page_get_memattr(m)));
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	drop = NULL;
-	if ((m->flags & PG_CACHED) != 0) {
-		KASSERT((m->flags & PG_ZERO) == 0,
-		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
-		m->valid = 0;
-		m_object = m->object;
-		vm_page_cache_remove(m);
-		if (m_object->type == OBJT_VNODE &&
-		    vm_object_cache_is_empty(m_object))
-			drop = m_object->handle;
-	} else {
-		KASSERT(VM_PAGE_IS_FREE(m),
-		    ("vm_page_alloc_init: page %p is not free", m));
-		KASSERT(m->valid == 0,
-		    ("vm_page_alloc_init: free page %p is valid", m));
-		vm_phys_freecnt_adj(m, -1);
-		if ((m->flags & PG_ZERO) != 0)
-			vm_page_zero_count--;
-	}
+	KASSERT(VM_PAGE_IS_FREE(m),
+	    ("vm_page_alloc_init: page %p is not free", m));
+	KASSERT(m->valid == 0,
+	    ("vm_page_alloc_init: free page %p is valid", m));
+	vm_phys_freecnt_adj(m, -1);
+	if ((m->flags & PG_ZERO) != 0)
+		vm_page_zero_count--;
 	/* Don't clear the PG_ZERO flag; we'll need it later. */
 	m->flags &= PG_ZERO;
-	return (drop);
 }
@@ -1902,7 +1666,6 @@ vm_page_alloc_init(vm_page_t m)
 vm_page_t
 vm_page_alloc_freelist(int flind, int req)
 {
-	struct vnode *drop;
 	vm_page_t m;
 	u_int flags;
 	int req_class;
@@ -1919,11 +1682,11 @@ vm_page_alloc_freelist(int flind, int req)
 	 * Do not allocate reserved pages unless the req has asked for it.
 	 */
 	mtx_lock(&vm_page_queue_free_mtx);
-	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
+	if (cnt.v_free_count > cnt.v_free_reserved ||
 	    (req_class == VM_ALLOC_SYSTEM &&
-	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
+	    cnt.v_free_count > cnt.v_interrupt_free_min) ||
 	    (req_class == VM_ALLOC_INTERRUPT &&
-	    cnt.v_free_count + cnt.v_cache_count > 0))
+	    cnt.v_free_count > 0))
 		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
 	else {
 		mtx_unlock(&vm_page_queue_free_mtx);
@@ -1936,7 +1699,7 @@ vm_page_alloc_freelist(int flind, int req)
 		mtx_unlock(&vm_page_queue_free_mtx);
 		return (NULL);
 	}
-	drop = vm_page_alloc_init(m);
+	vm_page_alloc_init(m);
 	mtx_unlock(&vm_page_queue_free_mtx);
 
 	/*
@@ -1957,8 +1720,6 @@ vm_page_alloc_freelist(int flind, int req)
 	}
 	/* Unmanaged pages don't use "act_count". */
 	m->oflags = VPO_UNMANAGED;
-	if (drop != NULL)
-		vdrop(drop);
 	if (vm_paging_needed())
 		pagedaemon_wakeup();
 	return (m);
@@ -2159,9 +1920,8 @@ vm_page_activate(vm_page_t m)
 /*
  *	vm_page_free_wakeup:
 *
- *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
- *	routine is called when a page has been added to the cache or free
- *	queues.
+ *	Helper routine for vm_page_free_toq().  This routine is called when
+ *	a page has been added to the free queue.
 *
  *	The page queues must be locked.
 */
@@ -2175,7 +1935,7 @@ vm_page_free_wakeup(void)
 	 * some free.
 	 */
 	if (vm_pageout_pages_needed &&
-	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
+	    cnt.v_free_count >= cnt.v_pageout_free_min) {
 		wakeup(&vm_pageout_pages_needed);
 		vm_pageout_pages_needed = 0;
 	}
@@ -2191,28 +1951,6 @@ vm_page_free_wakeup(void)
 }
 
 /*
- * Turn a cached page into a free page, by changing its attributes.
- * Keep the statistics up-to-date.
- *
- * The free page queue must be locked.
- */
-static void
-vm_page_cache_turn_free(vm_page_t m)
-{
-
-	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-
-	m->object = NULL;
-	m->valid = 0;
-	/* Clear PG_CACHED and set PG_FREE. */
-	m->flags ^= PG_CACHED | PG_FREE;
-	KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
-	    ("vm_page_cache_free: page %p has inconsistent flags", m));
-	cnt.v_cache_count--;
-	vm_phys_freecnt_adj(m, 1);
-}
-
-/*
  * vm_page_free_toq:
 *
  *	Returns the given page to the free list,
@@ -2490,7 +2228,6 @@ void
 vm_page_cache(vm_page_t m)
 {
 	vm_object_t object;
-	boolean_t cache_was_empty;
 
 	vm_page_lock_assert(m, MA_OWNED);
 	object = m->object;
@@ -2512,8 +2249,8 @@ vm_page_cache(vm_page_t m)
 		vm_page_free(m);
 		return;
 	}
-	KASSERT((m->flags & PG_CACHED) == 0,
-	    ("vm_page_cache: page %p is already cached", m));
+	if (m->queue == PQ_CACHE)
+		return;
 
 	/*
 	 * Remove the page from the paging queues.
@@ -2521,59 +2258,10 @@ vm_page_cache(vm_page_t m)
 	vm_page_remque(m);
 
 	/*
-	 * Remove the page from the object's collection of resident
-	 * pages.
+	 * Insert the page into the cache page queue.
 	 */
-	vm_radix_remove(&object->rtree, m->pindex);
-	TAILQ_REMOVE(&object->memq, m, listq);
-	object->resident_page_count--;
-
-	/*
-	 * Restore the default memory attribute to the page.
-	 */
-	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
-		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
-
-	/*
-	 * Insert the page into the object's collection of cached pages
-	 * and the physical memory allocator's cache/free page queues.
-	 */
 	m->flags &= ~PG_ZERO;
-	mtx_lock(&vm_page_queue_free_mtx);
-	cache_was_empty = vm_radix_is_empty(&object->cache);
-	if (vm_radix_insert(&object->cache, m)) {
-		mtx_unlock(&vm_page_queue_free_mtx);
-		if (object->resident_page_count == 0)
-			vdrop(object->handle);
-		m->object = NULL;
-		vm_page_free(m);
-		return;
-	}
-	m->flags |= PG_CACHED;
-	cnt.v_cache_count++;
-	PCPU_INC(cnt.v_tcached);
-#if VM_NRESERVLEVEL > 0
-	if (!vm_reserv_free_page(m)) {
-#else
-	if (TRUE) {
-#endif
-		vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
-		vm_phys_free_pages(m, 0);
-	}
-	vm_page_free_wakeup();
-	mtx_unlock(&vm_page_queue_free_mtx);
-
-	/*
-	 * Increment the vnode's hold count if this is the object's only
-	 * cached page.  Decrement the vnode's hold count if this was
-	 * the object's only resident page.
-	 */
-	if (object->type == OBJT_VNODE) {
-		if (cache_was_empty && object->resident_page_count != 0)
-			vhold(object->handle);
-		else if (!cache_was_empty && object->resident_page_count == 0)
-			vdrop(object->handle);
-	}
+	vm_page_enqueue(PQ_CACHE, m);
 }
 
 /*
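The rewritten vm_page_cache() is the heart of the patch: caching a page
no longer strips it out of its object and hands it back to the physical
allocator — the page stays resident and merely changes paging queues.
The new tail, restated with comments (locking as in the hunk above):

	if (m->queue == PQ_CACHE)
		return;				/* already cached */
	vm_page_remque(m);			/* leave the current paging queue */
	m->flags &= ~PG_ZERO;			/* drop the zeroed-page hint */
	vm_page_enqueue(PQ_CACHE, m);		/* stay resident; change queues */

Everything deleted above — the second trie, the PG_CACHED flag juggling,
VM_FREEPOOL_CACHE, and the vnode hold-count dance — existed to track
pages that were simultaneously free and name-addressable; keeping cached
pages resident makes that hybrid state unrepresentable.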
Index: vm/vm_page.h
===================================================================
--- vm/vm_page.h	(revision 254308)
+++ vm/vm_page.h	(working copy)
@@ -205,9 +205,10 @@ struct vm_page {
 #define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)
 
 #define	PQ_NONE		255
-#define	PQ_INACTIVE	0
-#define	PQ_ACTIVE	1
-#define	PQ_COUNT	2
+#define	PQ_ACTIVE	0
+#define	PQ_INACTIVE	1
+#define	PQ_CACHE	2
+#define	PQ_COUNT	3
 
 TAILQ_HEAD(pglist, vm_page);
 SLIST_HEAD(spglist, vm_page);
@@ -322,7 +323,6 @@ extern struct mtx_padalign pa_lock[];
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
-#define	PG_CACHED	0x0001		/* page is cached */
 #define	PG_FREE		0x0002		/* page is free */
 #define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
 #define	PG_ZERO		0x0008		/* page is zeroed */
@@ -392,8 +392,6 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define	VM_ALLOC_RETRY		0x0080	/* Mandatory with vm_page_grab() */
 #define	VM_ALLOC_NOOBJ		0x0100	/* No associated object */
 #define	VM_ALLOC_NOBUSY		0x0200	/* Do not busy the page */
-#define	VM_ALLOC_IFCACHED	0x0400	/* Fail if the page is not cached */
-#define	VM_ALLOC_IFNOTCACHED	0x0800	/* Fail if the page is cached */
 #define	VM_ALLOC_IGN_SBUSY	0x1000	/* vm_page_grab() only */
 #define	VM_ALLOC_NODUMP		0x2000	/* don't include in dump */
 #define	VM_ALLOC_SBUSY		0x4000	/* Shared busy the page */
@@ -437,8 +435,6 @@ vm_page_t vm_page_alloc_contig(vm_object_t object,
 vm_page_t vm_page_alloc_freelist(int, int);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
 void vm_page_cache(vm_page_t);
-void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
-void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
 int vm_page_try_to_cache (vm_page_t);
 int vm_page_try_to_free (vm_page_t);
 void vm_page_deactivate (vm_page_t);
Index: vm/vnode_pager.c
===================================================================
--- vm/vnode_pager.c	(revision 254308)
+++ vm/vnode_pager.c	(working copy)
@@ -438,10 +438,6 @@ vnode_pager_setsize(vp, nsize)
 			 * replacement from working properly.
 			 */
 			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
-		} else if ((nsize & PAGE_MASK) &&
-		    vm_page_is_cached(object, OFF_TO_IDX(nsize))) {
-			vm_page_cache_free(object, OFF_TO_IDX(nsize),
-			    nobjsize);
 		}
 	}
 	object->un_pager.vnp.vnp_size = nsize;
@@ -1047,7 +1043,7 @@ vnode_pager_putpages(object, m, count, sync, rtval
 	 * daemon up.  This should be probably be addressed XXX.
 	 */
 
-	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
+	if (cnt.v_free_count < cnt.v_pageout_free_min)
 		sync |= OBJPC_SYNC;
 
 	/*
Index: cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
===================================================================
--- cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(revision 254308)
+++ cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(working copy)
@@ -349,11 +349,7 @@ page_busy(vnode_t *vp, int64_t start, int64_t off,
 				continue;
 			}
 			vm_page_sbusy(pp);
-		} else if (pp == NULL) {
-			pp = vm_page_alloc(obj, OFF_TO_IDX(start),
-			    VM_ALLOC_SYSTEM | VM_ALLOC_IFCACHED |
-			    VM_ALLOC_SBUSY);
-		} else {
+		} else if (pp != NULL) {
 			ASSERT(pp != NULL && !pp->valid);
 			pp = NULL;
 		}
Index: amd64/include/vmparam.h
===================================================================
--- amd64/include/vmparam.h	(revision 254308)
+++ amd64/include/vmparam.h	(working copy)
@@ -95,8 +95,7 @@
  * the pool from which physical pages for page tables and small UMA
 * objects are allocated.
 */
-#define	VM_NFREEPOOL		3
-#define	VM_FREEPOOL_CACHE	2
+#define	VM_NFREEPOOL		2
 #define	VM_FREEPOOL_DEFAULT	0
 #define	VM_FREEPOOL_DIRECT	1
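For out-of-tree code, the visible API change is the disappearance of the
two conditional-allocation flags.  Call sites follow the same mechanical
rewrite applied to kern_exec.c and vm_fault.c above:

	/* before */
	m = vm_page_alloc(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
	/* after: the flag is gone; every allocation is unconditional */
	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);

VM_ALLOC_IFCACHED callers (the madvise() and ZFS page_busy() hunks)
simply lose the reactivation path, since there is no longer a detached
cached state to reactivate from.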
Index: sys/vmmeter.h
===================================================================
--- sys/vmmeter.h	(revision 254308)
+++ sys/vmmeter.h	(working copy)
@@ -97,7 +97,7 @@ struct vmmeter {
 	u_int v_inactive_target; /* (c) pages desired inactive */
 	u_int v_inactive_count;	/* (q) pages inactive */
 	u_int v_cache_count;	/* (f) pages on cache queue */
-	u_int v_cache_min;	/* (c) min pages desired on cache queue */
+	u_int v_cache_min;	/* (c) min pages on cache queue (unused) */
 	u_int v_cache_max;	/* (c) max pages in cached obj (unused) */
 	u_int v_pageout_free_min;   /* (c) min pages reserved for kernel */
 	u_int v_interrupt_free_min; /* (c) reserved pages for int code */
@@ -131,7 +131,7 @@
 static __inline int
 vm_page_count_severe(void)
 {
-	return (cnt.v_free_severe > (cnt.v_free_count + cnt.v_cache_count));
+	return (cnt.v_free_severe > cnt.v_free_count);
 }
 
 /*
@@ -148,7 +148,7 @@
 static __inline int
 vm_page_count_min(void)
 {
-	return (cnt.v_free_min > (cnt.v_free_count + cnt.v_cache_count));
+	return (cnt.v_free_min > cnt.v_free_count);
 }
 
 /*
@@ -160,7 +160,7 @@
 static __inline int
 vm_page_count_target(void)
 {
-	return (cnt.v_free_target > (cnt.v_free_count + cnt.v_cache_count));
+	return (cnt.v_free_target > cnt.v_free_count);
 }
 
 /*
@@ -172,7 +172,7 @@
 static __inline int
 vm_paging_target(void)
 {
-	return (cnt.v_free_target - (cnt.v_free_count + cnt.v_cache_count));
+	return (cnt.v_free_target - cnt.v_free_count);
 }
 
 /*
@@ -183,7 +183,7 @@
 static __inline int
 vm_paging_needed(void)
 {
-	return (cnt.v_free_count + cnt.v_cache_count < vm_pageout_wakeup_thresh);
+	return (cnt.v_free_count < vm_pageout_wakeup_thresh);
 }
 
 #endif
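A worked example of what the vmmeter.h change means in practice, with
illustrative numbers: suppose v_free_count = 1000, v_cache_count = 5000
and vm_pageout_wakeup_thresh = 2000.  Before the patch,
vm_paging_needed() evaluated 6000 < 2000 = false; after it, 1000 < 2000
= true, so the pageout daemon wakes up and starts freeing the clean
pages parked on PQ_CACHE.  The system-wide invariant shifts from "cached
pages are free pages" to "cached pages are the cheapest pages to
reclaim."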