Index: vm/vm_pageout.c
===================================================================
--- vm/vm_pageout.c	(revision 254540)
+++ vm/vm_pageout.c	(working copy)
@@ -159,6 +159,8 @@
 static int vm_max_launder = 32;
 static int vm_pageout_update_period;
 static int defer_swap_pageouts;
 static int disable_swap_pageouts;
+static int lowmem_period = 10;
+static int lowmem_ticks;
 #if defined(NO_SWAPPING)
 static int vm_swap_enabled = 0;
@@ -179,6 +181,9 @@
 SYSCTL_INT(_vm, OID_AUTO, pageout_update_period, CTLFLAG_RW,
     &vm_pageout_update_period, 0,
     "Maximum active LRU update period");
+SYSCTL_INT(_vm, OID_AUTO, lowmem_period, CTLFLAG_RW, &lowmem_period, 0,
+    "Low memory callback period");
+
 #if defined(NO_SWAPPING)
 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, CTLFLAG_RD,
     &vm_swap_enabled, 0, "Enable entire process swapout");
@@ -901,9 +906,10 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 
 	/*
 	 * If we need to reclaim memory ask kernel caches to return
-	 * some.
+	 * some.  We rate limit to avoid thrashing.
 	 */
-	if (pass > 0) {
+	if (vmd == &vm_dom[0] && pass > 0 &&
+	    lowmem_ticks + (lowmem_period * hz) < ticks) {
 		/*
 		 * Decrease registered cache sizes.
 		 */
@@ -913,6 +919,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 		 * drained above.
 		 */
 		uma_reclaim();
+		lowmem_ticks = ticks;
 	}
 
 	/*
@@ -1326,21 +1333,12 @@ relock_queues:
 			m = next;
 			continue;
 		}
-		object = m->object;
-		if (!VM_OBJECT_TRYWLOCK(object) &&
-		    !vm_pageout_fallback_object_lock(m, &next)) {
-			VM_OBJECT_WUNLOCK(object);
-			vm_page_unlock(m);
-			m = next;
-			continue;
-		}
 
 		/*
 		 * Don't deactivate pages that are busy.
 		 */
 		if (vm_page_busied(m) || m->hold_count != 0) {
 			vm_page_unlock(m);
-			VM_OBJECT_WUNLOCK(object);
 			vm_page_requeue_locked(m);
 			m = next;
 			continue;
@@ -1360,6 +1358,15 @@ relock_queues:
 			vm_page_aflag_clear(m, PGA_REFERENCED);
 			act_delta += 1;
 		}
+		/*
+		 * Unlocked object ref count check.  Two races are possible.
+		 * 1) The ref was transitioning to zero and we saw non-zero,
+		 *    the pmap bits will be checked unnecessarily.
+		 * 2) The ref was transitioning to one and we saw zero.
+		 *    The page lock prevents a new reference to this page so
+		 *    we need not check the reference bits.
+		 */
+		object = m->object;
 		if (object->ref_count != 0)
 			act_delta += pmap_ts_referenced(m);
 
@@ -1370,16 +1377,14 @@ relock_queues:
 			m->act_count += ACT_ADVANCE + act_delta;
 			if (m->act_count > ACT_MAX)
 				m->act_count = ACT_MAX;
-		} else {
+		} else
 			m->act_count -= min(m->act_count, ACT_DECLINE);
-			act_delta = m->act_count;
-		}
 
 		/*
 		 * Move this page to the tail of the active or inactive
 		 * queue depending on usage.
 		 */
-		if (act_delta == 0) {
+		if (m->act_count == 0) {
 			KASSERT(object->ref_count != 0 ||
 			    !pmap_page_is_mapped(m),
 			    ("vm_pageout_scan: page %p is mapped", m));
@@ -1390,7 +1395,6 @@ relock_queues:
 		} else
 			vm_page_requeue_locked(m);
 		vm_page_unlock(m);
-		VM_OBJECT_WUNLOCK(object);
 		m = next;
 	}
 	vm_pagequeue_unlock(pq);
Index: vm/vm_init.c
===================================================================
--- vm/vm_init.c	(revision 254540)
+++ vm/vm_init.c	(working copy)
@@ -156,7 +156,8 @@ vm_mem_init(dummy)
 #if VM_NRESERVLEVEL > 0
 	    1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT));
 #else
-	    PAGE_SIZE);
+	    /* On non-superpage architectures we want large import sizes. */
+	    PAGE_SIZE * 1024);
 #endif
 
 	kmem_init_zero_region();
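
About the rate limit added in vm_pageout_scan(): the new condition lets the expensive low-memory path (the registered cache callbacks plus uma_reclaim()) fire at most once per lowmem_period seconds, and only from the first domain's pageout pass. Below is a minimal user-space sketch of that throttle, not part of the patch; it assumes a monotonic tick counter comparable to the kernel's ticks/hz, omits the vm_dom[0] check, and the names hz, maybe_reclaim and main are purely illustrative.

#include <stdio.h>

static int lowmem_period = 10;	/* seconds between low-memory reclaims */
static int lowmem_ticks;	/* tick count recorded at the last reclaim */
static const int hz = 100;	/* assumed timer frequency, ticks per second */

/* Run the reclaim path at most once per lowmem_period seconds. */
static void
maybe_reclaim(int ticks, int pass)
{

	if (pass > 0 && lowmem_ticks + (lowmem_period * hz) < ticks) {
		printf("reclaim fired at tick %d\n", ticks);
		lowmem_ticks = ticks;
	}
}

int
main(void)
{
	int t;

	/* Simulate one pageout pass per second for a minute. */
	for (t = 0; t <= 60 * hz; t += hz)
		maybe_reclaim(t, 1);
	return (0);
}

With these values the reclaim fires on roughly every eleventh pass instead of on every pass, which is the thrash-avoidance behavior the patch is after for the kernel's low-memory callbacks.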