1 | /* $NetBSD: subr_vmem.c,v 1.95 2016/07/07 06:55:43 msaitoh Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi, |
5 | * All rights reserved. |
6 | * |
7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions |
9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. |
15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
17 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
20 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
21 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
22 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
24 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
26 | * SUCH DAMAGE. |
27 | */ |
28 | |
29 | /* |
30 | * reference: |
31 | * - Magazines and Vmem: Extending the Slab Allocator |
32 | * to Many CPUs and Arbitrary Resources |
33 | * http://www.usenix.org/event/usenix01/bonwick.html |
34 | * |
35 | * locking & the boundary tag pool: |
36 | * - A pool(9) is used for vmem boundary tags |
 * - During a pool_get call the global vmem_btag_refill_lock is taken
 *   to serialize access to the allocation reserve, but no vmem arena
 *   locks are held.
 * - During pool_put calls no vmem mutexes are held.
 * - pool_drain doesn't hold the pool's mutex while releasing memory to
 *   its backing allocator, so it cannot interfere with any vmem mutexes.
43 | * - The boundary tag pool is forced to put page headers into pool pages |
44 | * (PR_PHINPAGE) and not off page to avoid pool recursion. |
45 | * (due to sizeof(bt_t) it should be the case anyway) |
46 | */ |
47 | |
48 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.95 2016/07/07 06:55:43 msaitoh Exp $");
50 | |
51 | #if defined(_KERNEL) && defined(_KERNEL_OPT) |
52 | #include "opt_ddb.h" |
53 | #endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */ |
54 | |
55 | #include <sys/param.h> |
56 | #include <sys/hash.h> |
57 | #include <sys/queue.h> |
58 | #include <sys/bitops.h> |
59 | |
60 | #if defined(_KERNEL) |
61 | #include <sys/systm.h> |
62 | #include <sys/kernel.h> /* hz */ |
63 | #include <sys/callout.h> |
64 | #include <sys/kmem.h> |
65 | #include <sys/pool.h> |
66 | #include <sys/vmem.h> |
67 | #include <sys/vmem_impl.h> |
68 | #include <sys/workqueue.h> |
69 | #include <sys/atomic.h> |
70 | #include <uvm/uvm.h> |
71 | #include <uvm/uvm_extern.h> |
72 | #include <uvm/uvm_km.h> |
73 | #include <uvm/uvm_page.h> |
74 | #include <uvm/uvm_pdaemon.h> |
75 | #else /* defined(_KERNEL) */ |
76 | #include <stdio.h> |
77 | #include <errno.h> |
78 | #include <assert.h> |
79 | #include <stdlib.h> |
80 | #include <string.h> |
81 | #include "../sys/vmem.h" |
82 | #include "../sys/vmem_impl.h" |
83 | #endif /* defined(_KERNEL) */ |
84 | |
85 | |
86 | #if defined(_KERNEL) |
87 | #include <sys/evcnt.h> |
88 | #define VMEM_EVCNT_DEFINE(name) \ |
89 | struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \ |
90 | "vmem", #name); \ |
91 | EVCNT_ATTACH_STATIC(vmem_evcnt_##name); |
92 | #define VMEM_EVCNT_INCR(ev) vmem_evcnt_##ev.ev_count++ |
93 | #define VMEM_EVCNT_DECR(ev) vmem_evcnt_##ev.ev_count-- |
94 | |
95 | VMEM_EVCNT_DEFINE(static_bt_count) |
96 | VMEM_EVCNT_DEFINE(static_bt_inuse) |
97 | |
98 | #define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan) |
99 | #define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv) |
100 | #define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock) |
101 | #define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv) |
102 | |
103 | #else /* defined(_KERNEL) */ |
104 | |
105 | #define VMEM_EVCNT_INCR(ev) /* nothing */ |
106 | #define VMEM_EVCNT_DECR(ev) /* nothing */ |
107 | |
108 | #define VMEM_CONDVAR_INIT(vm, wchan) /* nothing */ |
109 | #define VMEM_CONDVAR_DESTROY(vm) /* nothing */ |
110 | #define VMEM_CONDVAR_WAIT(vm) /* nothing */ |
111 | #define VMEM_CONDVAR_BROADCAST(vm) /* nothing */ |
112 | |
113 | #define UNITTEST |
114 | #define KASSERT(a) assert(a) |
115 | #define mutex_init(a, b, c) /* nothing */ |
116 | #define mutex_destroy(a) /* nothing */ |
117 | #define mutex_enter(a) /* nothing */ |
118 | #define mutex_tryenter(a) true |
119 | #define mutex_exit(a) /* nothing */ |
120 | #define mutex_owned(a) /* nothing */ |
121 | #define ASSERT_SLEEPABLE() /* nothing */ |
#define	panic(...)	do { printf(__VA_ARGS__); abort(); } while (/*CONSTCOND*/0)
123 | #endif /* defined(_KERNEL) */ |
124 | |
125 | #if defined(VMEM_SANITY) |
126 | static void vmem_check(vmem_t *); |
127 | #else /* defined(VMEM_SANITY) */ |
128 | #define vmem_check(vm) /* nothing */ |
129 | #endif /* defined(VMEM_SANITY) */ |
130 | |
131 | #define VMEM_HASHSIZE_MIN 1 /* XXX */ |
132 | #define VMEM_HASHSIZE_MAX 65536 /* XXX */ |
133 | #define VMEM_HASHSIZE_INIT 1 |
134 | |
135 | #define VM_FITMASK (VM_BESTFIT | VM_INSTANTFIT) |
136 | |
137 | #if defined(_KERNEL) |
138 | static bool vmem_bootstrapped = false; |
139 | static kmutex_t vmem_list_lock; |
140 | static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list); |
141 | #endif /* defined(_KERNEL) */ |
142 | |
143 | /* ---- misc */ |
144 | |
145 | #define VMEM_LOCK(vm) mutex_enter(&vm->vm_lock) |
146 | #define VMEM_TRYLOCK(vm) mutex_tryenter(&vm->vm_lock) |
147 | #define VMEM_UNLOCK(vm) mutex_exit(&vm->vm_lock) |
148 | #define VMEM_LOCK_INIT(vm, ipl) mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl) |
149 | #define VMEM_LOCK_DESTROY(vm) mutex_destroy(&vm->vm_lock) |
150 | #define VMEM_ASSERT_LOCKED(vm) KASSERT(mutex_owned(&vm->vm_lock)) |
151 | |
152 | #define VMEM_ALIGNUP(addr, align) \ |
153 | (-(-(addr) & -(align))) |
154 | |
155 | #define VMEM_CROSS_P(addr1, addr2, boundary) \ |
156 | ((((addr1) ^ (addr2)) & -(boundary)) != 0) |
157 | |
158 | #define ORDER2SIZE(order) ((vmem_size_t)1 << (order)) |
159 | #define SIZE2ORDER(size) ((int)ilog2(size)) |
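
/*
 * Worked examples for the macros above (illustrative only; the
 * align/boundary arguments are powers of 2, as the callers guarantee):
 *
 *	VMEM_ALIGNUP(13, 8) == 16, since -(-13 & -8) == -(-16) == 16.
 *	VMEM_CROSS_P(250, 260, 256) is true: [250, 260] straddles a
 *	256-byte boundary, so the XOR has a bit set at or above bit 8.
 *	ORDER2SIZE(3) == 8 and SIZE2ORDER(8) == 3; for non-powers of 2,
 *	SIZE2ORDER rounds down, e.g. SIZE2ORDER(6) == 2.
 */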
160 | |
161 | #if !defined(_KERNEL) |
162 | #define xmalloc(sz, flags) malloc(sz) |
163 | #define xfree(p, sz) free(p) |
164 | #define bt_alloc(vm, flags) malloc(sizeof(bt_t)) |
165 | #define bt_free(vm, bt) free(bt) |
166 | #else /* defined(_KERNEL) */ |
167 | |
#define	xmalloc(sz, flags) \
    kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
#define	xfree(p, sz)		kmem_free(p, sz)
171 | |
172 | /* |
173 | * BT_RESERVE calculation: |
 * we allocate memory for boundary tags with vmem; therefore we have
 * to keep a reserve of bts used to allocate memory for bts.
 * This reserve is 4 for each arena involved in allocating vmem's memory.
177 | * BT_MAXFREE: don't cache excessive counts of bts in arenas |
178 | */ |
179 | #define STATIC_BT_COUNT 200 |
180 | #define BT_MINRESERVE 4 |
181 | #define BT_MAXFREE 64 |
182 | |
183 | static struct vmem_btag static_bts[STATIC_BT_COUNT]; |
184 | static int static_bt_count = STATIC_BT_COUNT; |
185 | |
186 | static struct vmem kmem_va_meta_arena_store; |
187 | vmem_t *kmem_va_meta_arena; |
188 | static struct vmem kmem_meta_arena_store; |
189 | vmem_t *kmem_meta_arena = NULL; |
190 | |
191 | static kmutex_t vmem_btag_refill_lock; |
192 | static kmutex_t vmem_btag_lock; |
193 | static LIST_HEAD(, vmem_btag) vmem_btag_freelist; |
194 | static size_t vmem_btag_freelist_count = 0; |
195 | static struct pool vmem_btag_pool; |
196 | |
197 | static void |
198 | vmem_kick_pdaemon(void) |
199 | { |
200 | #if defined(_KERNEL) |
201 | mutex_spin_enter(&uvm_fpageqlock); |
202 | uvm_kick_pdaemon(); |
203 | mutex_spin_exit(&uvm_fpageqlock); |
204 | #endif |
205 | } |
206 | |
207 | /* ---- boundary tag */ |
208 | |
209 | static int bt_refill(vmem_t *vm); |
210 | |
211 | static void * |
212 | pool_page_alloc_vmem_meta(struct pool *pp, int flags) |
213 | { |
214 | const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; |
215 | vmem_addr_t va; |
216 | int ret; |
217 | |
218 | ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz, |
219 | (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va); |
220 | |
221 | return ret ? NULL : (void *)va; |
222 | } |
223 | |
224 | static void |
225 | pool_page_free_vmem_meta(struct pool *pp, void *v) |
226 | { |
227 | |
228 | vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz); |
229 | } |
230 | |
231 | /* allocator for vmem-pool metadata */ |
232 | struct pool_allocator pool_allocator_vmem_meta = { |
233 | .pa_alloc = pool_page_alloc_vmem_meta, |
234 | .pa_free = pool_page_free_vmem_meta, |
235 | .pa_pagesz = 0 |
236 | }; |
237 | |
238 | static int |
239 | bt_refill(vmem_t *vm) |
240 | { |
241 | bt_t *bt; |
242 | |
243 | VMEM_LOCK(vm); |
244 | if (vm->vm_nfreetags > BT_MINRESERVE) { |
245 | VMEM_UNLOCK(vm); |
246 | return 0; |
247 | } |
248 | |
249 | mutex_enter(&vmem_btag_lock); |
250 | while (!LIST_EMPTY(&vmem_btag_freelist) && |
251 | vm->vm_nfreetags <= BT_MINRESERVE) { |
252 | bt = LIST_FIRST(&vmem_btag_freelist); |
253 | LIST_REMOVE(bt, bt_freelist); |
254 | LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist); |
255 | vm->vm_nfreetags++; |
256 | vmem_btag_freelist_count--; |
257 | VMEM_EVCNT_INCR(static_bt_inuse); |
258 | } |
259 | mutex_exit(&vmem_btag_lock); |
260 | |
261 | while (vm->vm_nfreetags <= BT_MINRESERVE) { |
262 | VMEM_UNLOCK(vm); |
263 | mutex_enter(&vmem_btag_refill_lock); |
264 | bt = pool_get(&vmem_btag_pool, PR_NOWAIT); |
265 | mutex_exit(&vmem_btag_refill_lock); |
266 | VMEM_LOCK(vm); |
267 | if (bt == NULL) |
268 | break; |
269 | LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist); |
270 | vm->vm_nfreetags++; |
271 | } |
272 | |
273 | if (vm->vm_nfreetags <= BT_MINRESERVE) { |
274 | VMEM_UNLOCK(vm); |
275 | return ENOMEM; |
276 | } |
277 | |
278 | VMEM_UNLOCK(vm); |
279 | |
280 | if (kmem_meta_arena != NULL) { |
281 | (void)bt_refill(kmem_arena); |
282 | (void)bt_refill(kmem_va_meta_arena); |
283 | (void)bt_refill(kmem_meta_arena); |
284 | } |
285 | |
286 | return 0; |
287 | } |
288 | |
289 | static bt_t * |
290 | bt_alloc(vmem_t *vm, vm_flag_t flags) |
291 | { |
292 | bt_t *bt; |
293 | VMEM_LOCK(vm); |
294 | while (vm->vm_nfreetags <= BT_MINRESERVE && (flags & VM_POPULATING) == 0) { |
295 | VMEM_UNLOCK(vm); |
296 | if (bt_refill(vm)) { |
297 | if ((flags & VM_NOSLEEP) != 0) { |
298 | return NULL; |
299 | } |
300 | |
301 | /* |
302 | * It would be nice to wait for something specific here |
303 | * but there are multiple ways that a retry could |
304 | * succeed and we can't wait for multiple things |
305 | * simultaneously. So we'll just sleep for an arbitrary |
306 | * short period of time and retry regardless. |
307 | * This should be a very rare case. |
308 | */ |
309 | |
310 | vmem_kick_pdaemon(); |
311 | kpause("btalloc" , false, 1, NULL); |
312 | } |
313 | VMEM_LOCK(vm); |
314 | } |
315 | bt = LIST_FIRST(&vm->vm_freetags); |
316 | LIST_REMOVE(bt, bt_freelist); |
317 | vm->vm_nfreetags--; |
318 | VMEM_UNLOCK(vm); |
319 | |
320 | return bt; |
321 | } |
322 | |
323 | static void |
324 | bt_free(vmem_t *vm, bt_t *bt) |
325 | { |
326 | |
327 | VMEM_LOCK(vm); |
328 | LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist); |
329 | vm->vm_nfreetags++; |
330 | VMEM_UNLOCK(vm); |
331 | } |
332 | |
333 | static void |
334 | bt_freetrim(vmem_t *vm, int freelimit) |
335 | { |
336 | bt_t *t; |
337 | LIST_HEAD(, vmem_btag) tofree; |
338 | |
339 | LIST_INIT(&tofree); |
340 | |
341 | VMEM_LOCK(vm); |
342 | while (vm->vm_nfreetags > freelimit) { |
343 | bt_t *bt = LIST_FIRST(&vm->vm_freetags); |
344 | LIST_REMOVE(bt, bt_freelist); |
345 | vm->vm_nfreetags--; |
346 | if (bt >= static_bts |
347 | && bt < &static_bts[STATIC_BT_COUNT]) { |
348 | mutex_enter(&vmem_btag_lock); |
349 | LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist); |
350 | vmem_btag_freelist_count++; |
351 | mutex_exit(&vmem_btag_lock); |
352 | VMEM_EVCNT_DECR(static_bt_inuse); |
353 | } else { |
354 | LIST_INSERT_HEAD(&tofree, bt, bt_freelist); |
355 | } |
356 | } |
357 | |
358 | VMEM_UNLOCK(vm); |
359 | while (!LIST_EMPTY(&tofree)) { |
360 | t = LIST_FIRST(&tofree); |
361 | LIST_REMOVE(t, bt_freelist); |
362 | pool_put(&vmem_btag_pool, t); |
363 | } |
364 | } |
365 | #endif /* defined(_KERNEL) */ |
366 | |
367 | /* |
368 | * freelist[0] ... [1, 1] |
369 | * freelist[1] ... [2, 3] |
370 | * freelist[2] ... [4, 7] |
371 | * freelist[3] ... [8, 15] |
372 | * : |
373 | * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1] |
374 | * : |
375 | */ |
376 | |
377 | static struct vmem_freelist * |
378 | bt_freehead_tofree(vmem_t *vm, vmem_size_t size) |
379 | { |
380 | const vmem_size_t qsize = size >> vm->vm_quantum_shift; |
381 | const int idx = SIZE2ORDER(qsize); |
382 | |
383 | KASSERT(size != 0 && qsize != 0); |
384 | KASSERT((size & vm->vm_quantum_mask) == 0); |
385 | KASSERT(idx >= 0); |
386 | KASSERT(idx < VMEM_MAXORDER); |
387 | |
388 | return &vm->vm_freelist[idx]; |
389 | } |
390 | |
391 | /* |
392 | * bt_freehead_toalloc: return the freelist for the given size and allocation |
393 | * strategy. |
394 | * |
 * for VM_INSTANTFIT, return the list in which any block is large enough
 * for the requested size.  otherwise, return the lowest list which can
 * contain blocks large enough for the requested size.
398 | */ |
399 | |
400 | static struct vmem_freelist * |
401 | bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat) |
402 | { |
403 | const vmem_size_t qsize = size >> vm->vm_quantum_shift; |
404 | int idx = SIZE2ORDER(qsize); |
405 | |
406 | KASSERT(size != 0 && qsize != 0); |
407 | KASSERT((size & vm->vm_quantum_mask) == 0); |
408 | |
409 | if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) { |
410 | idx++; |
411 | /* check too large request? */ |
412 | } |
413 | KASSERT(idx >= 0); |
414 | KASSERT(idx < VMEM_MAXORDER); |
415 | |
416 | return &vm->vm_freelist[idx]; |
417 | } |
418 | |
419 | /* ---- boundary tag hash */ |
420 | |
421 | static struct vmem_hashlist * |
422 | bt_hashhead(vmem_t *vm, vmem_addr_t addr) |
423 | { |
424 | struct vmem_hashlist *list; |
425 | unsigned int hash; |
426 | |
427 | hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT); |
428 | list = &vm->vm_hashlist[hash % vm->vm_hashsize]; |
429 | |
430 | return list; |
431 | } |
432 | |
433 | static bt_t * |
434 | bt_lookupbusy(vmem_t *vm, vmem_addr_t addr) |
435 | { |
436 | struct vmem_hashlist *list; |
437 | bt_t *bt; |
438 | |
439 | list = bt_hashhead(vm, addr); |
440 | LIST_FOREACH(bt, list, bt_hashlist) { |
441 | if (bt->bt_start == addr) { |
442 | break; |
443 | } |
444 | } |
445 | |
446 | return bt; |
447 | } |
448 | |
449 | static void |
450 | bt_rembusy(vmem_t *vm, bt_t *bt) |
451 | { |
452 | |
453 | KASSERT(vm->vm_nbusytag > 0); |
454 | vm->vm_inuse -= bt->bt_size; |
455 | vm->vm_nbusytag--; |
456 | LIST_REMOVE(bt, bt_hashlist); |
457 | } |
458 | |
459 | static void |
460 | bt_insbusy(vmem_t *vm, bt_t *bt) |
461 | { |
462 | struct vmem_hashlist *list; |
463 | |
464 | KASSERT(bt->bt_type == BT_TYPE_BUSY); |
465 | |
466 | list = bt_hashhead(vm, bt->bt_start); |
467 | LIST_INSERT_HEAD(list, bt, bt_hashlist); |
468 | vm->vm_nbusytag++; |
469 | vm->vm_inuse += bt->bt_size; |
470 | } |
471 | |
472 | /* ---- boundary tag list */ |
473 | |
474 | static void |
475 | bt_remseg(vmem_t *vm, bt_t *bt) |
476 | { |
477 | |
478 | TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist); |
479 | } |
480 | |
481 | static void |
482 | bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev) |
483 | { |
484 | |
485 | TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist); |
486 | } |
487 | |
488 | static void |
489 | bt_insseg_tail(vmem_t *vm, bt_t *bt) |
490 | { |
491 | |
492 | TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist); |
493 | } |
494 | |
495 | static void |
496 | bt_remfree(vmem_t *vm, bt_t *bt) |
497 | { |
498 | |
499 | KASSERT(bt->bt_type == BT_TYPE_FREE); |
500 | |
501 | LIST_REMOVE(bt, bt_freelist); |
502 | } |
503 | |
504 | static void |
505 | bt_insfree(vmem_t *vm, bt_t *bt) |
506 | { |
507 | struct vmem_freelist *list; |
508 | |
509 | list = bt_freehead_tofree(vm, bt->bt_size); |
510 | LIST_INSERT_HEAD(list, bt, bt_freelist); |
511 | } |
512 | |
513 | /* ---- vmem internal functions */ |
514 | |
515 | #if defined(QCACHE) |
516 | static inline vm_flag_t |
517 | prf_to_vmf(int prflags) |
518 | { |
519 | vm_flag_t vmflags; |
520 | |
521 | KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0); |
522 | if ((prflags & PR_WAITOK) != 0) { |
523 | vmflags = VM_SLEEP; |
524 | } else { |
525 | vmflags = VM_NOSLEEP; |
526 | } |
527 | return vmflags; |
528 | } |
529 | |
530 | static inline int |
531 | vmf_to_prf(vm_flag_t vmflags) |
532 | { |
533 | int prflags; |
534 | |
535 | if ((vmflags & VM_SLEEP) != 0) { |
536 | prflags = PR_WAITOK; |
537 | } else { |
538 | prflags = PR_NOWAIT; |
539 | } |
540 | return prflags; |
541 | } |
542 | |
543 | static size_t |
544 | qc_poolpage_size(size_t qcache_max) |
545 | { |
546 | int i; |
547 | |
548 | for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) { |
549 | /* nothing */ |
550 | } |
551 | return ORDER2SIZE(i); |
552 | } |
553 | |
554 | static void * |
555 | qc_poolpage_alloc(struct pool *pool, int prflags) |
556 | { |
557 | qcache_t *qc = QC_POOL_TO_QCACHE(pool); |
558 | vmem_t *vm = qc->qc_vmem; |
559 | vmem_addr_t addr; |
560 | |
561 | if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz, |
562 | prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0) |
563 | return NULL; |
564 | return (void *)addr; |
565 | } |
566 | |
567 | static void |
568 | qc_poolpage_free(struct pool *pool, void *addr) |
569 | { |
570 | qcache_t *qc = QC_POOL_TO_QCACHE(pool); |
571 | vmem_t *vm = qc->qc_vmem; |
572 | |
573 | vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz); |
574 | } |
575 | |
576 | static void |
577 | qc_init(vmem_t *vm, size_t qcache_max, int ipl) |
578 | { |
579 | qcache_t *prevqc; |
580 | struct pool_allocator *pa; |
581 | int qcache_idx_max; |
582 | int i; |
583 | |
584 | KASSERT((qcache_max & vm->vm_quantum_mask) == 0); |
585 | if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) { |
586 | qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift; |
587 | } |
588 | vm->vm_qcache_max = qcache_max; |
589 | pa = &vm->vm_qcache_allocator; |
590 | memset(pa, 0, sizeof(*pa)); |
591 | pa->pa_alloc = qc_poolpage_alloc; |
592 | pa->pa_free = qc_poolpage_free; |
593 | pa->pa_pagesz = qc_poolpage_size(qcache_max); |
594 | |
595 | qcache_idx_max = qcache_max >> vm->vm_quantum_shift; |
596 | prevqc = NULL; |
597 | for (i = qcache_idx_max; i > 0; i--) { |
598 | qcache_t *qc = &vm->vm_qcache_store[i - 1]; |
599 | size_t size = i << vm->vm_quantum_shift; |
600 | pool_cache_t pc; |
601 | |
602 | qc->qc_vmem = vm; |
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
604 | vm->vm_name, size); |
605 | |
606 | pc = pool_cache_init(size, |
607 | ORDER2SIZE(vm->vm_quantum_shift), 0, |
608 | PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */, |
609 | qc->qc_name, pa, ipl, NULL, NULL, NULL); |
610 | |
611 | KASSERT(pc); |
612 | |
613 | qc->qc_cache = pc; |
614 | KASSERT(qc->qc_cache != NULL); /* XXX */ |
615 | if (prevqc != NULL && |
616 | qc->qc_cache->pc_pool.pr_itemsperpage == |
617 | prevqc->qc_cache->pc_pool.pr_itemsperpage) { |
618 | pool_cache_destroy(qc->qc_cache); |
619 | vm->vm_qcache[i - 1] = prevqc; |
620 | continue; |
621 | } |
622 | qc->qc_cache->pc_pool.pr_qcache = qc; |
623 | vm->vm_qcache[i - 1] = qc; |
624 | prevqc = qc; |
625 | } |
626 | } |
627 | |
628 | static void |
629 | qc_destroy(vmem_t *vm) |
630 | { |
631 | const qcache_t *prevqc; |
632 | int i; |
633 | int qcache_idx_max; |
634 | |
635 | qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift; |
636 | prevqc = NULL; |
637 | for (i = 0; i < qcache_idx_max; i++) { |
638 | qcache_t *qc = vm->vm_qcache[i]; |
639 | |
640 | if (prevqc == qc) { |
641 | continue; |
642 | } |
643 | pool_cache_destroy(qc->qc_cache); |
644 | prevqc = qc; |
645 | } |
646 | } |
647 | #endif |
648 | |
649 | #if defined(_KERNEL) |
650 | static void |
651 | vmem_bootstrap(void) |
652 | { |
653 | |
654 | mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM); |
655 | mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM); |
656 | mutex_init(&vmem_btag_refill_lock, MUTEX_DEFAULT, IPL_VM); |
657 | |
658 | while (static_bt_count-- > 0) { |
659 | bt_t *bt = &static_bts[static_bt_count]; |
660 | LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist); |
661 | VMEM_EVCNT_INCR(static_bt_count); |
662 | vmem_btag_freelist_count++; |
663 | } |
	vmem_bootstrapped = true;
665 | } |
666 | |
667 | void |
668 | vmem_subsystem_init(vmem_t *vm) |
669 | { |
670 | |
	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
672 | 0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm, |
673 | 0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT, |
674 | IPL_VM); |
675 | |
	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
677 | 0, 0, PAGE_SIZE, |
678 | uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena, |
679 | 0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM); |
680 | |
681 | pool_init(&vmem_btag_pool, sizeof(bt_t), 0, 0, PR_PHINPAGE, |
682 | "vmembt" , &pool_allocator_vmem_meta, IPL_VM); |
683 | } |
684 | #endif /* defined(_KERNEL) */ |
685 | |
686 | static int |
687 | vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags, |
688 | int spanbttype) |
689 | { |
690 | bt_t *btspan; |
691 | bt_t *btfree; |
692 | |
693 | KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0); |
694 | KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0); |
695 | KASSERT(spanbttype == BT_TYPE_SPAN || |
696 | spanbttype == BT_TYPE_SPAN_STATIC); |
697 | |
698 | btspan = bt_alloc(vm, flags); |
699 | if (btspan == NULL) { |
700 | return ENOMEM; |
701 | } |
702 | btfree = bt_alloc(vm, flags); |
703 | if (btfree == NULL) { |
704 | bt_free(vm, btspan); |
705 | return ENOMEM; |
706 | } |
707 | |
708 | btspan->bt_type = spanbttype; |
709 | btspan->bt_start = addr; |
710 | btspan->bt_size = size; |
711 | |
712 | btfree->bt_type = BT_TYPE_FREE; |
713 | btfree->bt_start = addr; |
714 | btfree->bt_size = size; |
715 | |
716 | VMEM_LOCK(vm); |
717 | bt_insseg_tail(vm, btspan); |
718 | bt_insseg(vm, btfree, btspan); |
719 | bt_insfree(vm, btfree); |
720 | vm->vm_size += size; |
721 | VMEM_UNLOCK(vm); |
722 | |
723 | return 0; |
724 | } |
725 | |
726 | static void |
727 | vmem_destroy1(vmem_t *vm) |
728 | { |
729 | |
730 | #if defined(QCACHE) |
731 | qc_destroy(vm); |
732 | #endif /* defined(QCACHE) */ |
733 | if (vm->vm_hashlist != NULL) { |
734 | int i; |
735 | |
736 | for (i = 0; i < vm->vm_hashsize; i++) { |
737 | bt_t *bt; |
738 | |
739 | while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) { |
740 | KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC); |
741 | bt_free(vm, bt); |
742 | } |
743 | } |
744 | if (vm->vm_hashlist != &vm->vm_hash0) { |
745 | xfree(vm->vm_hashlist, |
746 | sizeof(struct vmem_hashlist *) * vm->vm_hashsize); |
747 | } |
748 | } |
749 | |
750 | bt_freetrim(vm, 0); |
751 | |
752 | VMEM_CONDVAR_DESTROY(vm); |
753 | VMEM_LOCK_DESTROY(vm); |
754 | xfree(vm, sizeof(*vm)); |
755 | } |
756 | |
757 | static int |
758 | vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags) |
759 | { |
760 | vmem_addr_t addr; |
761 | int rc; |
762 | |
763 | if (vm->vm_importfn == NULL) { |
764 | return EINVAL; |
765 | } |
766 | |
767 | if (vm->vm_flags & VM_LARGEIMPORT) { |
768 | size *= 16; |
769 | } |
770 | |
771 | if (vm->vm_flags & VM_XIMPORT) { |
772 | rc = ((vmem_ximport_t *)vm->vm_importfn)(vm->vm_arg, size, |
773 | &size, flags, &addr); |
774 | } else { |
775 | rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr); |
776 | } |
777 | if (rc) { |
778 | return ENOMEM; |
779 | } |
780 | |
781 | if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) { |
782 | (*vm->vm_releasefn)(vm->vm_arg, addr, size); |
783 | return ENOMEM; |
784 | } |
785 | |
786 | return 0; |
787 | } |
788 | |
789 | static int |
790 | vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags) |
791 | { |
792 | bt_t *bt; |
793 | int i; |
794 | struct vmem_hashlist *newhashlist; |
795 | struct vmem_hashlist *oldhashlist; |
796 | size_t oldhashsize; |
797 | |
798 | KASSERT(newhashsize > 0); |
799 | |
800 | newhashlist = |
801 | xmalloc(sizeof(struct vmem_hashlist *) * newhashsize, flags); |
802 | if (newhashlist == NULL) { |
803 | return ENOMEM; |
804 | } |
805 | for (i = 0; i < newhashsize; i++) { |
806 | LIST_INIT(&newhashlist[i]); |
807 | } |
808 | |
809 | if (!VMEM_TRYLOCK(vm)) { |
810 | xfree(newhashlist, |
811 | sizeof(struct vmem_hashlist *) * newhashsize); |
812 | return EBUSY; |
813 | } |
814 | oldhashlist = vm->vm_hashlist; |
815 | oldhashsize = vm->vm_hashsize; |
816 | vm->vm_hashlist = newhashlist; |
817 | vm->vm_hashsize = newhashsize; |
818 | if (oldhashlist == NULL) { |
819 | VMEM_UNLOCK(vm); |
820 | return 0; |
821 | } |
822 | for (i = 0; i < oldhashsize; i++) { |
823 | while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) { |
824 | bt_rembusy(vm, bt); /* XXX */ |
825 | bt_insbusy(vm, bt); |
826 | } |
827 | } |
828 | VMEM_UNLOCK(vm); |
829 | |
830 | if (oldhashlist != &vm->vm_hash0) { |
831 | xfree(oldhashlist, |
832 | sizeof(struct vmem_hashlist *) * oldhashsize); |
833 | } |
834 | |
835 | return 0; |
836 | } |
837 | |
838 | /* |
839 | * vmem_fit: check if a bt can satisfy the given restrictions. |
840 | * |
 * it's the caller's responsibility to ensure the region is big enough
842 | * before calling us. |
843 | */ |
844 | |
845 | static int |
846 | vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, |
847 | vmem_size_t phase, vmem_size_t nocross, |
848 | vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp) |
849 | { |
850 | vmem_addr_t start; |
851 | vmem_addr_t end; |
852 | |
853 | KASSERT(size > 0); |
854 | KASSERT(bt->bt_size >= size); /* caller's responsibility */ |
855 | |
856 | /* |
857 | * XXX assumption: vmem_addr_t and vmem_size_t are |
858 | * unsigned integer of the same size. |
859 | */ |
860 | |
861 | start = bt->bt_start; |
862 | if (start < minaddr) { |
863 | start = minaddr; |
864 | } |
865 | end = BT_END(bt); |
866 | if (end > maxaddr) { |
867 | end = maxaddr; |
868 | } |
869 | if (start > end) { |
870 | return ENOMEM; |
871 | } |
872 | |
873 | start = VMEM_ALIGNUP(start - phase, align) + phase; |
874 | if (start < bt->bt_start) { |
875 | start += align; |
876 | } |
877 | if (VMEM_CROSS_P(start, start + size - 1, nocross)) { |
878 | KASSERT(align < nocross); |
879 | start = VMEM_ALIGNUP(start - phase, nocross) + phase; |
880 | } |
881 | if (start <= end && end - start >= size - 1) { |
882 | KASSERT((start & (align - 1)) == phase); |
883 | KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross)); |
884 | KASSERT(minaddr <= start); |
885 | KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr); |
886 | KASSERT(bt->bt_start <= start); |
887 | KASSERT(BT_END(bt) - start >= size - 1); |
888 | *addrp = start; |
889 | return 0; |
890 | } |
891 | return ENOMEM; |
892 | } |
893 | |
894 | /* ---- vmem API */ |
895 | |
896 | /* |
 * vmem_init: create a vmem arena.
898 | */ |
899 | |
900 | vmem_t * |
901 | vmem_init(vmem_t *vm, const char *name, |
902 | vmem_addr_t base, vmem_size_t size, vmem_size_t quantum, |
903 | vmem_import_t *importfn, vmem_release_t *releasefn, |
904 | vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl) |
905 | { |
906 | int i; |
907 | |
908 | KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0); |
909 | KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0); |
910 | KASSERT(quantum > 0); |
911 | |
912 | #if defined(_KERNEL) |
913 | /* XXX: SMP, we get called early... */ |
914 | if (!vmem_bootstrapped) { |
915 | vmem_bootstrap(); |
916 | } |
917 | #endif /* defined(_KERNEL) */ |
918 | |
919 | if (vm == NULL) { |
920 | vm = xmalloc(sizeof(*vm), flags); |
921 | } |
922 | if (vm == NULL) { |
923 | return NULL; |
924 | } |
925 | |
	VMEM_CONDVAR_INIT(vm, "vmem");
927 | VMEM_LOCK_INIT(vm, ipl); |
928 | vm->vm_flags = flags; |
929 | vm->vm_nfreetags = 0; |
930 | LIST_INIT(&vm->vm_freetags); |
931 | strlcpy(vm->vm_name, name, sizeof(vm->vm_name)); |
932 | vm->vm_quantum_mask = quantum - 1; |
933 | vm->vm_quantum_shift = SIZE2ORDER(quantum); |
934 | KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum); |
935 | vm->vm_importfn = importfn; |
936 | vm->vm_releasefn = releasefn; |
937 | vm->vm_arg = arg; |
938 | vm->vm_nbusytag = 0; |
939 | vm->vm_size = 0; |
940 | vm->vm_inuse = 0; |
941 | #if defined(QCACHE) |
942 | qc_init(vm, qcache_max, ipl); |
943 | #endif /* defined(QCACHE) */ |
944 | |
945 | TAILQ_INIT(&vm->vm_seglist); |
946 | for (i = 0; i < VMEM_MAXORDER; i++) { |
947 | LIST_INIT(&vm->vm_freelist[i]); |
948 | } |
949 | memset(&vm->vm_hash0, 0, sizeof(struct vmem_hashlist)); |
950 | vm->vm_hashsize = 1; |
951 | vm->vm_hashlist = &vm->vm_hash0; |
952 | |
953 | if (size != 0) { |
954 | if (vmem_add(vm, base, size, flags) != 0) { |
955 | vmem_destroy1(vm); |
956 | return NULL; |
957 | } |
958 | } |
959 | |
960 | #if defined(_KERNEL) |
961 | if (flags & VM_BOOTSTRAP) { |
962 | bt_refill(vm); |
963 | } |
964 | |
965 | mutex_enter(&vmem_list_lock); |
966 | LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist); |
967 | mutex_exit(&vmem_list_lock); |
968 | #endif /* defined(_KERNEL) */ |
969 | |
970 | return vm; |
971 | } |
972 | |
973 | |
974 | |
975 | /* |
976 | * vmem_create: create an arena. |
977 | * |
978 | * => must not be called from interrupt context. |
979 | */ |
980 | |
981 | vmem_t * |
982 | vmem_create(const char *name, vmem_addr_t base, vmem_size_t size, |
983 | vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn, |
984 | vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl) |
985 | { |
986 | |
987 | KASSERT((flags & (VM_XIMPORT)) == 0); |
988 | |
989 | return vmem_init(NULL, name, base, size, quantum, |
990 | importfn, releasefn, source, qcache_max, flags, ipl); |
991 | } |
992 | |
993 | /* |
 * vmem_xcreate: create an arena which takes an alternative import function.
995 | * |
996 | * => must not be called from interrupt context. |
997 | */ |
998 | |
999 | vmem_t * |
1000 | vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size, |
1001 | vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn, |
1002 | vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl) |
1003 | { |
1004 | |
1005 | KASSERT((flags & (VM_XIMPORT)) == 0); |
1006 | |
1007 | return vmem_init(NULL, name, base, size, quantum, |
1008 | (vmem_import_t *)importfn, releasefn, source, |
1009 | qcache_max, flags | VM_XIMPORT, ipl); |
1010 | } |
1011 | |
1012 | void |
1013 | vmem_destroy(vmem_t *vm) |
1014 | { |
1015 | |
1016 | #if defined(_KERNEL) |
1017 | mutex_enter(&vmem_list_lock); |
1018 | LIST_REMOVE(vm, vm_alllist); |
1019 | mutex_exit(&vmem_list_lock); |
1020 | #endif /* defined(_KERNEL) */ |
1021 | |
1022 | vmem_destroy1(vm); |
1023 | } |
1024 | |
1025 | vmem_size_t |
1026 | vmem_roundup_size(vmem_t *vm, vmem_size_t size) |
1027 | { |
1028 | |
1029 | return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask; |
1030 | } |
1031 | |
1032 | /* |
1033 | * vmem_alloc: allocate resource from the arena. |
1034 | */ |
1035 | |
1036 | int |
1037 | vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp) |
1038 | { |
1039 | const vm_flag_t strat __diagused = flags & VM_FITMASK; |
1040 | |
1041 | KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0); |
1042 | KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0); |
1043 | |
1044 | KASSERT(size > 0); |
1045 | KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT); |
1046 | if ((flags & VM_SLEEP) != 0) { |
1047 | ASSERT_SLEEPABLE(); |
1048 | } |
1049 | |
1050 | #if defined(QCACHE) |
1051 | if (size <= vm->vm_qcache_max) { |
1052 | void *p; |
1053 | int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift; |
1054 | qcache_t *qc = vm->vm_qcache[qidx - 1]; |
1055 | |
1056 | p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags)); |
1057 | if (addrp != NULL) |
1058 | *addrp = (vmem_addr_t)p; |
1059 | return (p == NULL) ? ENOMEM : 0; |
1060 | } |
1061 | #endif /* defined(QCACHE) */ |
1062 | |
1063 | return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, |
1064 | flags, addrp); |
1065 | } |
1066 | |
1067 | int |
1068 | vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align, |
1069 | const vmem_size_t phase, const vmem_size_t nocross, |
1070 | const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags, |
1071 | vmem_addr_t *addrp) |
1072 | { |
1073 | struct vmem_freelist *list; |
1074 | struct vmem_freelist *first; |
1075 | struct vmem_freelist *end; |
1076 | bt_t *bt; |
1077 | bt_t *btnew; |
1078 | bt_t *btnew2; |
1079 | const vmem_size_t size = vmem_roundup_size(vm, size0); |
1080 | vm_flag_t strat = flags & VM_FITMASK; |
1081 | vmem_addr_t start; |
1082 | int rc; |
1083 | |
1084 | KASSERT(size0 > 0); |
1085 | KASSERT(size > 0); |
1086 | KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT); |
1087 | if ((flags & VM_SLEEP) != 0) { |
1088 | ASSERT_SLEEPABLE(); |
1089 | } |
1090 | KASSERT((align & vm->vm_quantum_mask) == 0); |
1091 | KASSERT((align & (align - 1)) == 0); |
1092 | KASSERT((phase & vm->vm_quantum_mask) == 0); |
1093 | KASSERT((nocross & vm->vm_quantum_mask) == 0); |
1094 | KASSERT((nocross & (nocross - 1)) == 0); |
1095 | KASSERT((align == 0 && phase == 0) || phase < align); |
1096 | KASSERT(nocross == 0 || nocross >= size); |
1097 | KASSERT(minaddr <= maxaddr); |
1098 | KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross)); |
1099 | |
1100 | if (align == 0) { |
1101 | align = vm->vm_quantum_mask + 1; |
1102 | } |
1103 | |
1104 | /* |
1105 | * allocate boundary tags before acquiring the vmem lock. |
1106 | */ |
1107 | btnew = bt_alloc(vm, flags); |
1108 | if (btnew == NULL) { |
1109 | return ENOMEM; |
1110 | } |
1111 | btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */ |
1112 | if (btnew2 == NULL) { |
1113 | bt_free(vm, btnew); |
1114 | return ENOMEM; |
1115 | } |
1116 | |
1117 | /* |
1118 | * choose a free block from which we allocate. |
1119 | */ |
1120 | retry_strat: |
1121 | first = bt_freehead_toalloc(vm, size, strat); |
1122 | end = &vm->vm_freelist[VMEM_MAXORDER]; |
1123 | retry: |
1124 | bt = NULL; |
1125 | VMEM_LOCK(vm); |
1126 | vmem_check(vm); |
1127 | if (strat == VM_INSTANTFIT) { |
1128 | /* |
1129 | * just choose the first block which satisfies our restrictions. |
1130 | * |
1131 | * note that we don't need to check the size of the blocks |
		 * because any blocks found on these lists should be larger than
1133 | * the given size. |
1134 | */ |
1135 | for (list = first; list < end; list++) { |
1136 | bt = LIST_FIRST(list); |
1137 | if (bt != NULL) { |
1138 | rc = vmem_fit(bt, size, align, phase, |
1139 | nocross, minaddr, maxaddr, &start); |
1140 | if (rc == 0) { |
1141 | goto gotit; |
1142 | } |
1143 | /* |
1144 | * don't bother to follow the bt_freelist link |
1145 | * here. the list can be very long and we are |
1146 | * told to run fast. blocks from the later free |
1147 | * lists are larger and have better chances to |
1148 | * satisfy our restrictions. |
1149 | */ |
1150 | } |
1151 | } |
1152 | } else { /* VM_BESTFIT */ |
1153 | /* |
1154 | * we assume that, for space efficiency, it's better to |
1155 | * allocate from a smaller block. thus we will start searching |
		 * from a lower-order list than VM_INSTANTFIT does.
1157 | * however, don't bother to find the smallest block in a free |
1158 | * list because the list can be very long. we can revisit it |
1159 | * if/when it turns out to be a problem. |
1160 | * |
1161 | * note that the 'first' list can contain blocks smaller than |
1162 | * the requested size. thus we need to check bt_size. |
1163 | */ |
1164 | for (list = first; list < end; list++) { |
1165 | LIST_FOREACH(bt, list, bt_freelist) { |
1166 | if (bt->bt_size >= size) { |
1167 | rc = vmem_fit(bt, size, align, phase, |
1168 | nocross, minaddr, maxaddr, &start); |
1169 | if (rc == 0) { |
1170 | goto gotit; |
1171 | } |
1172 | } |
1173 | } |
1174 | } |
1175 | } |
1176 | VMEM_UNLOCK(vm); |
1177 | #if 1 |
1178 | if (strat == VM_INSTANTFIT) { |
1179 | strat = VM_BESTFIT; |
1180 | goto retry_strat; |
1181 | } |
1182 | #endif |
1183 | if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) { |
1184 | |
1185 | /* |
1186 | * XXX should try to import a region large enough to |
1187 | * satisfy restrictions? |
1188 | */ |
1189 | |
1190 | goto fail; |
1191 | } |
1192 | /* XXX eeek, minaddr & maxaddr not respected */ |
1193 | if (vmem_import(vm, size, flags) == 0) { |
1194 | goto retry; |
1195 | } |
1196 | /* XXX */ |
1197 | |
1198 | if ((flags & VM_SLEEP) != 0) { |
1199 | vmem_kick_pdaemon(); |
1200 | VMEM_LOCK(vm); |
1201 | VMEM_CONDVAR_WAIT(vm); |
1202 | VMEM_UNLOCK(vm); |
1203 | goto retry; |
1204 | } |
1205 | fail: |
1206 | bt_free(vm, btnew); |
1207 | bt_free(vm, btnew2); |
1208 | return ENOMEM; |
1209 | |
1210 | gotit: |
1211 | KASSERT(bt->bt_type == BT_TYPE_FREE); |
1212 | KASSERT(bt->bt_size >= size); |
1213 | bt_remfree(vm, bt); |
1214 | vmem_check(vm); |
1215 | if (bt->bt_start != start) { |
1216 | btnew2->bt_type = BT_TYPE_FREE; |
1217 | btnew2->bt_start = bt->bt_start; |
1218 | btnew2->bt_size = start - bt->bt_start; |
1219 | bt->bt_start = start; |
1220 | bt->bt_size -= btnew2->bt_size; |
1221 | bt_insfree(vm, btnew2); |
1222 | bt_insseg(vm, btnew2, TAILQ_PREV(bt, vmem_seglist, bt_seglist)); |
1223 | btnew2 = NULL; |
1224 | vmem_check(vm); |
1225 | } |
1226 | KASSERT(bt->bt_start == start); |
1227 | if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) { |
1228 | /* split */ |
1229 | btnew->bt_type = BT_TYPE_BUSY; |
1230 | btnew->bt_start = bt->bt_start; |
1231 | btnew->bt_size = size; |
1232 | bt->bt_start = bt->bt_start + size; |
1233 | bt->bt_size -= size; |
1234 | bt_insfree(vm, bt); |
1235 | bt_insseg(vm, btnew, TAILQ_PREV(bt, vmem_seglist, bt_seglist)); |
1236 | bt_insbusy(vm, btnew); |
1237 | vmem_check(vm); |
1238 | VMEM_UNLOCK(vm); |
1239 | } else { |
1240 | bt->bt_type = BT_TYPE_BUSY; |
1241 | bt_insbusy(vm, bt); |
1242 | vmem_check(vm); |
1243 | VMEM_UNLOCK(vm); |
1244 | bt_free(vm, btnew); |
1245 | btnew = bt; |
1246 | } |
1247 | if (btnew2 != NULL) { |
1248 | bt_free(vm, btnew2); |
1249 | } |
1250 | KASSERT(btnew->bt_size >= size); |
1251 | btnew->bt_type = BT_TYPE_BUSY; |
1252 | |
1253 | if (addrp != NULL) |
1254 | *addrp = btnew->bt_start; |
1255 | return 0; |
1256 | } |
1257 | |
1258 | /* |
1259 | * vmem_free: free the resource to the arena. |
1260 | */ |
1261 | |
1262 | void |
1263 | vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size) |
1264 | { |
1265 | |
1266 | KASSERT(size > 0); |
1267 | |
1268 | #if defined(QCACHE) |
1269 | if (size <= vm->vm_qcache_max) { |
1270 | int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift; |
1271 | qcache_t *qc = vm->vm_qcache[qidx - 1]; |
1272 | |
1273 | pool_cache_put(qc->qc_cache, (void *)addr); |
1274 | return; |
1275 | } |
1276 | #endif /* defined(QCACHE) */ |
1277 | |
1278 | vmem_xfree(vm, addr, size); |
1279 | } |
1280 | |
1281 | void |
1282 | vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size) |
1283 | { |
1284 | bt_t *bt; |
1285 | bt_t *t; |
1286 | LIST_HEAD(, vmem_btag) tofree; |
1287 | |
1288 | LIST_INIT(&tofree); |
1289 | |
1290 | KASSERT(size > 0); |
1291 | |
1292 | VMEM_LOCK(vm); |
1293 | |
1294 | bt = bt_lookupbusy(vm, addr); |
1295 | KASSERT(bt != NULL); |
1296 | KASSERT(bt->bt_start == addr); |
1297 | KASSERT(bt->bt_size == vmem_roundup_size(vm, size) || |
1298 | bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask); |
1299 | KASSERT(bt->bt_type == BT_TYPE_BUSY); |
1300 | bt_rembusy(vm, bt); |
1301 | bt->bt_type = BT_TYPE_FREE; |
1302 | |
1303 | /* coalesce */ |
1304 | t = TAILQ_NEXT(bt, bt_seglist); |
1305 | if (t != NULL && t->bt_type == BT_TYPE_FREE) { |
1306 | KASSERT(BT_END(bt) < t->bt_start); /* YYY */ |
1307 | bt_remfree(vm, t); |
1308 | bt_remseg(vm, t); |
1309 | bt->bt_size += t->bt_size; |
1310 | LIST_INSERT_HEAD(&tofree, t, bt_freelist); |
1311 | } |
1312 | t = TAILQ_PREV(bt, vmem_seglist, bt_seglist); |
1313 | if (t != NULL && t->bt_type == BT_TYPE_FREE) { |
1314 | KASSERT(BT_END(t) < bt->bt_start); /* YYY */ |
1315 | bt_remfree(vm, t); |
1316 | bt_remseg(vm, t); |
1317 | bt->bt_size += t->bt_size; |
1318 | bt->bt_start = t->bt_start; |
1319 | LIST_INSERT_HEAD(&tofree, t, bt_freelist); |
1320 | } |
1321 | |
1322 | t = TAILQ_PREV(bt, vmem_seglist, bt_seglist); |
1323 | KASSERT(t != NULL); |
1324 | KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY); |
1325 | if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN && |
1326 | t->bt_size == bt->bt_size) { |
1327 | vmem_addr_t spanaddr; |
1328 | vmem_size_t spansize; |
1329 | |
1330 | KASSERT(t->bt_start == bt->bt_start); |
1331 | spanaddr = bt->bt_start; |
1332 | spansize = bt->bt_size; |
1333 | bt_remseg(vm, bt); |
1334 | LIST_INSERT_HEAD(&tofree, bt, bt_freelist); |
1335 | bt_remseg(vm, t); |
1336 | LIST_INSERT_HEAD(&tofree, t, bt_freelist); |
1337 | vm->vm_size -= spansize; |
1338 | VMEM_CONDVAR_BROADCAST(vm); |
1339 | VMEM_UNLOCK(vm); |
1340 | (*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize); |
1341 | } else { |
1342 | bt_insfree(vm, bt); |
1343 | VMEM_CONDVAR_BROADCAST(vm); |
1344 | VMEM_UNLOCK(vm); |
1345 | } |
1346 | |
1347 | while (!LIST_EMPTY(&tofree)) { |
1348 | t = LIST_FIRST(&tofree); |
1349 | LIST_REMOVE(t, bt_freelist); |
1350 | bt_free(vm, t); |
1351 | } |
1352 | |
1353 | bt_freetrim(vm, BT_MAXFREE); |
1354 | } |
1355 | |
1356 | /* |
1357 | * vmem_add: |
1358 | * |
1359 | * => caller must ensure appropriate spl, |
1360 | * if the arena can be accessed from interrupt context. |
1361 | */ |
1362 | |
1363 | int |
1364 | vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags) |
1365 | { |
1366 | |
1367 | return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC); |
1368 | } |
1369 | |
1370 | /* |
 * vmem_size: information about an arena's size
 *
 * => return the free/allocated size of the arena
1374 | */ |
1375 | vmem_size_t |
1376 | vmem_size(vmem_t *vm, int typemask) |
1377 | { |
1378 | |
1379 | switch (typemask) { |
1380 | case VMEM_ALLOC: |
1381 | return vm->vm_inuse; |
1382 | case VMEM_FREE: |
1383 | return vm->vm_size - vm->vm_inuse; |
1384 | case VMEM_FREE|VMEM_ALLOC: |
1385 | return vm->vm_size; |
1386 | default: |
1387 | panic("vmem_size" ); |
1388 | } |
1389 | } |
1390 | |
1391 | /* ---- rehash */ |
1392 | |
1393 | #if defined(_KERNEL) |
1394 | static struct callout vmem_rehash_ch; |
1395 | static int vmem_rehash_interval; |
1396 | static struct workqueue *vmem_rehash_wq; |
1397 | static struct work vmem_rehash_wk; |
1398 | |
1399 | static void |
1400 | vmem_rehash_all(struct work *wk, void *dummy) |
1401 | { |
1402 | vmem_t *vm; |
1403 | |
1404 | KASSERT(wk == &vmem_rehash_wk); |
1405 | mutex_enter(&vmem_list_lock); |
1406 | LIST_FOREACH(vm, &vmem_list, vm_alllist) { |
1407 | size_t desired; |
1408 | size_t current; |
1409 | |
1410 | if (!VMEM_TRYLOCK(vm)) { |
1411 | continue; |
1412 | } |
1413 | desired = vm->vm_nbusytag; |
1414 | current = vm->vm_hashsize; |
1415 | VMEM_UNLOCK(vm); |
1416 | |
1417 | if (desired > VMEM_HASHSIZE_MAX) { |
1418 | desired = VMEM_HASHSIZE_MAX; |
1419 | } else if (desired < VMEM_HASHSIZE_MIN) { |
1420 | desired = VMEM_HASHSIZE_MIN; |
1421 | } |
1422 | if (desired > current * 2 || desired * 2 < current) { |
1423 | vmem_rehash(vm, desired, VM_NOSLEEP); |
1424 | } |
1425 | } |
1426 | mutex_exit(&vmem_list_lock); |
1427 | |
1428 | callout_schedule(&vmem_rehash_ch, vmem_rehash_interval); |
1429 | } |
1430 | |
1431 | static void |
1432 | vmem_rehash_all_kick(void *dummy) |
1433 | { |
1434 | |
1435 | workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL); |
1436 | } |
1437 | |
1438 | void |
1439 | vmem_rehash_start(void) |
1440 | { |
1441 | int error; |
1442 | |
	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
1444 | vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE); |
1445 | if (error) { |
1446 | panic("%s: workqueue_create %d\n" , __func__, error); |
1447 | } |
1448 | callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE); |
1449 | callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL); |
1450 | |
1451 | vmem_rehash_interval = hz * 10; |
1452 | callout_schedule(&vmem_rehash_ch, vmem_rehash_interval); |
1453 | } |
1454 | #endif /* defined(_KERNEL) */ |
1455 | |
1456 | /* ---- debug */ |
1457 | |
1458 | #if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) |
1459 | |
1460 | static void bt_dump(const bt_t *, void (*)(const char *, ...) |
1461 | __printflike(1, 2)); |
1462 | |
1463 | static const char * |
1464 | bt_type_string(int type) |
1465 | { |
1466 | static const char * const table[] = { |
		[BT_TYPE_BUSY] = "busy",
		[BT_TYPE_FREE] = "free",
		[BT_TYPE_SPAN] = "span",
		[BT_TYPE_SPAN_STATIC] = "static span",
1471 | }; |
1472 | |
1473 | if (type >= __arraycount(table)) { |
1474 | return "BOGUS" ; |
1475 | } |
1476 | return table[type]; |
1477 | } |
1478 | |
1479 | static void |
1480 | bt_dump(const bt_t *bt, void (*pr)(const char *, ...)) |
1481 | { |
1482 | |
1483 | (*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n" , |
1484 | bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size, |
1485 | bt->bt_type, bt_type_string(bt->bt_type)); |
1486 | } |
1487 | |
1488 | static void |
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...) __printflike(1, 2))
1490 | { |
1491 | const bt_t *bt; |
1492 | int i; |
1493 | |
1494 | (*pr)("vmem %p '%s'\n" , vm, vm->vm_name); |
1495 | TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { |
1496 | bt_dump(bt, pr); |
1497 | } |
1498 | |
1499 | for (i = 0; i < VMEM_MAXORDER; i++) { |
1500 | const struct vmem_freelist *fl = &vm->vm_freelist[i]; |
1501 | |
1502 | if (LIST_EMPTY(fl)) { |
1503 | continue; |
1504 | } |
1505 | |
1506 | (*pr)("freelist[%d]\n" , i); |
1507 | LIST_FOREACH(bt, fl, bt_freelist) { |
1508 | bt_dump(bt, pr); |
1509 | } |
1510 | } |
1511 | } |
1512 | |
1513 | #endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */ |
1514 | |
1515 | #if defined(DDB) |
1516 | static bt_t * |
1517 | vmem_whatis_lookup(vmem_t *vm, uintptr_t addr) |
1518 | { |
1519 | bt_t *bt; |
1520 | |
1521 | TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { |
1522 | if (BT_ISSPAN_P(bt)) { |
1523 | continue; |
1524 | } |
1525 | if (bt->bt_start <= addr && addr <= BT_END(bt)) { |
1526 | return bt; |
1527 | } |
1528 | } |
1529 | |
1530 | return NULL; |
1531 | } |
1532 | |
1533 | void |
1534 | vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...)) |
1535 | { |
1536 | vmem_t *vm; |
1537 | |
1538 | LIST_FOREACH(vm, &vmem_list, vm_alllist) { |
1539 | bt_t *bt; |
1540 | |
1541 | bt = vmem_whatis_lookup(vm, addr); |
1542 | if (bt == NULL) { |
1543 | continue; |
1544 | } |
1545 | (*pr)("%p is %p+%zu in VMEM '%s' (%s)\n" , |
1546 | (void *)addr, (void *)bt->bt_start, |
1547 | (size_t)(addr - bt->bt_start), vm->vm_name, |
1548 | (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free" ); |
1549 | } |
1550 | } |
1551 | |
1552 | void |
1553 | vmem_printall(const char *modif, void (*pr)(const char *, ...)) |
1554 | { |
1555 | const vmem_t *vm; |
1556 | |
1557 | LIST_FOREACH(vm, &vmem_list, vm_alllist) { |
1558 | vmem_dump(vm, pr); |
1559 | } |
1560 | } |
1561 | |
1562 | void |
1563 | vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...)) |
1564 | { |
1565 | const vmem_t *vm = (const void *)addr; |
1566 | |
1567 | vmem_dump(vm, pr); |
1568 | } |
1569 | #endif /* defined(DDB) */ |
1570 | |
1571 | #if defined(_KERNEL) |
1572 | #define vmem_printf printf |
1573 | #else |
1574 | #include <stdio.h> |
1575 | #include <stdarg.h> |
1576 | |
1577 | static void |
1578 | vmem_printf(const char *fmt, ...) |
1579 | { |
1580 | va_list ap; |
1581 | va_start(ap, fmt); |
1582 | vprintf(fmt, ap); |
1583 | va_end(ap); |
1584 | } |
1585 | #endif |
1586 | |
1587 | #if defined(VMEM_SANITY) |
1588 | |
1589 | static bool |
1590 | vmem_check_sanity(vmem_t *vm) |
1591 | { |
1592 | const bt_t *bt, *bt2; |
1593 | |
1594 | KASSERT(vm != NULL); |
1595 | |
1596 | TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { |
1597 | if (bt->bt_start > BT_END(bt)) { |
1598 | printf("corrupted tag\n" ); |
1599 | bt_dump(bt, vmem_printf); |
1600 | return false; |
1601 | } |
1602 | } |
1603 | TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { |
1604 | TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { |
1605 | if (bt == bt2) { |
1606 | continue; |
1607 | } |
1608 | if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { |
1609 | continue; |
1610 | } |
1611 | if (bt->bt_start <= BT_END(bt2) && |
1612 | bt2->bt_start <= BT_END(bt)) { |
1613 | printf("overwrapped tags\n" ); |
1614 | bt_dump(bt, vmem_printf); |
1615 | bt_dump(bt2, vmem_printf); |
1616 | return false; |
1617 | } |
1618 | } |
1619 | } |
1620 | |
1621 | return true; |
1622 | } |
1623 | |
1624 | static void |
1625 | vmem_check(vmem_t *vm) |
1626 | { |
1627 | |
1628 | if (!vmem_check_sanity(vm)) { |
1629 | panic("insanity vmem %p" , vm); |
1630 | } |
1631 | } |
1632 | |
1633 | #endif /* defined(VMEM_SANITY) */ |
1634 | |
1635 | #if defined(UNITTEST) |
1636 | int |
1637 | main(void) |
1638 | { |
1639 | int rc; |
1640 | vmem_t *vm; |
1641 | vmem_addr_t p; |
1642 | struct reg { |
1643 | vmem_addr_t p; |
1644 | vmem_size_t sz; |
1645 | bool x; |
1646 | } *reg = NULL; |
1647 | int nreg = 0; |
1648 | int nalloc = 0; |
1649 | int nfree = 0; |
1650 | vmem_size_t total = 0; |
1651 | #if 1 |
1652 | vm_flag_t strat = VM_INSTANTFIT; |
1653 | #else |
1654 | vm_flag_t strat = VM_BESTFIT; |
1655 | #endif |
1656 | |
1657 | vm = vmem_create("test" , 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP, |
1658 | #ifdef _KERNEL |
1659 | IPL_NONE |
1660 | #else |
1661 | 0 |
1662 | #endif |
1663 | ); |
1664 | if (vm == NULL) { |
1665 | printf("vmem_create\n" ); |
1666 | exit(EXIT_FAILURE); |
1667 | } |
1668 | vmem_dump(vm, vmem_printf); |
1669 | |
1670 | rc = vmem_add(vm, 0, 50, VM_SLEEP); |
1671 | assert(rc == 0); |
1672 | rc = vmem_add(vm, 100, 200, VM_SLEEP); |
1673 | assert(rc == 0); |
1674 | rc = vmem_add(vm, 2000, 1, VM_SLEEP); |
1675 | assert(rc == 0); |
1676 | rc = vmem_add(vm, 40000, 65536, VM_SLEEP); |
1677 | assert(rc == 0); |
1678 | rc = vmem_add(vm, 10000, 10000, VM_SLEEP); |
1679 | assert(rc == 0); |
1680 | rc = vmem_add(vm, 500, 1000, VM_SLEEP); |
1681 | assert(rc == 0); |
1682 | rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP); |
1683 | assert(rc == 0); |
1684 | rc = vmem_xalloc(vm, 0x101, 0, 0, 0, |
1685 | 0xffffff00, 0xffffffff, strat|VM_SLEEP, &p); |
1686 | assert(rc != 0); |
1687 | rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p); |
1688 | assert(rc == 0 && p == 0); |
1689 | vmem_xfree(vm, p, 50); |
1690 | rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p); |
1691 | assert(rc == 0 && p == 0); |
1692 | rc = vmem_xalloc(vm, 0x100, 0, 0, 0, |
1693 | 0xffffff01, 0xffffffff, strat|VM_SLEEP, &p); |
1694 | assert(rc != 0); |
1695 | rc = vmem_xalloc(vm, 0x100, 0, 0, 0, |
1696 | 0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p); |
1697 | assert(rc != 0); |
1698 | rc = vmem_xalloc(vm, 0x100, 0, 0, 0, |
1699 | 0xffffff00, 0xffffffff, strat|VM_SLEEP, &p); |
1700 | assert(rc == 0); |
1701 | vmem_dump(vm, vmem_printf); |
1702 | for (;;) { |
1703 | struct reg *r; |
1704 | int t = rand() % 100; |
1705 | |
1706 | if (t > 45) { |
1707 | /* alloc */ |
1708 | vmem_size_t sz = rand() % 500 + 1; |
1709 | bool x; |
1710 | vmem_size_t align, phase, nocross; |
1711 | vmem_addr_t minaddr, maxaddr; |
1712 | |
1713 | if (t > 70) { |
1714 | x = true; |
1715 | /* XXX */ |
1716 | align = 1 << (rand() % 15); |
1717 | phase = rand() % 65536; |
1718 | nocross = 1 << (rand() % 15); |
1719 | if (align <= phase) { |
1720 | phase = 0; |
1721 | } |
1722 | if (VMEM_CROSS_P(phase, phase + sz - 1, |
1723 | nocross)) { |
1724 | nocross = 0; |
1725 | } |
1726 | do { |
1727 | minaddr = rand() % 50000; |
1728 | maxaddr = rand() % 70000; |
1729 | } while (minaddr > maxaddr); |
1730 | printf("=== xalloc %" PRIu64 |
1731 | " align=%" PRIu64 ", phase=%" PRIu64 |
1732 | ", nocross=%" PRIu64 ", min=%" PRIu64 |
1733 | ", max=%" PRIu64 "\n" , |
1734 | (uint64_t)sz, |
1735 | (uint64_t)align, |
1736 | (uint64_t)phase, |
1737 | (uint64_t)nocross, |
1738 | (uint64_t)minaddr, |
1739 | (uint64_t)maxaddr); |
1740 | rc = vmem_xalloc(vm, sz, align, phase, nocross, |
1741 | minaddr, maxaddr, strat|VM_SLEEP, &p); |
1742 | } else { |
1743 | x = false; |
1744 | printf("=== alloc %" PRIu64 "\n" , (uint64_t)sz); |
1745 | rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p); |
1746 | } |
1747 | printf("-> %" PRIu64 "\n" , (uint64_t)p); |
1748 | vmem_dump(vm, vmem_printf); |
1749 | if (rc != 0) { |
1750 | if (x) { |
1751 | continue; |
1752 | } |
1753 | break; |
1754 | } |
1755 | nreg++; |
1756 | reg = realloc(reg, sizeof(*reg) * nreg); |
1757 | r = ®[nreg - 1]; |
1758 | r->p = p; |
1759 | r->sz = sz; |
1760 | r->x = x; |
1761 | total += sz; |
1762 | nalloc++; |
1763 | } else if (nreg != 0) { |
1764 | /* free */ |
1765 | r = ®[rand() % nreg]; |
1766 | printf("=== free %" PRIu64 ", %" PRIu64 "\n" , |
1767 | (uint64_t)r->p, (uint64_t)r->sz); |
1768 | if (r->x) { |
1769 | vmem_xfree(vm, r->p, r->sz); |
1770 | } else { |
1771 | vmem_free(vm, r->p, r->sz); |
1772 | } |
1773 | total -= r->sz; |
1774 | vmem_dump(vm, vmem_printf); |
1775 | *r = reg[nreg - 1]; |
1776 | nreg--; |
1777 | nfree++; |
1778 | } |
1779 | printf("total=%" PRIu64 "\n" , (uint64_t)total); |
1780 | } |
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
1782 | (uint64_t)total, nalloc, nfree); |
1783 | exit(EXIT_SUCCESS); |
1784 | } |
1785 | #endif /* defined(UNITTEST) */ |
1786 | |