1 | /* $NetBSD: kern_rwlock_obj.c,v 1.3 2011/05/13 22:16:43 rmind Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Andrew Doran. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ |
31 | |
32 | #include <sys/cdefs.h> |
33 | __KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.3 2011/05/13 22:16:43 rmind Exp $" ); |
34 | |
35 | #include <sys/param.h> |
36 | #include <sys/atomic.h> |
37 | #include <sys/pool.h> |
38 | #include <sys/rwlock.h> |
39 | |
/* RW lock object cache (note: this is the rwlock analogue of the mutex obj cache) */
41 | #define RW_OBJ_MAGIC 0x85d3c85d |
struct krwobj {
	krwlock_t	ro_lock;	/* must be first: callers cast krwlock_t * <-> struct krwobj * */
	u_int		ro_magic;	/* always RW_OBJ_MAGIC; sanity-checked in hold/free */
	u_int		ro_refcnt;	/* reference count, updated with atomic ops */
};
47 | |
static int	rw_obj_ctor(void *, void *, int);

/* Backing pool cache for all rwlock objects; created by rw_obj_init(). */
static pool_cache_t	rw_obj_cache	__read_mostly;
51 | |
52 | /* |
53 | * rw_obj_init: |
54 | * |
55 | * Initialize the rw object store. |
56 | */ |
57 | void |
58 | rw_obj_init(void) |
59 | { |
60 | |
61 | rw_obj_cache = pool_cache_init(sizeof(struct krwobj), |
62 | coherency_unit, 0, 0, "rwlock" , NULL, IPL_NONE, rw_obj_ctor, |
63 | NULL, NULL); |
64 | } |
65 | |
66 | /* |
67 | * rw_obj_ctor: |
68 | * |
69 | * Initialize a new lock for the cache. |
70 | */ |
71 | static int |
72 | rw_obj_ctor(void *arg, void *obj, int flags) |
73 | { |
74 | struct krwobj * ro = obj; |
75 | |
76 | ro->ro_magic = RW_OBJ_MAGIC; |
77 | |
78 | return 0; |
79 | } |
80 | |
81 | /* |
82 | * rw_obj_alloc: |
83 | * |
84 | * Allocate a single lock object. |
85 | */ |
86 | krwlock_t * |
87 | rw_obj_alloc(void) |
88 | { |
89 | struct krwobj *ro; |
90 | |
91 | ro = pool_cache_get(rw_obj_cache, PR_WAITOK); |
92 | rw_init(&ro->ro_lock); |
93 | ro->ro_refcnt = 1; |
94 | |
95 | return (krwlock_t *)ro; |
96 | } |
97 | |
98 | /* |
99 | * rw_obj_hold: |
100 | * |
101 | * Add a single reference to a lock object. A reference to the object |
102 | * must already be held, and must be held across this call. |
103 | */ |
104 | void |
105 | rw_obj_hold(krwlock_t *lock) |
106 | { |
107 | struct krwobj *ro = (struct krwobj *)lock; |
108 | |
109 | KASSERT(ro->ro_magic == RW_OBJ_MAGIC); |
110 | KASSERT(ro->ro_refcnt > 0); |
111 | |
112 | atomic_inc_uint(&ro->ro_refcnt); |
113 | } |
114 | |
115 | /* |
116 | * rw_obj_free: |
117 | * |
118 | * Drop a reference from a lock object. If the last reference is being |
119 | * dropped, free the object and return true. Otherwise, return false. |
120 | */ |
121 | bool |
122 | rw_obj_free(krwlock_t *lock) |
123 | { |
124 | struct krwobj *ro = (struct krwobj *)lock; |
125 | |
126 | KASSERT(ro->ro_magic == RW_OBJ_MAGIC); |
127 | KASSERT(ro->ro_refcnt > 0); |
128 | |
129 | if (atomic_dec_uint_nv(&ro->ro_refcnt) > 0) { |
130 | return false; |
131 | } |
132 | rw_destroy(&ro->ro_lock); |
133 | pool_cache_put(rw_obj_cache, ro); |
134 | return true; |
135 | } |
136 | |