/*	$NetBSD: mutex.h,v 1.20 2010/02/08 09:54:27 skrll Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

/*
 * There are 2 types of mutexes:
 *
 *	* Adaptive -- If the lock is already held, the thread attempting
 *	  to acquire the lock determines if the thread that holds it is
 *	  currently running.  If so, it spins, else it sleeps.
 *
 *	* Spin -- If the lock is already held, the thread attempting to
 *	  acquire the lock spins.  The IPL will be raised on entry.
 *
 * Machine dependent code must provide the following:
 *
 *	struct mutex
 *		The actual mutex structure.  This structure is mostly
 *		opaque to machine-independent code; most accesses are done
 *		through macros.  However, machine-independent code must
 *		be able to access the following members:
 *
 *		uintptr_t		mtx_owner
 *		ipl_cookie_t		mtx_ipl
 *		__cpu_simple_lock_t	mtx_lock
 *
 * If an architecture can be considered 'simple' (no interlock required in
 * the MP case, or no MP) it need only define __HAVE_SIMPLE_MUTEXES and
 * provide the following:
 *
 *	struct mutex
 *
 *		[additionally:]
 *		volatile integer	mtx_id
 *
 *	MUTEX_RECEIVE(mtx)
 *		Post a load fence after acquiring the mutex, if necessary.
 *
 *	MUTEX_GIVE(mtx)
 *		Post a load/store fence before releasing the mutex, if
 *		necessary.
 *
 *	MUTEX_CAS(ptr, old, new)
 *		Perform an atomic "compare and swap" operation and
 *		evaluate to true or false according to the success of
 *		the operation.
 *
 * Otherwise, the following must be defined:
 *
 *	MUTEX_INITIALIZE_SPIN(mtx, dodebug, minipl)
 *		Initialize a spin mutex.
 *
 *	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)
 *		Initialize an adaptive mutex.
 *
 *	MUTEX_DESTROY(mtx)
 *		Tear down a mutex.
 *
 *	MUTEX_ADAPTIVE_P(mtx)
 *		Evaluates to true if the mutex is an adaptive mutex.
 *
 *	MUTEX_SPIN_P(mtx)
 *		Evaluates to true if the mutex is a spin mutex.
 *
 *	MUTEX_OWNER(owner)
 *		Returns the owner of the adaptive mutex (LWP address).
 *
 *	MUTEX_OWNED(owner)
 *		Returns non-zero if an adaptive mutex is currently
 *		held by an LWP.
 *
 *	MUTEX_HAS_WAITERS(mtx)
 *		Returns true if the mutex has waiters.
 *
 *	MUTEX_SET_WAITERS(mtx)
 *		Mark the mutex as having waiters.
 *
 *	MUTEX_ACQUIRE(mtx, owner)
 *		Try to acquire an adaptive mutex such that:
 *			if (lock held OR waiters)
 *				return 0;
 *			else
 *				return 1;
 *		Must be MP/interrupt atomic.
 *
 *	MUTEX_RELEASE(mtx)
 *		Release the lock and clear the "has waiters" indication.
 *		Must be interrupt atomic, need not be MP safe.
 *
 *	MUTEX_DEBUG_P(mtx)
 *		Evaluates to true if the mutex is initialized with
 *		dodebug==true.  Only used in the LOCKDEBUG case.
 *
 * Machine dependent code may optionally provide stubs for the following
 * functions to implement the easy (unlocked / no waiters) cases.  If
 * these stubs are provided, __HAVE_MUTEX_STUBS should be defined:
 *
 *	mutex_enter()
 *	mutex_exit()
 *
 * An illustrative sketch of such a stub follows this comment.
 *
 * Two additional stubs may be implemented that handle only the spinlock
 * case, primarily for the scheduler.  __HAVE_SPIN_MUTEX_STUBS should be
 * defined if these are provided; a sketch appears in the __MUTEX_PRIVATE
 * section below:
 *
 *	mutex_spin_enter()
 *	mutex_spin_exit()
 */
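
/*
 * As an illustration only (a sketch, not a normative implementation):
 * on a __HAVE_SIMPLE_MUTEXES architecture, a mutex_enter() stub
 * typically reduces the uncontended case to a single compare-and-swap,
 * falling back to mutex_vector_enter() on contention.  Rendered in C,
 * assuming curlwp names the running LWP as elsewhere in the kernel:
 *
 *	void
 *	mutex_enter(kmutex_t *mtx)
 *	{
 *		if (MUTEX_CAS(&mtx->mtx_owner, 0, (uintptr_t)curlwp))
 *			MUTEX_RECEIVE(mtx);
 *		else
 *			mutex_vector_enter(mtx);
 *	}
 *
 * Real stubs are usually written in assembly.
 */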

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

#if !defined(_KERNEL)
#include <sys/types.h>
#include <sys/inttypes.h>
#endif

typedef enum kmutex_type_t {
	MUTEX_SPIN = 0,		/* To get a spin mutex at IPL_NONE */
	MUTEX_ADAPTIVE = 1,	/* For porting code written for Solaris */
	MUTEX_DEFAULT = 2,	/* The only native, endorsed type */
	MUTEX_DRIVER = 3,	/* For porting code written for Solaris */
	MUTEX_NODEBUG = 4	/* Disables LOCKDEBUG; use with care */
} kmutex_type_t;
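
/*
 * Example usage (illustrative only; sc is a hypothetical softc).
 * Almost all code should use MUTEX_DEFAULT; the third argument to
 * mutex_init() gives the highest IPL from which the lock will be
 * acquired:
 *
 *	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_busy = true;
 *	mutex_exit(&sc->sc_lock);
 *
 *	mutex_destroy(&sc->sc_lock);
 */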

typedef struct kmutex kmutex_t;

#if defined(__MUTEX_PRIVATE)

#define	MUTEX_THREAD		((uintptr_t)-16L)

#define	MUTEX_BIT_SPIN		0x01
#define	MUTEX_BIT_WAITERS	0x02

#if defined(LOCKDEBUG)
#define	MUTEX_BIT_NODEBUG	0x04	/* LOCKDEBUG disabled */
#else
#define	MUTEX_BIT_NODEBUG	0x00	/* do nothing */
#endif	/* LOCKDEBUG */

#define	MUTEX_SPIN_IPL(mtx)	((mtx)->mtx_ipl)
#define	MUTEX_SPIN_OLDSPL(ci)	((ci)->ci_mtx_oldspl)

void	mutex_vector_enter(kmutex_t *);
void	mutex_vector_exit(kmutex_t *);
void	mutex_spin_retry(kmutex_t *);
void	mutex_wakeup(kmutex_t *);
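
/*
 * Illustrative shape of an MD mutex_spin_enter() stub (a sketch under
 * the definitions above, not a normative implementation): raise the
 * IPL using the cookie stored in the mutex, save the old SPL and
 * adjust the per-CPU spin mutex count, then try the interlock once,
 * leaving contention to mutex_spin_retry():
 *
 *	s = splraiseipl(MUTEX_SPIN_IPL(mtx));
 *	... record s via MUTEX_SPIN_OLDSPL() and adjust ci_mtx_count ...
 *	if (!__cpu_simple_lock_try(&mtx->mtx_lock))
 *		mutex_spin_retry(mtx);
 */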

#endif	/* __MUTEX_PRIVATE */

#ifdef _KERNEL
#include <sys/intr.h>
#endif

#include <machine/mutex.h>

/*
 * Return true if no spin mutexes are held by the current CPU.
 */
#ifndef MUTEX_NO_SPIN_ACTIVE_P
#define	MUTEX_NO_SPIN_ACTIVE_P(ci)	((ci)->ci_mtx_count == 0)
#endif
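
/*
 * For example (illustrative), code that must not hold any spin mutexes,
 * such as code about to sleep, could assert:
 *
 *	KASSERT(MUTEX_NO_SPIN_ACTIVE_P(curcpu()));
 */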

#ifdef _KERNEL

void	mutex_init(kmutex_t *, kmutex_type_t, int);
void	mutex_destroy(kmutex_t *);

void	mutex_enter(kmutex_t *);
void	mutex_exit(kmutex_t *);

void	mutex_spin_enter(kmutex_t *);
void	mutex_spin_exit(kmutex_t *);

int	mutex_tryenter(kmutex_t *);

int	mutex_owned(kmutex_t *);
lwp_t	*mutex_owner(kmutex_t *);
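
/*
 * mutex_owned() is intended for diagnostic assertions, not for making
 * run-time locking decisions, e.g. (illustrative):
 *
 *	KASSERT(mutex_owned(&sc->sc_lock));
 */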

void	mutex_obj_init(void);
kmutex_t *mutex_obj_alloc(kmutex_type_t, int);
void	mutex_obj_hold(kmutex_t *);
bool	mutex_obj_free(kmutex_t *);
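
/*
 * Illustrative lifecycle of a reference-counted mutex object (lock is
 * a hypothetical variable).  mutex_obj_hold() gains a reference;
 * mutex_obj_free() drops one and returns true once the last reference
 * has been dropped and the mutex freed:
 *
 *	kmutex_t *lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *	mutex_obj_hold(lock);
 *	...
 *	(void)mutex_obj_free(lock);
 *	(void)mutex_obj_free(lock);
 */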

#endif	/* _KERNEL */

#endif	/* _SYS_MUTEX_H_ */