/*	$NetBSD: uipc_socket.c,v 1.252 2016/10/13 19:10:23 uwe Exp $	*/

/*-
 * Copyright (c) 2002, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of Wasabi Systems, Inc, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 The FreeBSD Foundation
 * Copyright (c) 2004 Robert Watson
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.6 (Berkeley) 5/2/95
 */

/*
 * Socket operation routines.
 *
 * These routines are called by the routines in sys_socket.c or from a
 * system process, and implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
72 | |
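/*
 * Illustrative note (added commentary, not from the original sources):
 * "switching out" means dispatching through the protocol switch.  For
 * example, sobind() below boils down to:
 *
 *	error = (*so->so_proto->pr_usrreqs->pr_bind)(so, nam, l);
 */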
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_socket.c,v 1.252 2016/10/13 19:10:23 uwe Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_sock_counters.h"
#include "opt_sosend_loan.h"
#include "opt_mbuftrace.h"
#include "opt_somaxkva.h"
#include "opt_multiprocessor.h"	/* XXX */
#include "opt_sctp.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/uidinfo.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/kauth.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/kthread.h>

#ifdef COMPAT_50
#include <compat/sys/time.h>
#include <compat/sys/socket.h>
#endif

#include <uvm/uvm_extern.h>
#include <uvm/uvm_loan.h>
#include <uvm/uvm_page.h>

MALLOC_DEFINE(M_SONAME, "soname", "socket name");

extern const struct fileops socketops;

extern int	somaxconn;		/* patchable (XXX sysctl) */
int		somaxconn = SOMAXCONN;
kmutex_t	*softnet_lock;

#ifdef SOSEND_COUNTERS
#include <sys/device.h>

static struct evcnt sosend_loan_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "loan big");
static struct evcnt sosend_copy_big = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "copy big");
static struct evcnt sosend_copy_small = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "copy small");
static struct evcnt sosend_kvalimit = EVCNT_INITIALIZER(EVCNT_TYPE_MISC,
    NULL, "sosend", "kva limit");

#define	SOSEND_COUNTER_INCR(ev)		(ev)->ev_count++

EVCNT_ATTACH_STATIC(sosend_loan_big);
EVCNT_ATTACH_STATIC(sosend_copy_big);
EVCNT_ATTACH_STATIC(sosend_copy_small);
EVCNT_ATTACH_STATIC(sosend_kvalimit);
#else

#define	SOSEND_COUNTER_INCR(ev)		/* nothing */

#endif /* SOSEND_COUNTERS */

#if defined(SOSEND_NO_LOAN) || defined(MULTIPROCESSOR)
int sock_loan_thresh = -1;
#else
int sock_loan_thresh = 4096;
#endif

static kmutex_t so_pendfree_lock;
static struct mbuf *so_pendfree = NULL;

#ifndef SOMAXKVA
#define	SOMAXKVA (16 * 1024 * 1024)
#endif
int somaxkva = SOMAXKVA;
static int socurkva;
static kcondvar_t socurkva_cv;

static kauth_listener_t socket_listener;

#define	SOCK_LOAN_CHUNK		65536

static void sopendfree_thread(void *);
static kcondvar_t pendfree_thread_cv;
static lwp_t *sopendfree_lwp;

static void sysctl_kern_socket_setup(void);
static struct sysctllog *socket_sysctllog;

static vsize_t
sokvareserve(struct socket *so, vsize_t len)
{
	int error;

	mutex_enter(&so_pendfree_lock);
	while (socurkva + len > somaxkva) {
		SOSEND_COUNTER_INCR(&sosend_kvalimit);
		error = cv_wait_sig(&socurkva_cv, &so_pendfree_lock);
		if (error) {
			len = 0;
			break;
		}
	}
	socurkva += len;
	mutex_exit(&so_pendfree_lock);
	return len;
}

static void
sokvaunreserve(vsize_t len)
{

	mutex_enter(&so_pendfree_lock);
	socurkva -= len;
	cv_broadcast(&socurkva_cv);
	mutex_exit(&so_pendfree_lock);
}

/*
 * sokvaalloc: allocate kva for loan.
 */

vaddr_t
sokvaalloc(vaddr_t sva, vsize_t len, struct socket *so)
{
	vaddr_t lva;

	/*
	 * reserve kva.
	 */

	if (sokvareserve(so, len) == 0)
		return 0;

	/*
	 * allocate kva.
	 */

	lva = uvm_km_alloc(kernel_map, len, atop(sva) & uvmexp.colormask,
	    UVM_KMF_COLORMATCH | UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (lva == 0) {
		sokvaunreserve(len);
		return (0);
	}

	return lva;
}

/*
 * sokvafree: free kva for loan.
 */

void
sokvafree(vaddr_t sva, vsize_t len)
{

	/*
	 * free kva.
	 */

	uvm_km_free(kernel_map, sva, len, UVM_KMF_VAONLY);

	/*
	 * unreserve kva.
	 */

	sokvaunreserve(len);
}
255 | |
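/*
 * Usage sketch (added commentary; the real callers are sosend_loan() and
 * sodoloanfree() below).  sokvaalloc() and sokvafree() bracket the life
 * of a loaned mapping:
 *
 *	vaddr_t lva = sokvaalloc(sva, len, so);
 *	if (lva == 0)
 *		return 0;		(over somaxkva, or interrupted)
 *	... uvm_loan() the pages and pmap_kenter_pa() them at lva ...
 *	... later, when the loaned storage is finally released:
 *	pmap_kremove(lva, len);
 *	pmap_update(pmap_kernel());
 *	sokvafree(lva, len);
 */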
static void
sodoloanfree(struct vm_page **pgs, void *buf, size_t size)
{
	vaddr_t sva, eva;
	vsize_t len;
	int npgs;

	KASSERT(pgs != NULL);

	eva = round_page((vaddr_t) buf + size);
	sva = trunc_page((vaddr_t) buf);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	pmap_kremove(sva, len);
	pmap_update(pmap_kernel());
	uvm_unloan(pgs, npgs, UVM_LOAN_TOPAGE);
	sokvafree(sva, len);
}

/*
 * sopendfree_thread: free mbufs on "pendfree" list.
 * unlock and relock so_pendfree_lock when freeing mbufs.
 */

static void
sopendfree_thread(void *v)
{
	struct mbuf *m, *next;
	size_t rv;

	mutex_enter(&so_pendfree_lock);

	for (;;) {
		rv = 0;
		while (so_pendfree != NULL) {
			m = so_pendfree;
			so_pendfree = NULL;
			mutex_exit(&so_pendfree_lock);

			for (; m != NULL; m = next) {
				next = m->m_next;
				KASSERT((~m->m_flags & (M_EXT|M_EXT_PAGES)) == 0);
				KASSERT(m->m_ext.ext_refcnt == 0);

				rv += m->m_ext.ext_size;
				sodoloanfree(m->m_ext.ext_pgs, m->m_ext.ext_buf,
				    m->m_ext.ext_size);
				pool_cache_put(mb_cache, m);
			}

			mutex_enter(&so_pendfree_lock);
		}
		if (rv)
			cv_broadcast(&socurkva_cv);
		cv_wait(&pendfree_thread_cv, &so_pendfree_lock);
	}
	panic("sopendfree_thread");
	/* NOTREACHED */
}

void
soloanfree(struct mbuf *m, void *buf, size_t size, void *arg)
{

	KASSERT(m != NULL);

	/*
	 * postpone freeing mbuf.
	 *
	 * we can't do it in interrupt context
	 * because we need to put kva back to kernel_map.
	 */

	mutex_enter(&so_pendfree_lock);
	m->m_next = so_pendfree;
	so_pendfree = m;
	cv_signal(&pendfree_thread_cv);
	mutex_exit(&so_pendfree_lock);
}
336 | |
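/*
 * Note (added commentary): soloanfree() is installed by sosend_loan()
 * as the external-storage free callback of the loaned mbuf:
 *
 *	MEXTADD(m, (void *)lva, space, M_MBUF, soloanfree, so);
 *
 * It can therefore fire in interrupt context, where the KVA cannot be
 * returned to kernel_map; the mbuf is queued on so_pendfree instead and
 * sopendfree_thread() completes the teardown in thread context.
 */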
static long
sosend_loan(struct socket *so, struct uio *uio, struct mbuf *m, long space)
{
	struct iovec *iov = uio->uio_iov;
	vaddr_t sva, eva;
	vsize_t len;
	vaddr_t lva;
	int npgs, error;
	vaddr_t va;
	int i;

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace))
		return (0);

	if (iov->iov_len < (size_t) space)
		space = iov->iov_len;
	if (space > SOCK_LOAN_CHUNK)
		space = SOCK_LOAN_CHUNK;

	eva = round_page((vaddr_t) iov->iov_base + space);
	sva = trunc_page((vaddr_t) iov->iov_base);
	len = eva - sva;
	npgs = len >> PAGE_SHIFT;

	KASSERT(npgs <= M_EXT_MAXPAGES);

	lva = sokvaalloc(sva, len, so);
	if (lva == 0)
		return 0;

	error = uvm_loan(&uio->uio_vmspace->vm_map, sva, len,
	    m->m_ext.ext_pgs, UVM_LOAN_TOPAGE);
	if (error) {
		sokvafree(lva, len);
		return (0);
	}

	for (i = 0, va = lva; i < npgs; i++, va += PAGE_SIZE)
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(m->m_ext.ext_pgs[i]),
		    VM_PROT_READ, 0);
	pmap_update(pmap_kernel());

	lva += (vaddr_t) iov->iov_base & PAGE_MASK;

	MEXTADD(m, (void *) lva, space, M_MBUF, soloanfree, so);
	m->m_flags |= M_EXT_PAGES | M_EXT_ROMAP;

	uio->uio_resid -= space;
	/* uio_offset not updated, not set/used for write(2) */
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + space;
	uio->uio_iov->iov_len -= space;
	if (uio->uio_iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return (space);
}

struct mbuf *
getsombuf(struct socket *so, int type)
{
	struct mbuf *m;

	m = m_get(M_WAIT, type);
	MCLAIM(m, so->so_mowner);
	return m;
}

static int
socket_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;
	enum kauth_network_req req;

	result = KAUTH_RESULT_DEFER;
	req = (enum kauth_network_req)arg0;

	if ((action != KAUTH_NETWORK_SOCKET) &&
	    (action != KAUTH_NETWORK_BIND))
		return result;

	switch (req) {
	case KAUTH_REQ_NETWORK_BIND_PORT:
		result = KAUTH_RESULT_ALLOW;
		break;

	case KAUTH_REQ_NETWORK_SOCKET_DROP: {
		/* Normal users can only drop their own connections. */
		struct socket *so = (struct socket *)arg1;

		if (so->so_cred && proc_uidmatch(cred, so->so_cred) == 0)
			result = KAUTH_RESULT_ALLOW;

		break;
	}

	case KAUTH_REQ_NETWORK_SOCKET_OPEN:
		/* We allow "raw" routing/bluetooth sockets to anyone. */
		if ((u_long)arg1 == PF_ROUTE || (u_long)arg1 == PF_OROUTE
		    || (u_long)arg1 == PF_BLUETOOTH) {
			result = KAUTH_RESULT_ALLOW;
		} else {
			/* Privileged, let secmodel handle this. */
			if ((u_long)arg2 == SOCK_RAW)
				break;
		}

		result = KAUTH_RESULT_ALLOW;

		break;

	case KAUTH_REQ_NETWORK_SOCKET_CANSEE:
		result = KAUTH_RESULT_ALLOW;

		break;

	default:
		break;
	}

	return result;
}

void
soinit(void)
{

	sysctl_kern_socket_setup();

	mutex_init(&so_pendfree_lock, MUTEX_DEFAULT, IPL_VM);
	softnet_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	cv_init(&socurkva_cv, "sokva");
	cv_init(&pendfree_thread_cv, "sopendfr");
	soinit2();

	/* Set the initial adjusted socket buffer size. */
	if (sb_max_set(sb_max))
		panic("bad initial sb_max value: %lu", sb_max);

	socket_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
	    socket_listener_cb, NULL);
}

void
soinit1(void)
{
	int error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    sopendfree_thread, NULL, &sopendfree_lwp, "sopendfree");
	if (error)
		panic("soinit1 %d", error);
}

/*
 * socreate: create a new socket of the specified type and protocol.
 *
 * => Caller may specify another socket for lock sharing (must not be held).
 * => Returns the new socket without lock held.
 */
int
socreate(int dom, struct socket **aso, int type, int proto, struct lwp *l,
    struct socket *lockso)
{
	const struct protosw *prp;
	struct socket *so;
	uid_t uid;
	int error;
	kmutex_t *lock;

	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_SOCKET,
	    KAUTH_REQ_NETWORK_SOCKET_OPEN, KAUTH_ARG(dom), KAUTH_ARG(type),
	    KAUTH_ARG(proto));
	if (error != 0)
		return error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL) {
		/* no support for domain */
		if (pffinddomain(dom) == 0)
			return EAFNOSUPPORT;
		/* no support for socket type */
		if (proto == 0 && type != 0)
			return EPROTOTYPE;
		return EPROTONOSUPPORT;
	}
	if (prp->pr_usrreqs == NULL)
		return EPROTONOSUPPORT;
	if (prp->pr_type != type)
		return EPROTOTYPE;

	so = soget(true);
	so->so_type = type;
	so->so_proto = prp;
	so->so_send = sosend;
	so->so_receive = soreceive;
#ifdef MBUFTRACE
	so->so_rcv.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_snd.sb_mowner = &prp->pr_domain->dom_mowner;
	so->so_mowner = &prp->pr_domain->dom_mowner;
#endif
	uid = kauth_cred_geteuid(l->l_cred);
	so->so_uidinfo = uid_find(uid);
	so->so_cpid = l->l_proc->p_pid;

	/*
	 * Lock assigned and taken during PCB attach, unless we share
	 * the lock with another socket, e.g. socketpair(2) case.
	 */
	if (lockso) {
		lock = lockso->so_lock;
		so->so_lock = lock;
		mutex_obj_hold(lock);
		mutex_enter(lock);
	}

	/* Attach the PCB (returns with the socket lock held). */
	error = (*prp->pr_usrreqs->pr_attach)(so, proto);
	KASSERT(solocked(so));

	if (error) {
		KASSERT(so->so_pcb == NULL);
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return error;
	}
	so->so_cred = kauth_cred_dup(l->l_cred);
	sounlock(so);

	*aso = so;
	return 0;
}
572 | |
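/*
 * Usage sketch (added commentary, not code from this file): an in-kernel
 * consumer creating a TCP socket might do:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP,
 *	    curlwp, NULL);
 *	if (error != 0)
 *		return error;
 *	... sobind(so, nam, curlwp), etc.; see the locking notes above ...
 *	soclose(so);
 */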
/*
 * fsocreate: create a socket and a file descriptor associated with it.
 *
 * => On success, write file descriptor to fdout and return zero.
 * => On failure, return non-zero; *fdout will be undefined.
 */
int
fsocreate(int domain, struct socket **sop, int type, int proto, int *fdout)
{
	lwp_t *l = curlwp;
	int error, fd, flags;
	struct socket *so;
	struct file *fp;

	if ((error = fd_allocfile(&fp, &fd)) != 0) {
		return error;
	}
	flags = type & SOCK_FLAGS_MASK;
	fd_set_exclose(l, fd, (flags & SOCK_CLOEXEC) != 0);
	fp->f_flag = FREAD|FWRITE|((flags & SOCK_NONBLOCK) ? FNONBLOCK : 0)|
	    ((flags & SOCK_NOSIGPIPE) ? FNOSIGPIPE : 0);
	fp->f_type = DTYPE_SOCKET;
	fp->f_ops = &socketops;

	type &= ~SOCK_FLAGS_MASK;
	error = socreate(domain, &so, type, proto, l, NULL);
	if (error) {
		fd_abort(curproc, fp, fd);
		return error;
	}
	if (flags & SOCK_NONBLOCK) {
		so->so_state |= SS_NBIO;
	}
	fp->f_socket = so;
	fd_affix(curproc, fp, fd);

	if (sop != NULL) {
		*sop = so;
	}
	*fdout = fd;
	return error;
}
615 | |
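/*
 * Usage sketch (added commentary; argument and variable names are
 * illustrative): this is essentially the body of the socket(2) syscall:
 *
 *	int fd, error;
 *
 *	error = fsocreate(domain, NULL, type, protocol, &fd);
 *	if (error == 0)
 *		*retval = fd;
 */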
int
sofamily(const struct socket *so)
{
	const struct protosw *pr;
	const struct domain *dom;

	if ((pr = so->so_proto) == NULL)
		return AF_UNSPEC;
	if ((dom = pr->pr_domain) == NULL)
		return AF_UNSPEC;
	return dom->dom_family;
}

int
sobind(struct socket *so, struct sockaddr *nam, struct lwp *l)
{
	int error;

	solock(so);
	if (nam->sa_family != so->so_proto->pr_domain->dom_family) {
		sounlock(so);
		return EAFNOSUPPORT;
	}
	error = (*so->so_proto->pr_usrreqs->pr_bind)(so, nam, l);
	sounlock(so);
	return error;
}

int
solisten(struct socket *so, int backlog, struct lwp *l)
{
	int error;
	short oldopt, oldqlimit;

	solock(so);
	if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING |
	    SS_ISDISCONNECTING)) != 0) {
		sounlock(so);
		return EINVAL;
	}
	oldopt = so->so_options;
	oldqlimit = so->so_qlimit;
	if (TAILQ_EMPTY(&so->so_q))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0)
		backlog = 0;
	so->so_qlimit = min(backlog, somaxconn);

	error = (*so->so_proto->pr_usrreqs->pr_listen)(so, l);
	if (error != 0) {
		so->so_options = oldopt;
		so->so_qlimit = oldqlimit;
		sounlock(so);
		return error;
	}
	sounlock(so);
	return 0;
}

void
sofree(struct socket *so)
{
	u_int refs;

	KASSERT(solocked(so));

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
		sounlock(so);
		return;
	}
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0)) {
			sounlock(so);
			return;
		}
	}
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_rcv.sb_hiwat, 0,
		    RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_uidinfo, &so->so_snd.sb_hiwat, 0,
		    RLIM_INFINITY);
	sbrelease(&so->so_snd, so);
	KASSERT(!cv_has_waiters(&so->so_cv));
	KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
	KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));
	sorflush(so);
	refs = so->so_aborting;	/* XXX */
	/* Remove accept filter if one is present. */
	if (so->so_accf != NULL)
		(void)accept_filt_clear(so);
	sounlock(so);
	if (refs == 0)		/* XXX */
		soput(so);
}

/*
 * soclose: close a socket on last file table reference removal.
 * Initiate disconnect if connected.  Free socket when disconnect complete.
 */
int
soclose(struct socket *so)
{
	struct socket *so2;
	int error = 0;

	solock(so);
	if (so->so_options & SO_ACCEPTCONN) {
		for (;;) {
			if ((so2 = TAILQ_FIRST(&so->so_q0)) != 0) {
				KASSERT(solocked2(so, so2));
				(void) soqremque(so2, 0);
				/* soabort drops the lock. */
				(void) soabort(so2);
				solock(so);
				continue;
			}
			if ((so2 = TAILQ_FIRST(&so->so_q)) != 0) {
				KASSERT(solocked2(so, so2));
				(void) soqremque(so2, 1);
				/* soabort drops the lock. */
				(void) soabort(so2);
				solock(so);
				continue;
			}
			break;
		}
	}
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & (SS_ISDISCONNECTING|SS_NBIO)) ==
			    (SS_ISDISCONNECTING|SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sowait(so, true, so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
 drop:
	if (so->so_pcb) {
		KASSERT(solocked(so));
		(*so->so_proto->pr_usrreqs->pr_detach)(so);
	}
 discard:
	KASSERT((so->so_state & SS_NOFDREF) == 0);
	kauth_cred_free(so->so_cred);
	so->so_state |= SS_NOFDREF;
	sofree(so);
	return error;
}

/*
 * Must be called with the socket locked.  Will return with it unlocked.
 */
int
soabort(struct socket *so)
{
	u_int refs;
	int error;

	KASSERT(solocked(so));
	KASSERT(so->so_head == NULL);

	so->so_aborting++;		/* XXX */
	error = (*so->so_proto->pr_usrreqs->pr_abort)(so);
	refs = --so->so_aborting;	/* XXX */
	if (error || (refs == 0)) {
		sofree(so);
	} else {
		sounlock(so);
	}
	return error;
}
803 | |
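/*
 * Locking sketch (added commentary): soabort() consumes the socket lock,
 * so a caller iterating over a queue re-acquires it, as soclose() does:
 *
 *	solock(so);
 *	...
 *	(void) soqremque(so2, 0);
 *	(void) soabort(so2);		(drops the lock)
 *	solock(so);
 */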
int
soaccept(struct socket *so, struct sockaddr *nam)
{
	int error;

	KASSERT(solocked(so));
	KASSERT((so->so_state & SS_NOFDREF) != 0);

	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreqs->pr_accept)(so, nam);
	else
		error = ECONNABORTED;

	return error;
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct lwp *l)
{
	int error;

	KASSERT(solocked(so));

	if (so->so_options & SO_ACCEPTCONN)
		return EOPNOTSUPP;
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		if (nam->sa_family != so->so_proto->pr_domain->dom_family) {
			return EAFNOSUPPORT;
		}
		error = (*so->so_proto->pr_usrreqs->pr_connect)(so, nam, l);
	}

	return error;
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	KASSERT(solocked2(so1, so2));

	return (*so1->so_proto->pr_usrreqs->pr_connect2)(so1, so2);
}

int
sodisconnect(struct socket *so)
{
	int error;

	KASSERT(solocked(so));

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
	} else if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
	} else {
		error = (*so->so_proto->pr_usrreqs->pr_disconnect)(so);
	}
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If the send must happen all at once and the message is larger than
 * the send buffer, then fail with a hard error.
 * Lock out other senders.
 * If the send must happen all at once and there is not enough room now,
 * inform the user that this would block and do nothing more.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if non-NULL, otherwise by
 * the mbuf chain "top" (which must be NULL if uio is not).  Data
 * provided in an mbuf chain must be small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct lwp *l)
{
	struct mbuf **mp, *m;
	long space, len, resid, clen, mlen;
	int error, s, dontroute, atomic;
	short wakeup_state = 0;

	clen = 0;

	/*
	 * solock() provides atomicity of access.  splsoftnet() prevents
	 * protocol processing soft interrupts from interrupting us and
	 * blocking (expensive).
	 */
	s = splsoftnet();
	solock(so);
	atomic = sosendallatonce(so) || top;
	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	l->l_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			goto release;
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (resid || clen == 0) {
					error = ENOTCONN;
					goto release;
				}
			} else if (addr == NULL) {
				error = EDESTADDRREQ;
				goto release;
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat) {
			error = EMSGSIZE;
			goto release;
		}
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO)) {
				error = EWOULDBLOCK;
				goto release;
			}
			sbunlock(&so->so_snd);
			if (wakeup_state & SS_RESTARTSYS) {
				error = ERESTART;
				goto out;
			}
			error = sbwait(&so->so_snd);
			if (error)
				goto out;
			wakeup_state = so->so_state;
			goto restart;
		}
		wakeup_state = 0;
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				sounlock(so);
				splx(s);
				if (top == NULL) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m_reset_rcvif(m);
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				MCLAIM(m, so->so_snd.sb_mowner);
				if (sock_loan_thresh >= 0 &&
				    uio->uio_iov->iov_len >= sock_loan_thresh &&
				    space >= sock_loan_thresh &&
				    (len = sosend_loan(so, uio, m,
				    space)) != 0) {
					SOSEND_COUNTER_INCR(&sosend_loan_big);
					space -= len;
					goto have_data;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					SOSEND_COUNTER_INCR(&sosend_copy_big);
					m_clget(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
					space -= len;
				} else {
 nopages:
					SOSEND_COUNTER_INCR(&sosend_copy_small);
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				error = uiomove(mtod(m, void *), (int)len, uio);
 have_data:
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				s = splsoftnet();
				solock(so);
				if (error != 0)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
				goto release;
			}
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			if (flags & MSG_OOB) {
				error = (*so->so_proto->pr_usrreqs->pr_sendoob)(so,
				    top, control);
			} else {
				error = (*so->so_proto->pr_usrreqs->pr_send)(so,
				    top, addr, control, l);
			}
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error != 0)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
	sounlock(so);
	splx(s);
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
1098 | |
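/*
 * Usage sketch (added commentary; a minimal sketch, with buf/buflen as
 * hypothetical names): an in-kernel caller with data in a kernel buffer
 * wraps it in a uio and calls the socket's send hook, which socreate()
 * points at sosend():
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *	struct uio uio;
 *
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_offset = 0;
 *	uio.uio_resid = buflen;
 *	UIO_SETUP_SYSSPACE(&uio);
 *	error = (*so->so_send)(so, NULL, &uio, NULL, NULL, 0, curlwp);
 */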
/*
 * Following replacement or removal of the first mbuf on the first
 * mbuf chain of a socket buffer, push necessary state changes back
 * into the socket buffer so that other consumers see the values
 * consistently.  'nextrecord' is the caller's locally stored value of
 * the original value of sb->sb_mb->m_nextpkt which must be restored
 * when the lead mbuf changes.  NOTE: 'nextrecord' may be NULL.
 */
static void
sbsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	KASSERT(solocked(sb->sb_so));

	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect
	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
	 * the addition of a second clause that takes care of the
	 * case where sb_mb has been updated, but remains the last
	 * record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
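/*
 * Record layout sketch (added commentary, illustrating the description
 * above) for a PR_ADDR protocol: mbufs within a record are linked via
 * m_next, records via m_nextpkt:
 *
 *	sb_mb --> [MT_SONAME] -> [MT_CONTROL] -> [MT_DATA] -> [MT_DATA]
 *	              |
 *	          m_nextpkt
 *	              |
 *	              v
 *	          [MT_SONAME] -> ...		(next record)
 */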
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
{
	struct lwp *l = curlwp;
	struct mbuf *m, **mp, *mt;
	size_t len, offset, moff, orig_resid;
	int atomic, flags, error, s, type;
	const struct protosw *pr;
	struct mbuf *nextrecord;
	int mbuf_removed = 0;
	const struct domain *dom;
	short wakeup_state = 0;

	pr = so->so_proto;
	atomic = pr->pr_flags & PR_ATOMIC;
	dom = pr->pr_domain;
	mp = mp0;
	type = 0;
	orig_resid = uio->uio_resid;

	if (paddr != NULL)
		*paddr = NULL;
	if (controlp != NULL)
		*controlp = NULL;
	if (flagsp != NULL)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;

	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		solock(so);
		error = (*pr->pr_usrreqs->pr_recvoob)(so, m, flags & MSG_PEEK);
		sounlock(so);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, void *),
			    MIN(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid > 0 && error == 0 && m);
 bad:
		if (m != NULL)
			m_freem(m);
		return error;
	}
	if (mp != NULL)
		*mp = NULL;

	/*
	 * solock() provides atomicity of access.  splsoftnet() prevents
	 * protocol processing soft interrupts from interrupting us and
	 * blocking (expensive).
	 */
	s = splsoftnet();
	solock(so);
 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0) {
		sounlock(so);
		splx(s);
		return error;
	}

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 * 1. the current count is less than the low water mark,
	 * 2. MSG_WAITALL is set, and it is possible to do the entire
	 *    receive operation at once if we block (resid <= hiwat), or
	 * 3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL ||
	    ((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) &&
	    uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && !atomic)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m != NULL)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m != NULL)
				goto dontblock;
			else
				goto release;
		}
		for (; m != NULL; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(&so->so_rcv);
		if (wakeup_state & SS_RESTARTSYS)
			error = ERESTART;
		else
			error = sbwait(&so->so_rcv);
		if (error != 0) {
			sounlock(so);
			splx(s);
			return error;
		}
		wakeup_state = so->so_state;
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before dropping the
	 * socket lock, and re-reading them when picking it up.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 *
	 * By holding the high-level sblock(), we prevent simultaneous
	 * readers from pulling off the front of the socket buffer.
	 */
	if (l != NULL)
		l->l_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			mbuf_removed = 1;
			if (paddr != NULL) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				m = so->so_rcv.sb_mb = m_free(m);
			}
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	if (pr->pr_flags & PR_ADDR_OPT) {
		/*
		 * For SCTP we may be getting a
		 * whole message OR a partial delivery.
		 */
		if (m->m_type == MT_SONAME) {
			orig_resid = 0;
			if (flags & MSG_PEEK) {
				if (paddr)
					*paddr = m_copy(m, 0, m->m_len);
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				if (paddr) {
					*paddr = m;
					so->so_rcv.sb_mb = m->m_next;
					m->m_next = 0;
					m = so->so_rcv.sb_mb;
				} else {
					m = so->so_rcv.sb_mb = m_free(m);
				}
			}
		}
	}

	/*
	 * Process one or more MT_CONTROL mbufs present before any data mbufs
	 * in the first mbuf chain on the socket buffer.  If MSG_PEEK, we
	 * just copy the data; if !MSG_PEEK, we call into the protocol to
	 * perform externalization (or freeing if controlp == NULL).
	 */
	if (__predict_false(m != NULL && m->m_type == MT_CONTROL)) {
		struct mbuf *cm = NULL, *cmn;
		struct mbuf **cme = &cm;

		do {
			if (flags & MSG_PEEK) {
				if (controlp != NULL) {
					*controlp = m_copy(m, 0, m->m_len);
					controlp = &(*controlp)->m_next;
				}
				m = m->m_next;
			} else {
				sbfree(&so->so_rcv, m);
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				*cme = m;
				cme = &(*cme)->m_next;
				m = so->so_rcv.sb_mb;
			}
		} while (m != NULL && m->m_type == MT_CONTROL);
		if ((flags & MSG_PEEK) == 0)
			sbsync(&so->so_rcv, nextrecord);
		for (; cm != NULL; cm = cmn) {
			cmn = cm->m_next;
			cm->m_next = NULL;
			type = mtod(cm, struct cmsghdr *)->cmsg_type;
			if (controlp != NULL) {
				if (dom->dom_externalize != NULL &&
				    type == SCM_RIGHTS) {
					sounlock(so);
					splx(s);
					error = (*dom->dom_externalize)(cm, l,
					    (flags & MSG_CMSG_CLOEXEC) ?
					    O_CLOEXEC : 0);
					s = splsoftnet();
					solock(so);
				}
				*controlp = cm;
				while (*controlp != NULL)
					controlp = &(*controlp)->m_next;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (dom->dom_dispose != NULL &&
				    type == SCM_RIGHTS) {
					sounlock(so);
					(*dom->dom_dispose)(cm);
					solock(so);
				}
				m_freem(cm);
			}
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		orig_resid = 0;
	}

	/* If m is non-NULL, we have some data to read. */
	if (__predict_true(m != NULL)) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	moff = 0;
	offset = 0;
	while (m != NULL && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		wakeup_state = 0;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must be consistent here (sb_mb points to
		 * the current mbuf, m_nextpkt to the next record) when
		 * we drop priority; we must note any additions to the
		 * sockbuf when we block interrupts again.
		 */
		if (mp == NULL) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			sounlock(so);
			splx(s);
			error = uiomove(mtod(m, char *) + moff, len, uio);
			s = splsoftnet();
			solock(so);
			if (error != 0) {
				/*
				 * If any part of the record has been removed
				 * (such as the MT_SONAME mbuf, which will
				 * happen when PR_ADDR, and thus also
				 * PR_ATOMIC, is set), then drop the entire
				 * record to maintain the atomicity of the
				 * receive operation.
				 *
				 * This avoids a later panic("receive 1a")
				 * when compiled with DIAGNOSTIC.
				 */
				if (m && mbuf_removed && atomic)
					(void) sbdroprecord(&so->so_rcv);

				goto release;
			}
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
#ifdef SCTP
			if (m->m_flags & M_NOTIFICATION)
				flags |= MSG_NOTIFICATION;
#endif /* SCTP */
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					m = so->so_rcv.sb_mb = m_free(m);
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else if (flags & MSG_PEEK)
			moff += len;
		else {
			if (mp != NULL) {
				mt = m_copym(m, 0, len, M_NOWAIT);
				if (__predict_false(mt == NULL)) {
					sounlock(so);
					mt = m_copym(m, 0, len, M_WAIT);
					solock(so);
				}
				*mp = mt;
			}
			m->m_data += len;
			m->m_len -= len;
			so->so_rcv.sb_cc -= len;
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
				(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			if (wakeup_state & SS_RESTARTSYS)
				error = ERESTART;
			else
				error = sbwait(&so->so_rcv);
			if (error != 0) {
				sbunlock(&so->so_rcv);
				sounlock(so);
				splx(s);
				return 0;
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
			wakeup_state = so->so_state;
		}
	}

	if (m && atomic) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp != NULL)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	sounlock(so);
	splx(s);
	return error;
}
1635 | |
int
soshutdown(struct socket *so, int how)
{
	const struct protosw *pr;
	int error;

	KASSERT(solocked(so));

	pr = so->so_proto;
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how == SHUT_RD || how == SHUT_RDWR) {
		sorflush(so);
		error = 0;
	}
	if (how == SHUT_WR || how == SHUT_RDWR)
		error = (*pr->pr_usrreqs->pr_shutdown)(so);

	return error;
}

void
sorestart(struct socket *so)
{
	/*
	 * An application has called close() on an fd on which another
	 * of its threads has called a socket system call.
	 * Mark this and wake everyone up; code that would otherwise
	 * block now returns ERESTART instead.
	 * On system call re-entry the fd is validated and EBADF returned.
	 * Any other fd will block again on the 2nd syscall.
	 */
	solock(so);
	so->so_state |= SS_RESTARTSYS;
	cv_broadcast(&so->so_cv);
	cv_broadcast(&so->so_snd.sb_cv);
	cv_broadcast(&so->so_rcv.sb_cv);
	sounlock(so);
}

void
sorflush(struct socket *so)
{
	struct sockbuf *sb, asb;
	const struct protosw *pr;

	KASSERT(solocked(so));

	sb = &so->so_rcv;
	pr = so->so_proto;
	socantrcvmore(so);
	sb->sb_flags |= SB_NOINTR;
	(void)sblock(sb, M_WAITOK);
	sbunlock(sb);
	asb = *sb;
	/*
	 * Clear most of the sockbuf structure, but leave some of the
	 * fields valid.
	 */
	memset(&sb->sb_startzero, 0,
	    sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) {
		sounlock(so);
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
		solock(so);
	}
	sbrelease(&asb, so);
}
1705 | |
1706 | /* |
1707 | * internal set SOL_SOCKET options |
1708 | */ |
1709 | static int |
1710 | sosetopt1(struct socket *so, const struct sockopt *sopt) |
1711 | { |
1712 | int error = EINVAL, opt; |
1713 | int optval = 0; /* XXX: gcc */ |
1714 | struct linger l; |
1715 | struct timeval tv; |
1716 | |
1717 | switch ((opt = sopt->sopt_name)) { |
1718 | |
1719 | case SO_ACCEPTFILTER: |
1720 | error = accept_filt_setopt(so, sopt); |
1721 | KASSERT(solocked(so)); |
1722 | break; |
1723 | |
1724 | case SO_LINGER: |
1725 | error = sockopt_get(sopt, &l, sizeof(l)); |
1726 | solock(so); |
1727 | if (error) |
1728 | break; |
1729 | if (l.l_linger < 0 || l.l_linger > USHRT_MAX || |
1730 | l.l_linger > (INT_MAX / hz)) { |
1731 | error = EDOM; |
1732 | break; |
1733 | } |
1734 | so->so_linger = l.l_linger; |
1735 | if (l.l_onoff) |
1736 | so->so_options |= SO_LINGER; |
1737 | else |
1738 | so->so_options &= ~SO_LINGER; |
1739 | break; |
1740 | |
1741 | case SO_DEBUG: |
1742 | case SO_KEEPALIVE: |
1743 | case SO_DONTROUTE: |
1744 | case SO_USELOOPBACK: |
1745 | case SO_BROADCAST: |
1746 | case SO_REUSEADDR: |
1747 | case SO_REUSEPORT: |
1748 | case SO_OOBINLINE: |
1749 | case SO_TIMESTAMP: |
1750 | case SO_NOSIGPIPE: |
1751 | #ifdef SO_OTIMESTAMP |
1752 | case SO_OTIMESTAMP: |
1753 | #endif |
1754 | error = sockopt_getint(sopt, &optval); |
1755 | solock(so); |
1756 | if (error) |
1757 | break; |
1758 | if (optval) |
1759 | so->so_options |= opt; |
1760 | else |
1761 | so->so_options &= ~opt; |
1762 | break; |
1763 | |
1764 | case SO_SNDBUF: |
1765 | case SO_RCVBUF: |
1766 | case SO_SNDLOWAT: |
1767 | case SO_RCVLOWAT: |
1768 | error = sockopt_getint(sopt, &optval); |
1769 | solock(so); |
1770 | if (error) |
1771 | break; |
1772 | |
1773 | /* |
1774 | * Values < 1 make no sense for any of these |
1775 | * options, so disallow them. |
1776 | */ |
1777 | if (optval < 1) { |
1778 | error = EINVAL; |
1779 | break; |
1780 | } |
1781 | |
1782 | switch (opt) { |
1783 | case SO_SNDBUF: |
1784 | if (sbreserve(&so->so_snd, (u_long)optval, so) == 0) { |
1785 | error = ENOBUFS; |
1786 | break; |
1787 | } |
1788 | so->so_snd.sb_flags &= ~SB_AUTOSIZE; |
1789 | break; |
1790 | |
1791 | case SO_RCVBUF: |
1792 | if (sbreserve(&so->so_rcv, (u_long)optval, so) == 0) { |
1793 | error = ENOBUFS; |
1794 | break; |
1795 | } |
1796 | so->so_rcv.sb_flags &= ~SB_AUTOSIZE; |
1797 | break; |
1798 | |
1799 | /* |
1800 | * Make sure the low-water is never greater than |
1801 | * the high-water. |
1802 | */ |
1803 | case SO_SNDLOWAT: |
1804 | if (optval > so->so_snd.sb_hiwat) |
1805 | optval = so->so_snd.sb_hiwat; |
1806 | |
1807 | so->so_snd.sb_lowat = optval; |
1808 | break; |
1809 | |
1810 | case SO_RCVLOWAT: |
1811 | if (optval > so->so_rcv.sb_hiwat) |
1812 | optval = so->so_rcv.sb_hiwat; |
1813 | |
1814 | so->so_rcv.sb_lowat = optval; |
1815 | break; |
1816 | } |
1817 | break; |
1818 | |
1819 | #ifdef COMPAT_50 |
1820 | case SO_OSNDTIMEO: |
1821 | case SO_ORCVTIMEO: { |
1822 | struct timeval50 otv; |
1823 | error = sockopt_get(sopt, &otv, sizeof(otv)); |
1824 | if (error) { |
1825 | solock(so); |
1826 | break; |
1827 | } |
1828 | timeval50_to_timeval(&otv, &tv); |
1829 | opt = opt == SO_OSNDTIMEO ? SO_SNDTIMEO : SO_RCVTIMEO; |
1830 | error = 0; |
1831 | /*FALLTHROUGH*/ |
1832 | } |
1833 | #endif /* COMPAT_50 */ |
1834 | |
1835 | case SO_SNDTIMEO: |
1836 | case SO_RCVTIMEO: |
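		/*
		 * If control fell through from the COMPAT_50 cases
		 * above, error is 0 and tv already holds the converted
		 * timeout; otherwise error is still the initial EINVAL
		 * and the user's value is fetched here.
		 */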
1837 | if (error) |
1838 | error = sockopt_get(sopt, &tv, sizeof(tv)); |
1839 | solock(so); |
1840 | if (error) |
1841 | break; |
1842 | |
1843 | if (tv.tv_sec > (INT_MAX - tv.tv_usec / tick) / hz) { |
1844 | error = EDOM; |
1845 | break; |
1846 | } |
1847 | |
1848 | optval = tv.tv_sec * hz + tv.tv_usec / tick; |
1849 | if (optval == 0 && tv.tv_usec != 0) |
1850 | optval = 1; |
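		/*
		 * Illustrative conversion: with hz = 100 (tick =
		 * 10000us), tv = { 2, 500000 } becomes
		 * 2 * 100 + 500000 / 10000 = 250 ticks; a non-zero
		 * timeout that would round down to 0 ticks is bumped
		 * to 1 so it is not mistaken for "no timeout".
		 */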
1851 | |
1852 | switch (opt) { |
1853 | case SO_SNDTIMEO: |
1854 | so->so_snd.sb_timeo = optval; |
1855 | break; |
1856 | case SO_RCVTIMEO: |
1857 | so->so_rcv.sb_timeo = optval; |
1858 | break; |
1859 | } |
1860 | break; |
1861 | |
1862 | default: |
1863 | solock(so); |
1864 | error = ENOPROTOOPT; |
1865 | break; |
1866 | } |
1867 | KASSERT(solocked(so)); |
1868 | return error; |
1869 | } |
1870 | |
1871 | int |
1872 | sosetopt(struct socket *so, struct sockopt *sopt) |
1873 | { |
1874 | int error, prerr; |
1875 | |
1876 | if (sopt->sopt_level == SOL_SOCKET) { |
1877 | error = sosetopt1(so, sopt); |
1878 | KASSERT(solocked(so)); |
1879 | } else { |
1880 | error = ENOPROTOOPT; |
1881 | solock(so); |
1882 | } |
1883 | |
1884 | if ((error == 0 || error == ENOPROTOOPT) && |
1885 | so->so_proto != NULL && so->so_proto->pr_ctloutput != NULL) { |
1886 | /* give the protocol stack a shot */ |
1887 | prerr = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, sopt); |
1888 | if (prerr == 0) |
1889 | error = 0; |
1890 | else if (prerr != ENOPROTOOPT) |
1891 | error = prerr; |
1892 | } |
1893 | sounlock(so); |
1894 | return error; |
1895 | } |
1896 | |
1897 | /* |
1898 | * so_setsockopt() is a wrapper providing a sockopt structure for sosetopt() |
1899 | */ |
1900 | int |
1901 | so_setsockopt(struct lwp *l, struct socket *so, int level, int name, |
1902 | const void *val, size_t valsize) |
1903 | { |
1904 | struct sockopt sopt; |
1905 | int error; |
1906 | |
1907 | KASSERT(valsize == 0 || val != NULL); |
1908 | |
1909 | sockopt_init(&sopt, level, name, valsize); |
1910 | sockopt_set(&sopt, val, valsize); |
1911 | |
1912 | error = sosetopt(so, &sopt); |
1913 | |
1914 | sockopt_destroy(&sopt); |
1915 | |
1916 | return error; |
1917 | } |
1918 | |
1919 | /* |
1920 | * internal get SOL_SOCKET options |
1921 | */ |
1922 | static int |
1923 | sogetopt1(struct socket *so, struct sockopt *sopt) |
1924 | { |
1925 | int error, optval, opt; |
1926 | struct linger l; |
1927 | struct timeval tv; |
1928 | |
1929 | switch ((opt = sopt->sopt_name)) { |
1930 | |
1931 | case SO_ACCEPTFILTER: |
1932 | error = accept_filt_getopt(so, sopt); |
1933 | break; |
1934 | |
1935 | case SO_LINGER: |
1936 | l.l_onoff = (so->so_options & SO_LINGER) ? 1 : 0; |
1937 | l.l_linger = so->so_linger; |
1938 | |
1939 | error = sockopt_set(sopt, &l, sizeof(l)); |
1940 | break; |
1941 | |
1942 | case SO_USELOOPBACK: |
1943 | case SO_DONTROUTE: |
1944 | case SO_DEBUG: |
1945 | case SO_KEEPALIVE: |
1946 | case SO_REUSEADDR: |
1947 | case SO_REUSEPORT: |
1948 | case SO_BROADCAST: |
1949 | case SO_OOBINLINE: |
1950 | case SO_TIMESTAMP: |
1951 | case SO_NOSIGPIPE: |
1952 | #ifdef SO_OTIMESTAMP |
1953 | case SO_OTIMESTAMP: |
1954 | #endif |
1955 | case SO_ACCEPTCONN: |
1956 | error = sockopt_setint(sopt, (so->so_options & opt) ? 1 : 0); |
1957 | break; |
1958 | |
1959 | case SO_TYPE: |
1960 | error = sockopt_setint(sopt, so->so_type); |
1961 | break; |
1962 | |
1963 | case SO_ERROR: |
1964 | error = sockopt_setint(sopt, so->so_error); |
1965 | so->so_error = 0; |
1966 | break; |
1967 | |
1968 | case SO_SNDBUF: |
1969 | error = sockopt_setint(sopt, so->so_snd.sb_hiwat); |
1970 | break; |
1971 | |
1972 | case SO_RCVBUF: |
1973 | error = sockopt_setint(sopt, so->so_rcv.sb_hiwat); |
1974 | break; |
1975 | |
1976 | case SO_SNDLOWAT: |
1977 | error = sockopt_setint(sopt, so->so_snd.sb_lowat); |
1978 | break; |
1979 | |
1980 | case SO_RCVLOWAT: |
1981 | error = sockopt_setint(sopt, so->so_rcv.sb_lowat); |
1982 | break; |
1983 | |
1984 | #ifdef COMPAT_50 |
1985 | case SO_OSNDTIMEO: |
1986 | case SO_ORCVTIMEO: { |
1987 | struct timeval50 otv; |
1988 | |
1989 | optval = (opt == SO_OSNDTIMEO ? |
1990 | so->so_snd.sb_timeo : so->so_rcv.sb_timeo); |
1991 | |
1992 | otv.tv_sec = optval / hz; |
1993 | otv.tv_usec = (optval % hz) * tick; |
1994 | |
1995 | error = sockopt_set(sopt, &otv, sizeof(otv)); |
1996 | break; |
1997 | } |
1998 | #endif /* COMPAT_50 */ |
1999 | |
2000 | case SO_SNDTIMEO: |
2001 | case SO_RCVTIMEO: |
2002 | optval = (opt == SO_SNDTIMEO ? |
2003 | so->so_snd.sb_timeo : so->so_rcv.sb_timeo); |
2004 | |
2005 | tv.tv_sec = optval / hz; |
2006 | tv.tv_usec = (optval % hz) * tick; |
2007 | |
2008 | error = sockopt_set(sopt, &tv, sizeof(tv)); |
2009 | break; |
2010 | |
2011 | case SO_OVERFLOWED: |
2012 | error = sockopt_setint(sopt, so->so_rcv.sb_overflowed); |
2013 | break; |
2014 | |
2015 | default: |
2016 | error = ENOPROTOOPT; |
2017 | break; |
2018 | } |
2019 | |
2020 | return (error); |
2021 | } |
2022 | |
2023 | int |
2024 | sogetopt(struct socket *so, struct sockopt *sopt) |
2025 | { |
2026 | int error; |
2027 | |
2028 | solock(so); |
2029 | if (sopt->sopt_level != SOL_SOCKET) { |
2030 | if (so->so_proto && so->so_proto->pr_ctloutput) { |
2031 | error = ((*so->so_proto->pr_ctloutput) |
2032 | (PRCO_GETOPT, so, sopt)); |
2033 | } else |
2034 | error = (ENOPROTOOPT); |
2035 | } else { |
2036 | error = sogetopt1(so, sopt); |
2037 | } |
2038 | sounlock(so); |
2039 | return (error); |
2040 | } |
2041 | |
2042 | /* |
2043 | * alloc sockopt data buffer buffer |
2044 | * - will be released at destroy |
2045 | */ |
2046 | static int |
2047 | sockopt_alloc(struct sockopt *sopt, size_t len, km_flag_t kmflag) |
2048 | { |
2049 | |
2050 | KASSERT(sopt->sopt_size == 0); |
2051 | |
2052 | if (len > sizeof(sopt->sopt_buf)) { |
2053 | sopt->sopt_data = kmem_zalloc(len, kmflag); |
2054 | if (sopt->sopt_data == NULL) |
2055 | return ENOMEM; |
2056 | } else |
2057 | sopt->sopt_data = sopt->sopt_buf; |
2058 | |
2059 | sopt->sopt_size = len; |
2060 | return 0; |
2061 | } |
2062 | |
2063 | /* |
2064 | * initialise sockopt storage |
2065 | * - MAY sleep during allocation |
2066 | */ |
2067 | void |
2068 | sockopt_init(struct sockopt *sopt, int level, int name, size_t size) |
2069 | { |
2070 | |
2071 | memset(sopt, 0, sizeof(*sopt)); |
2072 | |
2073 | sopt->sopt_level = level; |
2074 | sopt->sopt_name = name; |
2075 | (void)sockopt_alloc(sopt, size, KM_SLEEP); |
2076 | } |
2077 | |
2078 | /* |
2079 | * destroy sockopt storage |
2080 | * - will release any held memory references |
2081 | */ |
2082 | void |
2083 | sockopt_destroy(struct sockopt *sopt) |
2084 | { |
2085 | |
2086 | if (sopt->sopt_data != sopt->sopt_buf) |
2087 | kmem_free(sopt->sopt_data, sopt->sopt_size); |
2088 | |
2089 | memset(sopt, 0, sizeof(*sopt)); |
2090 | } |
2091 | |
2092 | /* |
2093 | * set sockopt value |
2094 | * - value is copied into sockopt |
2095 | * - memory is allocated when necessary, will not sleep |
2096 | */ |
2097 | int |
2098 | sockopt_set(struct sockopt *sopt, const void *buf, size_t len) |
2099 | { |
2100 | int error; |
2101 | |
2102 | if (sopt->sopt_size == 0) { |
2103 | error = sockopt_alloc(sopt, len, KM_NOSLEEP); |
2104 | if (error) |
2105 | return error; |
2106 | } |
2107 | |
2108 | KASSERT(sopt->sopt_size == len); |
2109 | memcpy(sopt->sopt_data, buf, len); |
2110 | return 0; |
2111 | } |
2112 | |
2113 | /* |
2114 | * common case of set sockopt integer value |
2115 | */ |
2116 | int |
2117 | sockopt_setint(struct sockopt *sopt, int val) |
2118 | { |
2119 | |
2120 | return sockopt_set(sopt, &val, sizeof(int)); |
2121 | } |
2122 | |
2123 | /* |
2124 | * get sockopt value |
2125 | * - correct size must be given |
2126 | */ |
2127 | int |
2128 | sockopt_get(const struct sockopt *sopt, void *buf, size_t len) |
2129 | { |
2130 | |
2131 | if (sopt->sopt_size != len) |
2132 | return EINVAL; |
2133 | |
2134 | memcpy(buf, sopt->sopt_data, len); |
2135 | return 0; |
2136 | } |
2137 | |
2138 | /* |
2139 | * common case of get sockopt integer value |
2140 | */ |
2141 | int |
2142 | sockopt_getint(const struct sockopt *sopt, int *valp) |
2143 | { |
2144 | |
2145 | return sockopt_get(sopt, valp, sizeof(int)); |
2146 | } |
2147 | |
2148 | /* |
2149 | * set sockopt value from mbuf |
2150 | * - ONLY for legacy code |
2151 | * - mbuf is released by sockopt |
2152 | * - will not sleep |
2153 | */ |
2154 | int |
2155 | sockopt_setmbuf(struct sockopt *sopt, struct mbuf *m) |
2156 | { |
2157 | size_t len; |
2158 | int error; |
2159 | |
2160 | len = m_length(m); |
2161 | |
2162 | if (sopt->sopt_size == 0) { |
2163 | error = sockopt_alloc(sopt, len, KM_NOSLEEP); |
2164 | if (error) |
2165 | return error; |
2166 | } |
2167 | |
2168 | KASSERT(sopt->sopt_size == len); |
2169 | m_copydata(m, 0, len, sopt->sopt_data); |
2170 | m_freem(m); |
2171 | |
2172 | return 0; |
2173 | } |
2174 | |
2175 | /* |
2176 | * get sockopt value into mbuf |
2177 | * - ONLY for legacy code |
2178 | * - mbuf to be released by the caller |
2179 | * - will not sleep |
2180 | */ |
2181 | struct mbuf * |
2182 | sockopt_getmbuf(const struct sockopt *sopt) |
2183 | { |
2184 | struct mbuf *m; |
2185 | |
2186 | if (sopt->sopt_size > MCLBYTES) |
2187 | return NULL; |
2188 | |
2189 | m = m_get(M_DONTWAIT, MT_SOOPTS); |
2190 | if (m == NULL) |
2191 | return NULL; |
2192 | |
2193 | if (sopt->sopt_size > MLEN) { |
2194 | MCLGET(m, M_DONTWAIT); |
2195 | if ((m->m_flags & M_EXT) == 0) { |
2196 | m_free(m); |
2197 | return NULL; |
2198 | } |
2199 | } |
2200 | |
2201 | memcpy(mtod(m, void *), sopt->sopt_data, sopt->sopt_size); |
2202 | m->m_len = sopt->sopt_size; |
2203 | |
2204 | return m; |
2205 | } |
2206 | |
2207 | void |
2208 | sohasoutofband(struct socket *so) |
2209 | { |
2210 | |
2211 | fownsignal(so->so_pgid, SIGURG, POLL_PRI, POLLPRI|POLLRDBAND, so); |
2212 | selnotify(&so->so_rcv.sb_sel, POLLPRI | POLLRDBAND, NOTE_SUBMIT); |
2213 | } |
2214 | |
2215 | static void |
2216 | filt_sordetach(struct knote *kn) |
2217 | { |
2218 | struct socket *so; |
2219 | |
2220 | so = ((file_t *)kn->kn_obj)->f_socket; |
2221 | solock(so); |
2222 | SLIST_REMOVE(&so->so_rcv.sb_sel.sel_klist, kn, knote, kn_selnext); |
2223 | if (SLIST_EMPTY(&so->so_rcv.sb_sel.sel_klist)) |
2224 | so->so_rcv.sb_flags &= ~SB_KNOTE; |
2225 | sounlock(so); |
2226 | } |
2227 | |
2228 | /*ARGSUSED*/ |
2229 | static int |
2230 | filt_soread(struct knote *kn, long hint) |
2231 | { |
2232 | struct socket *so; |
2233 | int rv; |
2234 | |
2235 | so = ((file_t *)kn->kn_obj)->f_socket; |
2236 | if (hint != NOTE_SUBMIT) |
2237 | solock(so); |
2238 | kn->kn_data = so->so_rcv.sb_cc; |
2239 | if (so->so_state & SS_CANTRCVMORE) { |
2240 | kn->kn_flags |= EV_EOF; |
2241 | kn->kn_fflags = so->so_error; |
2242 | rv = 1; |
2243 | } else if (so->so_error) /* temporary udp error */ |
2244 | rv = 1; |
2245 | else if (kn->kn_sfflags & NOTE_LOWAT) |
2246 | rv = (kn->kn_data >= kn->kn_sdata); |
2247 | else |
2248 | rv = (kn->kn_data >= so->so_rcv.sb_lowat); |
2249 | if (hint != NOTE_SUBMIT) |
2250 | sounlock(so); |
2251 | return rv; |
2252 | } |
2253 | |
2254 | static void |
2255 | filt_sowdetach(struct knote *kn) |
2256 | { |
2257 | struct socket *so; |
2258 | |
2259 | so = ((file_t *)kn->kn_obj)->f_socket; |
2260 | solock(so); |
2261 | SLIST_REMOVE(&so->so_snd.sb_sel.sel_klist, kn, knote, kn_selnext); |
2262 | if (SLIST_EMPTY(&so->so_snd.sb_sel.sel_klist)) |
2263 | so->so_snd.sb_flags &= ~SB_KNOTE; |
2264 | sounlock(so); |
2265 | } |
2266 | |
2267 | /*ARGSUSED*/ |
2268 | static int |
2269 | filt_sowrite(struct knote *kn, long hint) |
2270 | { |
2271 | struct socket *so; |
2272 | int rv; |
2273 | |
2274 | so = ((file_t *)kn->kn_obj)->f_socket; |
2275 | if (hint != NOTE_SUBMIT) |
2276 | solock(so); |
2277 | kn->kn_data = sbspace(&so->so_snd); |
2278 | if (so->so_state & SS_CANTSENDMORE) { |
2279 | kn->kn_flags |= EV_EOF; |
2280 | kn->kn_fflags = so->so_error; |
2281 | rv = 1; |
2282 | } else if (so->so_error) /* temporary udp error */ |
2283 | rv = 1; |
2284 | else if (((so->so_state & SS_ISCONNECTED) == 0) && |
2285 | (so->so_proto->pr_flags & PR_CONNREQUIRED)) |
2286 | rv = 0; |
2287 | else if (kn->kn_sfflags & NOTE_LOWAT) |
2288 | rv = (kn->kn_data >= kn->kn_sdata); |
2289 | else |
2290 | rv = (kn->kn_data >= so->so_snd.sb_lowat); |
2291 | if (hint != NOTE_SUBMIT) |
2292 | sounlock(so); |
2293 | return rv; |
2294 | } |
2295 | |
2296 | /*ARGSUSED*/ |
2297 | static int |
2298 | filt_solisten(struct knote *kn, long hint) |
2299 | { |
2300 | struct socket *so; |
2301 | int rv; |
2302 | |
2303 | so = ((file_t *)kn->kn_obj)->f_socket; |
2304 | |
2305 | /* |
2306 | * Set kn_data to number of incoming connections, not |
2307 | * counting partial (incomplete) connections. |
2308 | */ |
2309 | if (hint != NOTE_SUBMIT) |
2310 | solock(so); |
2311 | kn->kn_data = so->so_qlen; |
2312 | rv = (kn->kn_data > 0); |
2313 | if (hint != NOTE_SUBMIT) |
2314 | sounlock(so); |
2315 | return rv; |
2316 | } |
2317 | |
2318 | static const struct filterops solisten_filtops = |
2319 | { 1, NULL, filt_sordetach, filt_solisten }; |
2320 | static const struct filterops soread_filtops = |
2321 | { 1, NULL, filt_sordetach, filt_soread }; |
2322 | static const struct filterops sowrite_filtops = |
2323 | { 1, NULL, filt_sowdetach, filt_sowrite }; |
2324 | |
2325 | int |
2326 | soo_kqfilter(struct file *fp, struct knote *kn) |
2327 | { |
2328 | struct socket *so; |
2329 | struct sockbuf *sb; |
2330 | |
2331 | so = ((file_t *)kn->kn_obj)->f_socket; |
2332 | solock(so); |
2333 | switch (kn->kn_filter) { |
2334 | case EVFILT_READ: |
2335 | if (so->so_options & SO_ACCEPTCONN) |
2336 | kn->kn_fop = &solisten_filtops; |
2337 | else |
2338 | kn->kn_fop = &soread_filtops; |
2339 | sb = &so->so_rcv; |
2340 | break; |
2341 | case EVFILT_WRITE: |
2342 | kn->kn_fop = &sowrite_filtops; |
2343 | sb = &so->so_snd; |
2344 | break; |
2345 | default: |
2346 | sounlock(so); |
2347 | return (EINVAL); |
2348 | } |
2349 | SLIST_INSERT_HEAD(&sb->sb_sel.sel_klist, kn, kn_selnext); |
2350 | sb->sb_flags |= SB_KNOTE; |
2351 | sounlock(so); |
2352 | return (0); |
2353 | } |
2354 | |
2355 | static int |
2356 | sodopoll(struct socket *so, int events) |
2357 | { |
2358 | int revents; |
2359 | |
2360 | revents = 0; |
2361 | |
2362 | if (events & (POLLIN | POLLRDNORM)) |
2363 | if (soreadable(so)) |
2364 | revents |= events & (POLLIN | POLLRDNORM); |
2365 | |
2366 | if (events & (POLLOUT | POLLWRNORM)) |
2367 | if (sowritable(so)) |
2368 | revents |= events & (POLLOUT | POLLWRNORM); |
2369 | |
2370 | if (events & (POLLPRI | POLLRDBAND)) |
2371 | if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) |
2372 | revents |= events & (POLLPRI | POLLRDBAND); |
2373 | |
2374 | return revents; |
2375 | } |
2376 | |
2377 | int |
2378 | sopoll(struct socket *so, int events) |
2379 | { |
2380 | int revents = 0; |
2381 | |
2382 | #ifndef DIAGNOSTIC |
2383 | /* |
2384 | * Do a quick, unlocked check in expectation that the socket |
2385 | * will be ready for I/O. Don't do this check if DIAGNOSTIC, |
2386 | * as the solocked() assertions will fail. |
2387 | */ |
2388 | if ((revents = sodopoll(so, events)) != 0) |
2389 | return revents; |
2390 | #endif |
2391 | |
2392 | solock(so); |
2393 | if ((revents = sodopoll(so, events)) == 0) { |
2394 | if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) { |
2395 | selrecord(curlwp, &so->so_rcv.sb_sel); |
2396 | so->so_rcv.sb_flags |= SB_NOTIFY; |
2397 | } |
2398 | |
2399 | if (events & (POLLOUT | POLLWRNORM)) { |
2400 | selrecord(curlwp, &so->so_snd.sb_sel); |
2401 | so->so_snd.sb_flags |= SB_NOTIFY; |
2402 | } |
2403 | } |
2404 | sounlock(so); |
2405 | |
2406 | return revents; |
2407 | } |
2408 | |
2409 | |
2410 | #include <sys/sysctl.h> |
2411 | |
2412 | static int sysctl_kern_somaxkva(SYSCTLFN_PROTO); |
2413 | static int sysctl_kern_sbmax(SYSCTLFN_PROTO); |
2414 | |
2415 | /* |
2416 | * sysctl helper routine for kern.somaxkva. ensures that the given |
2417 | * value is not too small. |
2418 | * (XXX should we maybe make sure it's not too large as well?) |
2419 | */ |
2420 | static int |
2421 | sysctl_kern_somaxkva(SYSCTLFN_ARGS) |
2422 | { |
2423 | int error, new_somaxkva; |
2424 | struct sysctlnode node; |
2425 | |
2426 | new_somaxkva = somaxkva; |
2427 | node = *rnode; |
2428 | node.sysctl_data = &new_somaxkva; |
2429 | error = sysctl_lookup(SYSCTLFN_CALL(&node)); |
2430 | if (error || newp == NULL) |
2431 | return (error); |
2432 | |
2433 | if (new_somaxkva < (16 * 1024 * 1024)) /* sanity */ |
2434 | return (EINVAL); |
2435 | |
2436 | mutex_enter(&so_pendfree_lock); |
2437 | somaxkva = new_somaxkva; |
2438 | cv_broadcast(&socurkva_cv); |
2439 | mutex_exit(&so_pendfree_lock); |
2440 | |
2441 | return (error); |
2442 | } |
2443 | |
2444 | /* |
2445 | * sysctl helper routine for kern.sbmax. Basically just ensures that |
2446 | * any new value is not too small. |
2447 | */ |
2448 | static int |
2449 | sysctl_kern_sbmax(SYSCTLFN_ARGS) |
2450 | { |
2451 | int error, new_sbmax; |
2452 | struct sysctlnode node; |
2453 | |
2454 | new_sbmax = sb_max; |
2455 | node = *rnode; |
2456 | node.sysctl_data = &new_sbmax; |
2457 | error = sysctl_lookup(SYSCTLFN_CALL(&node)); |
2458 | if (error || newp == NULL) |
2459 | return (error); |
2460 | |
2461 | KERNEL_LOCK(1, NULL); |
2462 | error = sb_max_set(new_sbmax); |
2463 | KERNEL_UNLOCK_ONE(NULL); |
2464 | |
2465 | return (error); |
2466 | } |
2467 | |
2468 | static void |
2469 | sysctl_kern_socket_setup(void) |
2470 | { |
2471 | |
2472 | KASSERT(socket_sysctllog == NULL); |
2473 | |
	sysctl_createv(&socket_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "somaxkva",
		SYSCTL_DESCR("Maximum amount of kernel memory to be "
		    "used for socket buffers"),
		sysctl_kern_somaxkva, 0, NULL, 0,
		CTL_KERN, KERN_SOMAXKVA, CTL_EOL);

	sysctl_createv(&socket_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "sbmax",
		SYSCTL_DESCR("Maximum socket buffer size"),
		sysctl_kern_sbmax, 0, NULL, 0,
		CTL_KERN, KERN_SBMAX, CTL_EOL);
2488 | } |
2489 | |