/*	$NetBSD: smb_iod.c,v 1.40 2012/04/29 20:27:31 dsl Exp $	*/

/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * FreeBSD: src/sys/netsmb/smb_iod.c,v 1.4 2001/12/09 17:48:08 arr Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: smb_iod.c,v 1.40 2012/04/29 20:27:31 dsl Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

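/*
 * Convenience wrappers for the iod's two spin locks: iod_evlock
 * protects the event list, iod_rqlock protects the request list.
 */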
#define SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)

MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");
MALLOC_DECLARE(M_SMBIOD);

static int smb_iod_next;

static bool smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

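/*
 * Complete a request: record the final error code, advance the
 * response generation, mark the request SMBRQ_NOTIFIED and wake any
 * waiter, cancel a pending timeout and invoke the receive callback,
 * if one is set.
 */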
static void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	if (rqp->sr_timo > 0)
		callout_stop(&rqp->sr_timo_ch);
	if (rqp->sr_recvcallback)
		(*rqp->sr_recvcallback)(rqp->sr_recvarg);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_rqtimedout(void *arg)
{
	smb_iod_rqprocessed((struct smb_rq *)arg, ETIMEDOUT);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection.
	 */
	SMB_IOD_RQLOCK(iod);
	SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (rqp->sr_flags & SMBR_INTERNAL)
			SMBRQ_SUNLOCK(rqp);
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

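/*
 * Tear down the transport attached to this virtual circuit, if any.
 */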
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct lwp *l = iod->iod_l;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, l);
	SMB_TRAN_DONE(vcp, l);
	vcp->vc_tdata = NULL;
}

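/*
 * Declare the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN.  A subsequent SMBIOD_EV_CONNECT
 * event may revive the circuit via smb_iod_connect().
 */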
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

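/*
 * (Re)establish the virtual circuit: create, bind and connect the
 * transport, then run the SMB negotiate and session setup exchanges.
 * Any failure along the way marks the connection dead.
 */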
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct lwp *l = iod->iod_l;
	int error;

	SMBIODEBUG(("%d\n", iod->iod_state));
	switch (iod->iod_state) {
	case SMBIOD_ST_VCACTIVE:
		SMBIODEBUG(("called for already opened connection\n"));
		return EISCONN;
	case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	default:
		break;
	}
	vcp->vc_genid++;

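/*
 * Local helper: run one step of the handshake and bail out to the
 * fail label on the first error, so the sequence below reads linearly.
 */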
#define ithrow(cmd) \
	if ((error = cmd)) \
		goto fail

	ithrow(SMB_TRAN_CREATE(vcp, l));
	SMBIODEBUG(("tcreate\n"));
	if (vcp->vc_laddr) {
		ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, l));
	}
	SMBIODEBUG(("tbind\n"));
	ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, l));
	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
	iod->iod_state = SMBIOD_ST_TRANACTIVE;
	SMBIODEBUG(("tconnect\n"));
/*	vcp->vc_mid = 0;*/
	ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
	SMBIODEBUG(("snegotiate\n"));
	ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
	iod->iod_state = SMBIOD_ST_VCACTIVE;

#undef ithrow

	SMBIODEBUG(("completed\n"));
	smb_iod_invrq(iod);

	return (0);

    fail:
	smb_iod_dead(iod);
	return (error);
}

static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG(("\n"));
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

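/*
 * Reconnect a share: revive the virtual circuit first if it is dead,
 * then re-issue the tree connect.  Waiters sleeping on ss_vcgenid are
 * woken once the share has left the SMBS_RECONNECTING state.
 */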
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG(("tree reconnect\n"));
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

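/*
 * Try to send one request.  On the first attempt the TID and UID
 * fields are stamped into the header; the mbuf chain is copied with
 * m_copym() so the original stays available for retransmission.
 * After more than five failed attempts the request is aborted.
 */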
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct lwp *l = iod->iod_l;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG(("iod_state = %d, rqmid %d\n", iod->iod_state, rqp->sr_mid));
	switch (iod->iod_state) {
	case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	case SMBIOD_ST_RECONNECT:
		return 0;
	default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
		u_int16_t tid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
		u_int16_t rquid = vcp ? vcp->vc_smbuid : 0;
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		SMBRQ_PUTLE16(rqp->sr_rqtid, tid);
		SMBRQ_PUTLE16(rqp->sr_rquid, rquid);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG(("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0));
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAIT);
	error = rqp->sr_lerror = (m) ? SMB_TRAN_SEND(vcp, m, l) : ENOBUFS;
	if (error == 0) {
		if (rqp->sr_timo > 0)
			callout_reset(&rqp->sr_timo_ch, rqp->sr_timo,
			    smb_iod_rqtimedout, rqp);

		if (rqp->sr_flags & SMBR_NOWAIT) {
			/* caller doesn't want to wait, flag as processed */
			smb_iod_rqprocessed(rqp, 0);
			return (0);
		}

#if 0
		iod->iod_lastrqsent = ts;
#endif
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors.
	 */
	if (vcp && SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made.
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets.
 */
static void
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct lwp *l = iod->iod_l;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	case SMBIOD_ST_NOTCONN:
	case SMBIOD_ST_DEAD:
	case SMBIOD_ST_RECONNECT:
		return;
	default:
		break;
	}

	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, l);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		KASSERT(m != NULL);

		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * We now have a complete, but possibly invalid, SMB
		 * packet.  Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char *);
		if (memcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG(("mid %04x\n", (u_int)mid));
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBIODEBUG(("duplicate response %d (ignored)\n", mid));
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBIODEBUG(("drop resp with mid %d\n", (u_int)mid));
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * Check for interrupts.
	 */
	SMB_IOD_RQLOCK(iod);
	SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_proc_intr(rqp->sr_cred->scr_l)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
}

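/*
 * Post an event to the iod thread.  Asynchronous events are simply
 * queued; SMBIOD_EV_SYNC events sleep until the thread has processed
 * the event and recorded its status, e.g.
 *
 *	error = smb_iod_request(vcp->vc_iod,
 *	    SMBIOD_EV_DISCONNECT | SMBIOD_EV_SYNC, NULL);
 */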
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG(("\n"));
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	SIMPLEQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	mtsleep(evp, PWAIT | PNORELOCK, "smbevw", 0, SMB_IOD_EVLOCKPTR(iod));
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place a request in the queue.
 * Requests from smbiod itself have a high priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG(("\n"));
	if (rqp->sr_cred->scr_l == iod->iod_l) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * We don't need to lock the state field here.
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "smbsndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	case SMBIOD_ST_DEAD:
		error = smb_iod_request(iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		/*
		 * Return an error to force the caller to reissue the
		 * request using the new connection state.
		 */
		return EXDEV;
	default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
#ifdef DIAGNOSTIC
		if (vcp->vc_maxmux == 0)
			panic("%s: vc maxmux == 0", __func__);
#endif
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		/* XXX use interruptible sleep? */
		mtsleep(&iod->iod_muxwant, PWAIT, "smbmux",
		    0, SMB_IOD_RQLOCKPTR(iod));
	}
	iod->iod_muxcnt++;
	SIMPLEQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

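/*
 * Remove a request from the queue.  Non-internal requests also give
 * back a multiplex slot and wake one waiter blocked in smb_iod_addrq().
 */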
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG(("\n"));
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		mtsleep(rqp, PWAIT, "smbxrm", 0, SMB_IOD_RQLOCKPTR(iod));
	}
	SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

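/*
 * Wait for a request to complete.  Internal requests drive the
 * send/receive loop themselves; all others sleep until
 * smb_iod_rqprocessed() advances the response generation.
 */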
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG(("\n"));
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "smbirq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast) {
		/* XXX interruptible sleep? */
		mtsleep(&rqp->sr_state, PWAIT, "smbwrq", 0,
		    SMBRQ_SLOCKPTR(rqp));
	}
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it
		 * at the end of the queue so other waiters get a
		 * chance to proceed.
		 */
		SMB_IOD_RQLOCK(iod);
		SIMPLEQ_REMOVE(&iod->iod_rqlist, rqp, smb_rq, sr_link);
		SIMPLEQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}

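/*
 * Send every request still in the SMBRQ_NOTSENT state; returns true
 * if any request was sent.  SMBR_XLOCK keeps a request from being
 * removed while the list is unlocked around the send.
 */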
static bool
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_rq *rqp;
	int herror;
	bool sentany = false;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible.
	 */
	SMB_IOD_RQLOCK(iod);
	SIMPLEQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (__predict_false(rqp->sr_state == SMBRQ_NOTSENT)) {
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}

			if (__predict_false(herror != 0))
				break;
			sentany = true;
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);

	return sentany;
}

/*
 * "main" function for the smbiod daemon
 */
static inline void
smb_iod_main(struct smbiod *iod)
{
#if 0
	struct smb_vc *vcp = iod->iod_vc;
	struct timespec tsnow;
#endif
	struct smbiod_event *evp;

	SMBIODEBUG(("\n"));

	/*
	 * Check all interesting events.
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = SIMPLEQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		SIMPLEQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif

	/*
	 * Do one send/receive cycle, then repeat it for as long as we
	 * manage to send out new data.  This makes sure that requests
	 * queued during the receive phase (which may block and release
	 * the kernel lock) also get sent.
	 */
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	while (smb_iod_sendall(iod)) {
		smb_iod_recvall(iod);
	}
}

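/*
 * Body of the per-connection kernel thread: process events and run
 * send/receive cycles until SMBIOD_SHUTDOWN is set.
 */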
void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;
	int s;

	/*
	 * We assume that the lwp structure stays the same for the
	 * entire life of the kthread (kproc, to be more precise).
	 */
	KASSERT(iod->iod_l == curlwp);
	smb_makescred(&iod->iod_scred, iod->iod_l, NULL);
	s = splnet();
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		SMBIODEBUG(("going to sleep\n"));
		/*
		 * Technically, waking up every hz ticks is unnecessary,
		 * but keep this here until smb has been made mpsafe.
		 */
		tsleep(&iod->iod_flags, PSOCK, "smbidle", hz);
	}
	splx(s);
	kthread_exit(0);
}

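/*
 * Allocate and initialize an iod for the given virtual circuit and
 * spawn its kernel thread.
 */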
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
#if 0
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	microtime(&iod->iod_lastrqsent);
#endif
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "smbrql");
	SIMPLEQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "smbevl");
	SIMPLEQ_INIT(&iod->iod_evlist);
	error = kthread_create(PRI_NONE, 0, NULL, smb_iod_thread, iod,
	    &iod->iod_l, "smbiod%d", iod->iod_id);
	if (error) {
		SMBIODEBUG(("can't start smbiod: %d", error));
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

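/*
 * Synchronously shut down the iod thread, then release its locks and
 * memory.
 */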
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	return 0;
}

int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}