/* $NetBSD: hd64570.c,v 1.50 2016/06/10 13:27:13 ozaki-r Exp $ */

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 * o teach the receive logic about errors, and about long frames that
 *   span more than one input buffer.  (Right now, receive/transmit is
 *   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *   This is currently 1504, which is large enough to hold the HDLC
 *   header and the packet itself.  Packets which are too long are
 *   silently dropped on both transmit and receive.)
 * o write code to handle the msci interrupts, needed only for CD
 *   and CTS changes.
 * o consider switching back to a "queue tx with DMA active" model which
 *   should help sustain outgoing traffic
 * o through clever use of bus_dma*() functions, it should be possible
 *   to map the mbuf's data area directly into a descriptor transmit
 *   buffer, removing the need to allocate extra memory.  If, however,
 *   we run out of descriptors for this, we will need to then allocate
 *   one large mbuf, copy the fragmented chain into it, and put it onto
 *   a single descriptor.
 * o use bus_dmamap_sync() with the right offset and lengths, rather
 *   than cheating and always sync'ing the whole region.
 *
 * o perhaps allow rx and tx to be in more than one page
 *   if not using DMA.  currently the assumption is that
 *   rx uses a page and tx uses a page.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.50 2016/06/10 13:27:13 ozaki-r Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#include <net/bpf.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX	0x0001
#define SCA_DEBUG_TX	0x0002
#define SCA_DEBUG_CISCO	0x0004
#define SCA_DEBUG_DMA	0x0008
#define SCA_DEBUG_RXPKT	0x0010
#define SCA_DEBUG_TXPKT	0x0020
#define SCA_DEBUG_INTR	0x0040
#define SCA_DEBUG_CLOCK	0x0080

#if 0
#define SCA_DEBUG_LEVEL	( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
		if ((l) & sca_debug) \
			printf x;\
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif
132
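/*
 * Note that the second argument to SCA_DPRINTF() is a complete,
 * parenthesized printf() argument list, e.g.
 *
 *	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));
 *
 * which lets the macro expand to a plain "printf x" without requiring
 * variadic macro support.
 */
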
#if 0
#define SCA_USE_FASTQ	/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
    const struct rtentry *);
static int sca_ioctl(struct ifnet *, u_long, void *);
static void sca_start(struct ifnet *);
static void sca_watchdog(struct ifnet *);

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

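/*
 * For cards whose descriptor/buffer memory is reached through a window
 * (sc_usedma == 0), only the in-page offset of an address is directly
 * addressable; the page itself is selected with scu_set_page().  This
 * macro masks an address down to that in-page offset.
 */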
#define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

#if SCA_DEBUG_LEVEL > 0
/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}
#endif

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

#if SCA_DEBUG_LEVEL > 0
/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}
#endif

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check: check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle fewer than 1 or more than 2 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	struct timeval now;
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN);

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, device_xname(sc->sc_parent), port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 *	1 <= TD <= 256		TD is inc of 1
	 *	2 <= TD <= 512		TD is inc of 2
	 *	4 <= TD <= 1024		TD is inc of 4
	 *	...
	 *	512 <= TD <= 256*512	TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision as div
	 * increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */
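	/*
	 * Worked example: with the 9.8304MHz crystal assumed below
	 * (chipclock = 9830400 / 2 = 4915200Hz), asking for hz = 9600
	 * gives tmc = 4915200 / 9600 = 512 > 256, so we halve the
	 * clock and retry: 2457600 / 9600 = 256 with div = 1, i.e.
	 * 9600 = (4915200 / 256) / 2^1.
	 */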

	/* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLC_PATTERN
		     | SCA_CTL_UDRNC_AFTER_FCS
		     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns:
	 * whenever the FIFO level is less than or equal to TRC0, txrdy
	 * is activated, which will start the dmac transfer to the fifo;
	 * once the FIFO level reaches TRC1 + 1, txrdy is cleared, which
	 * will stop DMA.
	 *
	 * thus if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns, so we want a fairly full fifo to still
	 * cause the dmac to start.  for cards with on board ram this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
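	/*
	 * Start with TRC0 = 0 (txrdy asserted only once the FIFO has
	 * drained); sca_msci_intr() below raises TRC0 whenever an
	 * underrun is actually observed, adapting the threshold to the
	 * line speed.
	 */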
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize,
		    BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * because cda can't be eda when starting we always
	 * have a single buffer gap between cda and eda
	 */
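	/*
	 * E.g. with sp_nrxdesc == 4 the ring starts out as
	 *
	 *	CDA -> desc[0] -> desc[1] -> desc[2] -> desc[3] (EDA) -+
	 *	  ^----------------------------------------------------+
	 *
	 * and sca_frame_read_done() keeps EDA one slot behind the read
	 * position as frames are consumed.
	 */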
	dmac_write_1(scp, SCA_CPB0,
		     (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(
    struct ifnet *ifp,
    struct mbuf *m,
    const struct sockaddr *dst,
    const struct rtentry *rt0)
{
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header.  If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, error);
	if (error != 0) {
		splx(s);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

 bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *)data;
	error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == NULL) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(struct ifnet *ifp)
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

	/*
	 * Pass packet to bpf if there is a listener.
	 */
	bpf_mtap(ifp, mb_head);

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this used to limit us to 1?! - multi may be untested
	 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard
	 * to find bug
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(struct ifnet *ifp)
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
					     (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed. cda %x eda %x dsr %x\n",
				     dmac_read_2(scp, SCA_CDAL1),
				     dmac_read_2(scp, SCA_EDAL1),
				     dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
				| SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
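	/*
	 * CDAL0 holds the low 16 bits of the chip's current descriptor
	 * address; subtracting the low 16 bits of the ring base and
	 * dividing by the descriptor size yields the ring index.
	 */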
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	pktqueue_t *pktq = NULL;
	struct ifqueue *ifq = NULL;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;
	int isr = 0;

	t = time_uptime * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

	bpf_mtap(&scp->sp_if, m);

	scp->sp_if.if_ipackets++;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m_set_rcvif(m, &scp->sp_if);
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		pktq = ip_pktq;
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
		m_set_rcvif(m, &scp->sp_if);
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		pktq = ip6_pktq;
		break;
#endif	/* INET6 */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("short CISCO packet %d, wanted %d\n",
				     len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m_set_rcvif(m, &scp->sp_if);

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Received KA, mseq %d,"
				     " yseq %d, rel 0x%04x, t0"
				     " %04x, t1 %04x\n",
				     ntohl(cisco->par1), ntohl(cisco->par2),
				     ntohs(cisco->rel), ntohs(cisco->time0),
				     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Unknown CISCO keepalive protocol 0x%04x\n",
				     ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
			    ("Unknown/unexpected ethertype 0x%04x\n",
			     ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* Queue the packet */
	if (__predict_true(pktq)) {
		if (__predict_false(!pktq_enqueue(pktq, m, 0))) {
			scp->sp_if.if_iqdrops++;
			goto dropit;
		}
		return;
	}
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
		schednetisr(isr);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}
	return;
dropit:
	if (m)
		m_freem(m);
	return;
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
	       desc,
	       sca_desc_read_chainp(sc, desc),
	       sca_desc_read_bufp(sc, desc),
	       sca_desc_read_stat(sc, desc),
	       (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		       (sc->sc_usedma ? *p
			: bus_space_read_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
	struct timeval now;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	     (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	     | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
		     (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, void *), p, len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;
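
	/*
	 * Sanity check on the arithmetic, using the nominal 9.8304MHz
	 * crystal: the timer ticks at baseclock/8, so 3/4 of a second
	 * yields 9830400/8 * 3/4 = 921600 ticks; cnt * 8 = 7372800, and
	 * multiplying by 4/3 recovers 9830400 Hz.
	 */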

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf(" tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			    scp->sp_if.if_xname);
	}
}