1 | /* $NetBSD: hme.c,v 1.94 2016/10/02 14:16:02 christos Exp $ */ |
2 | |
3 | /*- |
4 | * Copyright (c) 1999 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Paul Kranenburg. |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ |
31 | |
32 | /* |
33 | * HME Ethernet module driver. |
34 | */ |
35 | |
36 | #include <sys/cdefs.h> |
37 | __KERNEL_RCSID(0, "$NetBSD: hme.c,v 1.94 2016/10/02 14:16:02 christos Exp $" ); |
38 | |
39 | /* #define HMEDEBUG */ |
40 | |
41 | #include "opt_inet.h" |
42 | |
43 | #include <sys/param.h> |
44 | #include <sys/systm.h> |
45 | #include <sys/kernel.h> |
46 | #include <sys/mbuf.h> |
47 | #include <sys/syslog.h> |
48 | #include <sys/socket.h> |
49 | #include <sys/device.h> |
50 | #include <sys/malloc.h> |
51 | #include <sys/ioctl.h> |
52 | #include <sys/errno.h> |
53 | #include <sys/rndsource.h> |
54 | |
55 | #include <net/if.h> |
56 | #include <net/if_dl.h> |
57 | #include <net/if_ether.h> |
58 | #include <net/if_media.h> |
59 | |
60 | #ifdef INET |
61 | #include <net/if_vlanvar.h> |
62 | #include <netinet/in.h> |
63 | #include <netinet/if_inarp.h> |
64 | #include <netinet/in_systm.h> |
65 | #include <netinet/in_var.h> |
66 | #include <netinet/ip.h> |
67 | #include <netinet/tcp.h> |
68 | #include <netinet/udp.h> |
69 | #endif |
70 | |
71 | |
72 | #include <net/bpf.h> |
73 | #include <net/bpfdesc.h> |
74 | |
75 | #include <dev/mii/mii.h> |
76 | #include <dev/mii/miivar.h> |
77 | |
78 | #include <sys/bus.h> |
79 | |
80 | #include <dev/ic/hmereg.h> |
81 | #include <dev/ic/hmevar.h> |
82 | |
83 | static void hme_start(struct ifnet *); |
84 | static void hme_stop(struct ifnet *, int); |
85 | static int hme_ioctl(struct ifnet *, u_long, void *); |
86 | static void hme_tick(void *); |
87 | static void hme_watchdog(struct ifnet *); |
88 | static bool hme_shutdown(device_t, int); |
89 | static int hme_init(struct ifnet *); |
90 | static void hme_meminit(struct hme_softc *); |
91 | static void hme_mifinit(struct hme_softc *); |
92 | static void hme_reset(struct hme_softc *); |
93 | static void hme_chipreset(struct hme_softc *); |
94 | static void hme_setladrf(struct hme_softc *); |
95 | |
96 | /* MII methods & callbacks */ |
97 | static int hme_mii_readreg(device_t, int, int); |
98 | static void hme_mii_writereg(device_t, int, int, int); |
99 | static void hme_mii_statchg(struct ifnet *); |
100 | |
101 | static int hme_mediachange(struct ifnet *); |
102 | |
103 | static struct mbuf *hme_get(struct hme_softc *, int, uint32_t); |
104 | static int hme_put(struct hme_softc *, int, struct mbuf *); |
105 | static void hme_read(struct hme_softc *, int, uint32_t); |
106 | static int hme_eint(struct hme_softc *, u_int); |
107 | static int hme_rint(struct hme_softc *); |
108 | static int hme_tint(struct hme_softc *); |
109 | |
110 | #if 0 |
111 | /* Default buffer copy routines */ |
112 | static void hme_copytobuf_contig(struct hme_softc *, void *, int, int); |
113 | static void hme_copyfrombuf_contig(struct hme_softc *, void *, int, int); |
114 | #endif |
115 | |
116 | void |
117 | hme_config(struct hme_softc *sc) |
118 | { |
119 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
120 | struct mii_data *mii = &sc->sc_mii; |
121 | struct mii_softc *child; |
122 | bus_dma_tag_t dmatag = sc->sc_dmatag; |
123 | bus_dma_segment_t seg; |
124 | bus_size_t size; |
125 | int rseg, error; |
126 | |
127 | /* |
128 | * HME common initialization. |
129 | * |
130 | * hme_softc fields that must be initialized by the front-end: |
131 | * |
132 | * the bus tag: |
133 | * sc_bustag |
134 | * |
135 | * the DMA bus tag: |
136 | * sc_dmatag |
137 | * |
138 | * the bus handles: |
139 | * sc_seb (Shared Ethernet Block registers) |
140 | * sc_erx (Receiver Unit registers) |
141 | * sc_etx (Transmitter Unit registers) |
142 | * sc_mac (MAC registers) |
143 | * sc_mif (Management Interface registers) |
144 | * |
145 | * the maximum bus burst size: |
146 | * sc_burst |
147 | * |
148 | * (notyet:DMA capable memory for the ring descriptors & packet buffers: |
149 | * rb_membase, rb_dmabase) |
150 | * |
151 | * the local Ethernet address: |
152 | * sc_enaddr |
153 | * |
154 | */ |
155 | |
156 | /* Make sure the chip is stopped. */ |
157 | hme_chipreset(sc); |
158 | |
159 | /* |
160 | * Allocate descriptors and buffers |
161 | * XXX - do all this differently.. and more configurably, |
162 | * eg. use things as `dma_load_mbuf()' on transmit, |
163 | * and a pool of `EXTMEM' mbufs (with buffers DMA-mapped |
164 | * all the time) on the receiver side. |
165 | * |
166 | * Note: receive buffers must be 64-byte aligned. |
167 | * Also, apparently, the buffers must extend to a DMA burst |
168 | * boundary beyond the maximum packet size. |
169 | */ |
170 | #define _HME_NDESC 128 |
171 | #define _HME_BUFSZ 1600 |
172 | |
173 | /* Note: the # of descriptors must be a multiple of 16 */ |
174 | sc->sc_rb.rb_ntbuf = _HME_NDESC; |
175 | sc->sc_rb.rb_nrbuf = _HME_NDESC; |
176 | |
177 | /* |
178 | * Allocate DMA capable memory |
179 | * Buffer descriptors must be aligned on a 2048 byte boundary; |
180 | * take this into account when calculating the size. Note that |
181 | * the maximum number of descriptors (256) occupies 2048 bytes, |
182 | * so we allocate that much regardless of _HME_NDESC. |
183 | */ |
184 | size = 2048 + /* TX descriptors */ |
185 | 2048 + /* RX descriptors */ |
186 | sc->sc_rb.rb_ntbuf * _HME_BUFSZ + /* TX buffers */ |
187 | sc->sc_rb.rb_nrbuf * _HME_BUFSZ; /* RX buffers */ |
188 | |
189 | /* Allocate DMA buffer */ |
190 | if ((error = bus_dmamem_alloc(dmatag, size, |
191 | 2048, 0, |
192 | &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { |
193 | aprint_error_dev(sc->sc_dev, "DMA buffer alloc error %d\n" , |
194 | error); |
195 | return; |
196 | } |
197 | |
198 | /* Map DMA memory in CPU addressable space */ |
199 | if ((error = bus_dmamem_map(dmatag, &seg, rseg, size, |
200 | &sc->sc_rb.rb_membase, |
201 | BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { |
202 | aprint_error_dev(sc->sc_dev, "DMA buffer map error %d\n" , |
203 | error); |
204 | bus_dmamap_unload(dmatag, sc->sc_dmamap); |
205 | bus_dmamem_free(dmatag, &seg, rseg); |
206 | return; |
207 | } |
208 | |
209 | if ((error = bus_dmamap_create(dmatag, size, 1, size, 0, |
210 | BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) { |
211 | aprint_error_dev(sc->sc_dev, "DMA map create error %d\n" , |
212 | error); |
213 | return; |
214 | } |
215 | |
216 | /* Load the buffer */ |
217 | if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap, |
218 | sc->sc_rb.rb_membase, size, NULL, |
219 | BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { |
220 | aprint_error_dev(sc->sc_dev, "DMA buffer map load error %d\n" , |
221 | error); |
222 | bus_dmamem_free(dmatag, &seg, rseg); |
223 | return; |
224 | } |
225 | sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr; |
226 | |
227 | aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n" , |
228 | ether_sprintf(sc->sc_enaddr)); |
229 | |
230 | /* Initialize ifnet structure. */ |
231 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
232 | ifp->if_softc = sc; |
233 | ifp->if_start = hme_start; |
234 | ifp->if_stop = hme_stop; |
235 | ifp->if_ioctl = hme_ioctl; |
236 | ifp->if_init = hme_init; |
237 | ifp->if_watchdog = hme_watchdog; |
238 | ifp->if_flags = |
239 | IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; |
240 | sc->sc_if_flags = ifp->if_flags; |
241 | ifp->if_capabilities |= |
242 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
243 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; |
244 | IFQ_SET_READY(&ifp->if_snd); |
245 | |
246 | /* Initialize ifmedia structures and MII info */ |
247 | mii->mii_ifp = ifp; |
248 | mii->mii_readreg = hme_mii_readreg; |
249 | mii->mii_writereg = hme_mii_writereg; |
250 | mii->mii_statchg = hme_mii_statchg; |
251 | |
252 | sc->sc_ethercom.ec_mii = mii; |
253 | ifmedia_init(&mii->mii_media, 0, hme_mediachange, ether_mediastatus); |
254 | |
255 | hme_mifinit(sc); |
256 | |
257 | mii_attach(sc->sc_dev, mii, 0xffffffff, |
258 | MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG); |
259 | |
260 | child = LIST_FIRST(&mii->mii_phys); |
261 | if (child == NULL) { |
262 | /* No PHY attached */ |
263 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL); |
264 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); |
265 | } else { |
266 | /* |
267 | * Walk along the list of attached MII devices and |
268 | * establish an `MII instance' to `phy number' |
269 | * mapping. We'll use this mapping in media change |
270 | * requests to determine which phy to use to program |
271 | * the MIF configuration register. |
272 | */ |
273 | for (; child != NULL; child = LIST_NEXT(child, mii_list)) { |
274 | /* |
275 | * Note: we support just two PHYs: the built-in |
276 | * internal device and an external on the MII |
277 | * connector. |
278 | */ |
279 | if (child->mii_phy > 1 || child->mii_inst > 1) { |
280 | aprint_error_dev(sc->sc_dev, |
281 | "cannot accommodate MII device %s" |
282 | " at phy %d, instance %d\n" , |
283 | device_xname(child->mii_dev), |
284 | child->mii_phy, child->mii_inst); |
285 | continue; |
286 | } |
287 | |
288 | sc->sc_phys[child->mii_inst] = child->mii_phy; |
289 | } |
290 | |
291 | /* |
292 | * Set the default media to auto negotiation if the phy has |
293 | * the auto negotiation capability. |
294 | * XXX; What to do otherwise? |
295 | */ |
296 | if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0)) |
297 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); |
298 | /* |
299 | else |
300 | ifmedia_set(&sc->sc_mii.mii_media, sc->sc_defaultmedia); |
301 | */ |
302 | } |
303 | |
304 | /* claim 802.1q capability */ |
305 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
306 | |
307 | /* Attach the interface. */ |
308 | if_attach(ifp); |
309 | ether_ifattach(ifp, sc->sc_enaddr); |
310 | |
311 | if (pmf_device_register1(sc->sc_dev, NULL, NULL, hme_shutdown)) |
312 | pmf_class_network_register(sc->sc_dev, ifp); |
313 | else |
314 | aprint_error_dev(sc->sc_dev, |
315 | "couldn't establish power handler\n" ); |
316 | |
317 | rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), |
318 | RND_TYPE_NET, RND_FLAG_DEFAULT); |
319 | |
320 | callout_init(&sc->sc_tick_ch, 0); |
321 | } |
322 | |
323 | void |
324 | hme_tick(void *arg) |
325 | { |
326 | struct hme_softc *sc = arg; |
327 | int s; |
328 | |
329 | s = splnet(); |
330 | mii_tick(&sc->sc_mii); |
331 | splx(s); |
332 | |
333 | callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc); |
334 | } |
335 | |
336 | void |
337 | hme_reset(struct hme_softc *sc) |
338 | { |
339 | int s; |
340 | |
341 | s = splnet(); |
342 | (void)hme_init(&sc->sc_ethercom.ec_if); |
343 | splx(s); |
344 | } |
345 | |
346 | void |
347 | hme_chipreset(struct hme_softc *sc) |
348 | { |
349 | bus_space_tag_t t = sc->sc_bustag; |
350 | bus_space_handle_t seb = sc->sc_seb; |
351 | int n; |
352 | |
353 | /* Mask all interrupts */ |
354 | bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff); |
355 | |
356 | /* Reset transmitter and receiver */ |
357 | bus_space_write_4(t, seb, HME_SEBI_RESET, |
358 | (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)); |
359 | |
360 | for (n = 0; n < 20; n++) { |
361 | uint32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET); |
362 | if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0) |
363 | return; |
364 | DELAY(20); |
365 | } |
366 | |
367 | printf("%s: %s: reset failed\n" , device_xname(sc->sc_dev), __func__); |
368 | } |
369 | |
370 | void |
371 | hme_stop(struct ifnet *ifp, int disable) |
372 | { |
373 | struct hme_softc *sc; |
374 | |
375 | sc = ifp->if_softc; |
376 | |
377 | ifp->if_timer = 0; |
378 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
379 | |
380 | callout_stop(&sc->sc_tick_ch); |
381 | mii_down(&sc->sc_mii); |
382 | |
383 | hme_chipreset(sc); |
384 | } |
385 | |
386 | void |
387 | hme_meminit(struct hme_softc *sc) |
388 | { |
389 | bus_addr_t txbufdma, rxbufdma; |
390 | bus_addr_t dma; |
391 | char *p; |
392 | unsigned int ntbuf, nrbuf, i; |
393 | struct hme_ring *hr = &sc->sc_rb; |
394 | |
395 | p = hr->rb_membase; |
396 | dma = hr->rb_dmabase; |
397 | |
398 | ntbuf = hr->rb_ntbuf; |
399 | nrbuf = hr->rb_nrbuf; |
400 | |
401 | /* |
402 | * Allocate transmit descriptors |
403 | */ |
404 | hr->rb_txd = p; |
405 | hr->rb_txddma = dma; |
406 | p += ntbuf * HME_XD_SIZE; |
407 | dma += ntbuf * HME_XD_SIZE; |
408 | /* We have reserved descriptor space until the next 2048 byte boundary.*/ |
409 | dma = (bus_addr_t)roundup((u_long)dma, 2048); |
410 | p = (void *)roundup((u_long)p, 2048); |
411 | |
412 | /* |
413 | * Allocate receive descriptors |
414 | */ |
415 | hr->rb_rxd = p; |
416 | hr->rb_rxddma = dma; |
417 | p += nrbuf * HME_XD_SIZE; |
418 | dma += nrbuf * HME_XD_SIZE; |
419 | /* Again move forward to the next 2048 byte boundary.*/ |
420 | dma = (bus_addr_t)roundup((u_long)dma, 2048); |
421 | p = (void *)roundup((u_long)p, 2048); |
422 | |
423 | |
424 | /* |
425 | * Allocate transmit buffers |
426 | */ |
427 | hr->rb_txbuf = p; |
428 | txbufdma = dma; |
429 | p += ntbuf * _HME_BUFSZ; |
430 | dma += ntbuf * _HME_BUFSZ; |
431 | |
432 | /* |
433 | * Allocate receive buffers |
434 | */ |
435 | hr->rb_rxbuf = p; |
436 | rxbufdma = dma; |
437 | p += nrbuf * _HME_BUFSZ; |
438 | dma += nrbuf * _HME_BUFSZ; |
439 | |
440 | /* |
441 | * Initialize transmit buffer descriptors |
442 | */ |
443 | for (i = 0; i < ntbuf; i++) { |
444 | HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, txbufdma + i * _HME_BUFSZ); |
445 | HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0); |
446 | } |
447 | |
448 | /* |
449 | * Initialize receive buffer descriptors |
450 | */ |
451 | for (i = 0; i < nrbuf; i++) { |
452 | HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ); |
453 | HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, |
454 | HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ)); |
455 | } |
456 | |
457 | hr->rb_tdhead = hr->rb_tdtail = 0; |
458 | hr->rb_td_nbusy = 0; |
459 | hr->rb_rdtail = 0; |
460 | } |
461 | |
462 | /* |
463 | * Initialization of interface; set up initialization block |
464 | * and transmit/receive descriptor rings. |
465 | */ |
int
hme_init(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	uint8_t *ea;
	uint32_t v;
	int rc;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(ifp, 0);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	/* Zero the collision/error counters harvested by hme_tint(). */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	/* Maximum TX frame size; allow room for a VLAN tag if enabled. */
	bus_space_write_4(t, mac, HME_MACI_TXSIZE,
	    (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);
	/* Snapshot the capabilities this init run was programmed with. */
	sc->sc_ec_capenable = sc->sc_ethercom.ec_capenable;

	/* Load station MAC address */
	ea = sc->sc_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	/*
	 * NOTE(review): the mask 0x3fff keeps 14 bits, not the 10 bits
	 * the comment above suggests — presumably harmless since the
	 * hardware ignores the excess bits; confirm against the STP2002
	 * register description.
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);


	/* Note: Accepting power-on default for other MAC registers here.. */


	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/*
	 * NOTE(review): HME_ETXI_RSIZE is written again in step 9 below
	 * with a different (encoded) value; that later write is the one
	 * that takes effect.
	 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	/* Maximum RX frame size, mirroring the TX setting above. */
	bus_space_write_4(t, mac, HME_MACI_RXSIZE,
	    (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	/* Enable (by clearing mask bits) the interrupts we service. */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(
	      /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
	      HME_SEB_STAT_HOSTTOTX |
	      HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL |
	      HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP |
	      HME_SEB_STAT_MIFIRQ |
	      HME_SEB_STAT_ALL_ERRORS ));

	/* Encode the front-end supplied DMA burst size. */
	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1);


	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	switch (_HME_NDESC /*XXX*/) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE;

	/* set h/w rx checksum start offset (# of half-words) */
#ifdef INET
	/* Checksumming starts right after a standard IPv4 header. */
	v |= (((ETHER_HDR_LEN + sizeof(struct ip)) / sizeof(uint16_t))
		<< HME_ERX_CFG_CSUMSHIFT) &
		HME_ERX_CFG_CSUMSTART;
#endif
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_PSTRIP;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Set the current media. */
	if ((rc = hme_mediachange(ifp)) != 0)
		return rc;

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	/* Interface is now up; kick the transmit path. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
	hme_start(ifp);
	return 0;
}
650 | |
651 | /* |
652 | * Routine to copy from mbuf chain to transmit buffer in |
653 | * network buffer memory. |
654 | * Returns the amount of data copied. |
655 | */ |
656 | int |
657 | hme_put(struct hme_softc *sc, int ri, struct mbuf *m) |
658 | /* ri: Ring index */ |
659 | { |
660 | struct mbuf *n; |
661 | int len, tlen = 0; |
662 | char *bp; |
663 | |
664 | bp = (char *)sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ; |
665 | for (; m; m = n) { |
666 | len = m->m_len; |
667 | if (len == 0) { |
668 | n = m_free(m); |
669 | continue; |
670 | } |
671 | memcpy(bp, mtod(m, void *), len); |
672 | bp += len; |
673 | tlen += len; |
674 | n = m_free(m); |
675 | } |
676 | return (tlen); |
677 | } |
678 | |
679 | /* |
680 | * Pull data off an interface. |
681 | * Len is length of data, with local net header stripped. |
682 | * We copy the data into mbufs. When full cluster sized units are present |
683 | * we copy into clusters. |
684 | */ |
685 | struct mbuf * |
686 | hme_get(struct hme_softc *sc, int ri, uint32_t flags) |
687 | { |
688 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
689 | struct mbuf *m, *m0, *newm; |
690 | char *bp; |
691 | int len, totlen; |
692 | #ifdef INET |
693 | int csum_flags; |
694 | #endif |
695 | |
696 | totlen = HME_XD_DECODE_RSIZE(flags); |
697 | MGETHDR(m0, M_DONTWAIT, MT_DATA); |
698 | if (m0 == 0) |
699 | return (0); |
700 | m_set_rcvif(m0, ifp); |
701 | m0->m_pkthdr.len = totlen; |
702 | len = MHLEN; |
703 | m = m0; |
704 | |
705 | bp = (char *)sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ; |
706 | |
707 | while (totlen > 0) { |
708 | if (totlen >= MINCLSIZE) { |
709 | MCLGET(m, M_DONTWAIT); |
710 | if ((m->m_flags & M_EXT) == 0) |
711 | goto bad; |
712 | len = MCLBYTES; |
713 | } |
714 | |
715 | if (m == m0) { |
716 | char *newdata = (char *) |
717 | ALIGN(m->m_data + sizeof(struct ether_header)) - |
718 | sizeof(struct ether_header); |
719 | len -= newdata - m->m_data; |
720 | m->m_data = newdata; |
721 | } |
722 | |
723 | m->m_len = len = min(totlen, len); |
724 | memcpy(mtod(m, void *), bp, len); |
725 | bp += len; |
726 | |
727 | totlen -= len; |
728 | if (totlen > 0) { |
729 | MGET(newm, M_DONTWAIT, MT_DATA); |
730 | if (newm == 0) |
731 | goto bad; |
732 | len = MLEN; |
733 | m = m->m_next = newm; |
734 | } |
735 | } |
736 | |
737 | #ifdef INET |
738 | /* hardware checksum */ |
739 | csum_flags = 0; |
740 | if (ifp->if_csum_flags_rx & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) { |
741 | struct ether_header *eh; |
742 | struct ether_vlan_header *evh; |
743 | struct ip *ip; |
744 | struct udphdr *uh; |
745 | uint16_t *opts; |
746 | int32_t hlen, pktlen; |
747 | uint32_t csum_data; |
748 | |
749 | eh = mtod(m0, struct ether_header *); |
750 | if (ntohs(eh->ether_type) == ETHERTYPE_IP) { |
751 | ip = (struct ip *)((char *)eh + ETHER_HDR_LEN); |
752 | pktlen = m0->m_pkthdr.len - ETHER_HDR_LEN; |
753 | } else if (ntohs(eh->ether_type) == ETHERTYPE_VLAN) { |
754 | evh = (struct ether_vlan_header *)eh; |
755 | if (ntohs(evh->evl_proto != ETHERTYPE_IP)) |
756 | goto swcsum; |
757 | ip = (struct ip *)((char *)eh + ETHER_HDR_LEN + |
758 | ETHER_VLAN_ENCAP_LEN); |
759 | pktlen = m0->m_pkthdr.len - |
760 | ETHER_HDR_LEN - ETHER_VLAN_ENCAP_LEN; |
761 | } else |
762 | goto swcsum; |
763 | |
764 | /* IPv4 only */ |
765 | if (ip->ip_v != IPVERSION) |
766 | goto swcsum; |
767 | |
768 | hlen = ip->ip_hl << 2; |
769 | if (hlen < sizeof(struct ip)) |
770 | goto swcsum; |
771 | |
772 | /* |
773 | * bail if too short, has random trailing garbage, truncated, |
774 | * fragment, or has ethernet pad. |
775 | */ |
776 | if (ntohs(ip->ip_len) < hlen || |
777 | ntohs(ip->ip_len) != pktlen || |
778 | (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0) |
779 | goto swcsum; |
780 | |
781 | switch (ip->ip_p) { |
782 | case IPPROTO_TCP: |
783 | if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0) |
784 | goto swcsum; |
785 | if (pktlen < (hlen + sizeof(struct tcphdr))) |
786 | goto swcsum; |
787 | csum_flags = |
788 | M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR; |
789 | break; |
790 | case IPPROTO_UDP: |
791 | if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0) |
792 | goto swcsum; |
793 | if (pktlen < (hlen + sizeof(struct udphdr))) |
794 | goto swcsum; |
795 | uh = (struct udphdr *)((char *)ip + hlen); |
796 | /* no checksum */ |
797 | if (uh->uh_sum == 0) |
798 | goto swcsum; |
799 | csum_flags = |
800 | M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR; |
801 | break; |
802 | default: |
803 | goto swcsum; |
804 | } |
805 | |
806 | /* w/ M_CSUM_NO_PSEUDOHDR, the uncomplemented sum is expected */ |
807 | csum_data = ~flags & HME_XD_RXCKSUM; |
808 | |
809 | /* |
810 | * If data offset is different from RX cksum start offset, |
811 | * we have to deduct them. |
812 | */ |
813 | hlen = ((char *)ip + hlen) - |
814 | ((char *)eh + ETHER_HDR_LEN + sizeof(struct ip)); |
815 | if (hlen > 1) { |
816 | uint32_t optsum; |
817 | |
818 | optsum = 0; |
819 | opts = (uint16_t *)((char *)eh + |
820 | ETHER_HDR_LEN + sizeof(struct ip)); |
821 | |
822 | while (hlen > 1) { |
823 | optsum += ntohs(*opts++); |
824 | hlen -= 2; |
825 | } |
826 | while (optsum >> 16) |
827 | optsum = (optsum >> 16) + (optsum & 0xffff); |
828 | |
829 | /* Deduct the ip opts sum from the hwsum. */ |
830 | csum_data += (uint16_t)~optsum; |
831 | |
832 | while (csum_data >> 16) |
833 | csum_data = |
834 | (csum_data >> 16) + (csum_data & 0xffff); |
835 | } |
836 | m0->m_pkthdr.csum_data = csum_data; |
837 | } |
838 | swcsum: |
839 | m0->m_pkthdr.csum_flags = csum_flags; |
840 | #endif |
841 | |
842 | return (m0); |
843 | |
844 | bad: |
845 | m_freem(m0); |
846 | return (0); |
847 | } |
848 | |
849 | /* |
850 | * Pass a packet to the higher levels. |
851 | */ |
852 | void |
853 | hme_read(struct hme_softc *sc, int ix, uint32_t flags) |
854 | { |
855 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
856 | struct mbuf *m; |
857 | int len; |
858 | |
859 | len = HME_XD_DECODE_RSIZE(flags); |
860 | if (len <= sizeof(struct ether_header) || |
861 | len > ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ? |
862 | ETHER_VLAN_ENCAP_LEN + ETHERMTU + sizeof(struct ether_header) : |
863 | ETHERMTU + sizeof(struct ether_header))) { |
864 | #ifdef HMEDEBUG |
865 | printf("%s: invalid packet size %d; dropping\n" , |
866 | device_xname(sc->sc_dev), len); |
867 | #endif |
868 | ifp->if_ierrors++; |
869 | return; |
870 | } |
871 | |
872 | /* Pull packet off interface. */ |
873 | m = hme_get(sc, ix, flags); |
874 | if (m == 0) { |
875 | ifp->if_ierrors++; |
876 | return; |
877 | } |
878 | |
879 | ifp->if_ipackets++; |
880 | |
881 | /* |
882 | * Check if there's a BPF listener on this interface. |
883 | * If so, hand off the raw packet to BPF. |
884 | */ |
885 | bpf_mtap(ifp, m); |
886 | |
887 | /* Pass the packet up. */ |
888 | if_percpuq_enqueue(ifp->if_percpuq, m); |
889 | } |
890 | |
/*
 * Transmit start routine: drain the interface send queue into the
 * transmit descriptor ring until the queue is empty or the ring fills.
 */
void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	void *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int txflags;
	unsigned int ri, len, obusy;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	/* Do nothing unless the interface is up and not already stalled. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Snapshot the ring head and busy count so we can detect progress. */
	ri = sc->sc_rb.rb_tdhead;
	obusy = sc->sc_rb.rb_td_nbusy;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		bpf_mtap(ifp, m);

#ifdef INET
		/* collect bits for h/w csum, before hme_put frees the mbuf */
		if (ifp->if_csum_flags_tx & (M_CSUM_TCPv4 | M_CSUM_UDPv4) &&
		    m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
			struct ether_header *eh;
			uint16_t offset, start;

			eh = mtod(m, struct ether_header *);
			switch (ntohs(eh->ether_type)) {
			case ETHERTYPE_IP:
				start = ETHER_HDR_LEN;
				break;
			case ETHERTYPE_VLAN:
				start = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
				break;
			default:
				/* unsupported, drop it */
				m_free(m);
				continue;
			}
			/* Checksummed region begins after the IP header. */
			start += M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
			/* Where the computed sum must be stored in the frame. */
			offset = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data)
			    + start;
			txflags = HME_XD_TXCKSUM |
			    (offset << HME_XD_TXCSSTUFFSHIFT) |
			    (start << HME_XD_TXCSSTARTSHIFT);
		} else
#endif
			txflags = 0;

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = hme_put(sc, ri, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		/* Whole frame fits one slot: SOP and EOP on one descriptor. */
		HME_XD_SETFLAGS(sc->sc_pci, txd, ri,
		    HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
		    HME_XD_ENCODE_TSIZE(len) | txflags);

		/*if (sc->sc_rb.rb_td_nbusy <= 0)*/
		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);

		/* Advance the ring head, wrapping at the end of the ring. */
		if (++ri == ntbuf)
			ri = 0;

		/* Ring full: stall the queue until hme_tint() reclaims. */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	/* If anything was queued, commit the new head and arm the watchdog. */
	if (obusy != sc->sc_rb.rb_td_nbusy) {
		sc->sc_rb.rb_tdhead = ri;
		ifp->if_timer = 5;
	}
}
978 | |
979 | /* |
980 | * Transmit interrupt. |
981 | */ |
982 | int |
983 | hme_tint(struct hme_softc *sc) |
984 | { |
985 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
986 | bus_space_tag_t t = sc->sc_bustag; |
987 | bus_space_handle_t mac = sc->sc_mac; |
988 | unsigned int ri, txflags; |
989 | |
990 | /* |
991 | * Unload collision counters |
992 | */ |
993 | ifp->if_collisions += |
994 | bus_space_read_4(t, mac, HME_MACI_NCCNT) + |
995 | bus_space_read_4(t, mac, HME_MACI_FCCNT); |
996 | ifp->if_oerrors += |
997 | bus_space_read_4(t, mac, HME_MACI_EXCNT) + |
998 | bus_space_read_4(t, mac, HME_MACI_LTCNT); |
999 | |
1000 | /* |
1001 | * then clear the hardware counters. |
1002 | */ |
1003 | bus_space_write_4(t, mac, HME_MACI_NCCNT, 0); |
1004 | bus_space_write_4(t, mac, HME_MACI_FCCNT, 0); |
1005 | bus_space_write_4(t, mac, HME_MACI_EXCNT, 0); |
1006 | bus_space_write_4(t, mac, HME_MACI_LTCNT, 0); |
1007 | |
1008 | /* Fetch current position in the transmit ring */ |
1009 | ri = sc->sc_rb.rb_tdtail; |
1010 | |
1011 | for (;;) { |
1012 | if (sc->sc_rb.rb_td_nbusy <= 0) |
1013 | break; |
1014 | |
1015 | txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri); |
1016 | |
1017 | if (txflags & HME_XD_OWN) |
1018 | break; |
1019 | |
1020 | ifp->if_flags &= ~IFF_OACTIVE; |
1021 | ifp->if_opackets++; |
1022 | |
1023 | if (++ri == sc->sc_rb.rb_ntbuf) |
1024 | ri = 0; |
1025 | |
1026 | --sc->sc_rb.rb_td_nbusy; |
1027 | } |
1028 | |
1029 | /* Update ring */ |
1030 | sc->sc_rb.rb_tdtail = ri; |
1031 | |
1032 | hme_start(ifp); |
1033 | |
1034 | if (sc->sc_rb.rb_td_nbusy == 0) |
1035 | ifp->if_timer = 0; |
1036 | |
1037 | return (1); |
1038 | } |
1039 | |
1040 | /* |
1041 | * Receive interrupt. |
1042 | */ |
1043 | int |
1044 | hme_rint(struct hme_softc *sc) |
1045 | { |
1046 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1047 | bus_space_tag_t t = sc->sc_bustag; |
1048 | bus_space_handle_t mac = sc->sc_mac; |
1049 | void *xdr = sc->sc_rb.rb_rxd; |
1050 | unsigned int nrbuf = sc->sc_rb.rb_nrbuf; |
1051 | unsigned int ri; |
1052 | uint32_t flags; |
1053 | |
1054 | ri = sc->sc_rb.rb_rdtail; |
1055 | |
1056 | /* |
1057 | * Process all buffers with valid data. |
1058 | */ |
1059 | for (;;) { |
1060 | flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri); |
1061 | if (flags & HME_XD_OWN) |
1062 | break; |
1063 | |
1064 | if (flags & HME_XD_OFL) { |
1065 | printf("%s: buffer overflow, ri=%d; flags=0x%x\n" , |
1066 | device_xname(sc->sc_dev), ri, flags); |
1067 | } else |
1068 | hme_read(sc, ri, flags); |
1069 | |
1070 | /* This buffer can be used by the hardware again */ |
1071 | HME_XD_SETFLAGS(sc->sc_pci, xdr, ri, |
1072 | HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ)); |
1073 | |
1074 | if (++ri == nrbuf) |
1075 | ri = 0; |
1076 | } |
1077 | |
1078 | sc->sc_rb.rb_rdtail = ri; |
1079 | |
1080 | /* Read error counters ... */ |
1081 | ifp->if_ierrors += |
1082 | bus_space_read_4(t, mac, HME_MACI_STAT_LCNT) + |
1083 | bus_space_read_4(t, mac, HME_MACI_STAT_ACNT) + |
1084 | bus_space_read_4(t, mac, HME_MACI_STAT_CCNT) + |
1085 | bus_space_read_4(t, mac, HME_MACI_STAT_CVCNT); |
1086 | |
1087 | /* ... then clear the hardware counters. */ |
1088 | bus_space_write_4(t, mac, HME_MACI_STAT_LCNT, 0); |
1089 | bus_space_write_4(t, mac, HME_MACI_STAT_ACNT, 0); |
1090 | bus_space_write_4(t, mac, HME_MACI_STAT_CCNT, 0); |
1091 | bus_space_write_4(t, mac, HME_MACI_STAT_CVCNT, 0); |
1092 | return (1); |
1093 | } |
1094 | |
1095 | int |
1096 | hme_eint(struct hme_softc *sc, u_int status) |
1097 | { |
1098 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1099 | char bits[128]; |
1100 | |
1101 | if ((status & HME_SEB_STAT_MIFIRQ) != 0) { |
1102 | bus_space_tag_t t = sc->sc_bustag; |
1103 | bus_space_handle_t mif = sc->sc_mif; |
1104 | uint32_t cf, st, sm; |
1105 | cf = bus_space_read_4(t, mif, HME_MIFI_CFG); |
1106 | st = bus_space_read_4(t, mif, HME_MIFI_STAT); |
1107 | sm = bus_space_read_4(t, mif, HME_MIFI_SM); |
1108 | printf("%s: XXXlink status changed: cfg=%x, stat %x, sm %x\n" , |
1109 | device_xname(sc->sc_dev), cf, st, sm); |
1110 | return (1); |
1111 | } |
1112 | |
1113 | /* Receive error counters rolled over */ |
1114 | if (status & HME_SEB_STAT_ACNTEXP) |
1115 | ifp->if_ierrors += 0xff; |
1116 | if (status & HME_SEB_STAT_CCNTEXP) |
1117 | ifp->if_ierrors += 0xff; |
1118 | if (status & HME_SEB_STAT_LCNTEXP) |
1119 | ifp->if_ierrors += 0xff; |
1120 | if (status & HME_SEB_STAT_CVCNTEXP) |
1121 | ifp->if_ierrors += 0xff; |
1122 | |
1123 | /* RXTERR locks up the interface, so do a reset */ |
1124 | if (status & HME_SEB_STAT_RXTERR) |
1125 | hme_reset(sc); |
1126 | |
1127 | snprintb(bits, sizeof(bits), HME_SEB_STAT_BITS, status); |
1128 | printf("%s: status=%s\n" , device_xname(sc->sc_dev), bits); |
1129 | |
1130 | return (1); |
1131 | } |
1132 | |
1133 | int |
1134 | hme_intr(void *v) |
1135 | { |
1136 | struct hme_softc *sc = v; |
1137 | bus_space_tag_t t = sc->sc_bustag; |
1138 | bus_space_handle_t seb = sc->sc_seb; |
1139 | uint32_t status; |
1140 | int r = 0; |
1141 | |
1142 | status = bus_space_read_4(t, seb, HME_SEBI_STAT); |
1143 | |
1144 | if ((status & HME_SEB_STAT_ALL_ERRORS) != 0) |
1145 | r |= hme_eint(sc, status); |
1146 | |
1147 | if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0) |
1148 | r |= hme_tint(sc); |
1149 | |
1150 | if ((status & HME_SEB_STAT_RXTOHOST) != 0) |
1151 | r |= hme_rint(sc); |
1152 | |
1153 | rnd_add_uint32(&sc->rnd_source, status); |
1154 | |
1155 | return (r); |
1156 | } |
1157 | |
1158 | |
1159 | void |
1160 | hme_watchdog(struct ifnet *ifp) |
1161 | { |
1162 | struct hme_softc *sc = ifp->if_softc; |
1163 | |
1164 | log(LOG_ERR, "%s: device timeout\n" , device_xname(sc->sc_dev)); |
1165 | ++ifp->if_oerrors; |
1166 | |
1167 | hme_reset(sc); |
1168 | } |
1169 | |
1170 | /* |
1171 | * Initialize the MII Management Interface |
1172 | */ |
1173 | void |
1174 | hme_mifinit(struct hme_softc *sc) |
1175 | { |
1176 | bus_space_tag_t t = sc->sc_bustag; |
1177 | bus_space_handle_t mif = sc->sc_mif; |
1178 | bus_space_handle_t mac = sc->sc_mac; |
1179 | int instance, phy; |
1180 | uint32_t v; |
1181 | |
1182 | if (sc->sc_mii.mii_media.ifm_cur != NULL) { |
1183 | instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media); |
1184 | phy = sc->sc_phys[instance]; |
1185 | } else |
1186 | /* No media set yet, pick phy arbitrarily.. */ |
1187 | phy = HME_PHYAD_EXTERNAL; |
1188 | |
1189 | /* Configure the MIF in frame mode, no poll, current phy select */ |
1190 | v = 0; |
1191 | if (phy == HME_PHYAD_EXTERNAL) |
1192 | v |= HME_MIF_CFG_PHY; |
1193 | bus_space_write_4(t, mif, HME_MIFI_CFG, v); |
1194 | |
1195 | /* If an external transceiver is selected, enable its MII drivers */ |
1196 | v = bus_space_read_4(t, mac, HME_MACI_XIF); |
1197 | v &= ~HME_MAC_XIF_MIIENABLE; |
1198 | if (phy == HME_PHYAD_EXTERNAL) |
1199 | v |= HME_MAC_XIF_MIIENABLE; |
1200 | bus_space_write_4(t, mac, HME_MACI_XIF, v); |
1201 | } |
1202 | |
1203 | /* |
1204 | * MII interface |
1205 | */ |
/*
 * Read the 16-bit PHY register `reg' on PHY address `phy' via the MIF
 * frame interface.  Returns 0 on timeout or for an unsupported PHY
 * address.  The MIF and XIF configuration registers are saved on entry
 * and restored before returning.
 */
static int
hme_mii_readreg(device_t self, int phy, int reg)
{
	struct hme_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	uint32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

#if 0
	/* This doesn't work reliably; the MDIO_1 bit is off most of the time */
	/*
	 * Check whether a transceiver is connected by testing
	 * the MIF configuration register's MDI_X bits. Note that
	 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
	 */
	mif_mdi_bit = 1 << (8 + (1 - phy));
	delay(100);
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	if ((v & mif_mdi_bit) == 0)
		return (0);
#endif

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	/* Issue the frame and poll (up to ~100us) for completion */
	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			/* Frame done; extract the data bits */
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	/* Timed out: report it and return 0 */
	v = 0;
	printf("%s: mii_read timeout\n" , device_xname(sc->sc_dev));

out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}
1276 | |
/*
 * Write the low 16 bits of `val' to PHY register `reg' on PHY address
 * `phy' via the MIF frame interface.  Silently returns on timeout or
 * for an unsupported PHY address.  The MIF and XIF configuration
 * registers are saved on entry and restored before returning.
 */
static void
hme_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct hme_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	uint32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

#if 0
	/* This doesn't work reliably; the MDIO_1 bit is off most of the time */
	/*
	 * Check whether a transceiver is connected by testing
	 * the MIF configuration register's MDI_X bits. Note that
	 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
	 */
	mif_mdi_bit = 1 << (8 + (1 - phy));
	delay(100);
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	if ((v & mif_mdi_bit) == 0)
		return;
#endif

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	/* Issue the frame and poll (up to ~100us) for completion */
	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n" , device_xname(sc->sc_dev));
out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}
1343 | |
1344 | static void |
1345 | hme_mii_statchg(struct ifnet *ifp) |
1346 | { |
1347 | struct hme_softc *sc = ifp->if_softc; |
1348 | bus_space_tag_t t = sc->sc_bustag; |
1349 | bus_space_handle_t mac = sc->sc_mac; |
1350 | uint32_t v; |
1351 | |
1352 | #ifdef HMEDEBUG |
1353 | if (sc->sc_debug) |
1354 | printf("hme_mii_statchg: status change\n" ); |
1355 | #endif |
1356 | |
1357 | /* Set the MAC Full Duplex bit appropriately */ |
1358 | /* Apparently the hme chip is SIMPLEX if working in full duplex mode, |
1359 | but not otherwise. */ |
1360 | v = bus_space_read_4(t, mac, HME_MACI_TXCFG); |
1361 | if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) { |
1362 | v |= HME_MAC_TXCFG_FULLDPLX; |
1363 | sc->sc_ethercom.ec_if.if_flags |= IFF_SIMPLEX; |
1364 | } else { |
1365 | v &= ~HME_MAC_TXCFG_FULLDPLX; |
1366 | sc->sc_ethercom.ec_if.if_flags &= ~IFF_SIMPLEX; |
1367 | } |
1368 | sc->sc_if_flags = sc->sc_ethercom.ec_if.if_flags; |
1369 | bus_space_write_4(t, mac, HME_MACI_TXCFG, v); |
1370 | } |
1371 | |
1372 | int |
1373 | hme_mediachange(struct ifnet *ifp) |
1374 | { |
1375 | struct hme_softc *sc = ifp->if_softc; |
1376 | bus_space_tag_t t = sc->sc_bustag; |
1377 | bus_space_handle_t mif = sc->sc_mif; |
1378 | bus_space_handle_t mac = sc->sc_mac; |
1379 | int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media); |
1380 | int phy = sc->sc_phys[instance]; |
1381 | int rc; |
1382 | uint32_t v; |
1383 | |
1384 | #ifdef HMEDEBUG |
1385 | if (sc->sc_debug) |
1386 | printf("hme_mediachange: phy = %d\n" , phy); |
1387 | #endif |
1388 | |
1389 | /* Select the current PHY in the MIF configuration register */ |
1390 | v = bus_space_read_4(t, mif, HME_MIFI_CFG); |
1391 | v &= ~HME_MIF_CFG_PHY; |
1392 | if (phy == HME_PHYAD_EXTERNAL) |
1393 | v |= HME_MIF_CFG_PHY; |
1394 | bus_space_write_4(t, mif, HME_MIFI_CFG, v); |
1395 | |
1396 | /* If an external transceiver is selected, enable its MII drivers */ |
1397 | v = bus_space_read_4(t, mac, HME_MACI_XIF); |
1398 | v &= ~HME_MAC_XIF_MIIENABLE; |
1399 | if (phy == HME_PHYAD_EXTERNAL) |
1400 | v |= HME_MAC_XIF_MIIENABLE; |
1401 | bus_space_write_4(t, mac, HME_MACI_XIF, v); |
1402 | |
1403 | if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) |
1404 | return 0; |
1405 | return rc; |
1406 | } |
1407 | |
1408 | /* |
1409 | * Process an ioctl request. |
1410 | */ |
1411 | int |
1412 | hme_ioctl(struct ifnet *ifp, unsigned long cmd, void *data) |
1413 | { |
1414 | struct hme_softc *sc = ifp->if_softc; |
1415 | struct ifaddr *ifa = (struct ifaddr *)data; |
1416 | int s, error = 0; |
1417 | |
1418 | s = splnet(); |
1419 | |
1420 | switch (cmd) { |
1421 | |
1422 | case SIOCINITIFADDR: |
1423 | switch (ifa->ifa_addr->sa_family) { |
1424 | #ifdef INET |
1425 | case AF_INET: |
1426 | if (ifp->if_flags & IFF_UP) |
1427 | hme_setladrf(sc); |
1428 | else { |
1429 | ifp->if_flags |= IFF_UP; |
1430 | error = hme_init(ifp); |
1431 | } |
1432 | arp_ifinit(ifp, ifa); |
1433 | break; |
1434 | #endif |
1435 | default: |
1436 | ifp->if_flags |= IFF_UP; |
1437 | error = hme_init(ifp); |
1438 | break; |
1439 | } |
1440 | break; |
1441 | |
1442 | case SIOCSIFFLAGS: |
1443 | #ifdef HMEDEBUG |
1444 | { |
1445 | struct ifreq *ifr = data; |
1446 | sc->sc_debug = |
1447 | (ifr->ifr_flags & IFF_DEBUG) != 0 ? 1 : 0; |
1448 | } |
1449 | #endif |
1450 | if ((error = ifioctl_common(ifp, cmd, data)) != 0) |
1451 | break; |
1452 | |
1453 | switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) { |
1454 | case IFF_RUNNING: |
1455 | /* |
1456 | * If interface is marked down and it is running, then |
1457 | * stop it. |
1458 | */ |
1459 | hme_stop(ifp, 0); |
1460 | ifp->if_flags &= ~IFF_RUNNING; |
1461 | break; |
1462 | case IFF_UP: |
1463 | /* |
1464 | * If interface is marked up and it is stopped, then |
1465 | * start it. |
1466 | */ |
1467 | error = hme_init(ifp); |
1468 | break; |
1469 | case IFF_UP|IFF_RUNNING: |
1470 | /* |
1471 | * If setting debug or promiscuous mode, do not reset |
1472 | * the chip; for everything else, call hme_init() |
1473 | * which will trigger a reset. |
1474 | */ |
1475 | #define RESETIGN (IFF_CANTCHANGE | IFF_DEBUG) |
1476 | if (ifp->if_flags != sc->sc_if_flags) { |
1477 | if ((ifp->if_flags & (~RESETIGN)) |
1478 | == (sc->sc_if_flags & (~RESETIGN))) |
1479 | hme_setladrf(sc); |
1480 | else |
1481 | error = hme_init(ifp); |
1482 | } |
1483 | #undef RESETIGN |
1484 | break; |
1485 | case 0: |
1486 | break; |
1487 | } |
1488 | |
1489 | if (sc->sc_ec_capenable != sc->sc_ethercom.ec_capenable) |
1490 | error = hme_init(ifp); |
1491 | |
1492 | break; |
1493 | |
1494 | default: |
1495 | if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) |
1496 | break; |
1497 | |
1498 | error = 0; |
1499 | |
1500 | if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) |
1501 | ; |
1502 | else if (ifp->if_flags & IFF_RUNNING) { |
1503 | /* |
1504 | * Multicast list has changed; set the hardware filter |
1505 | * accordingly. |
1506 | */ |
1507 | hme_setladrf(sc); |
1508 | } |
1509 | break; |
1510 | } |
1511 | |
1512 | sc->sc_if_flags = ifp->if_flags; |
1513 | splx(s); |
1514 | return (error); |
1515 | } |
1516 | |
1517 | bool |
1518 | hme_shutdown(device_t self, int howto) |
1519 | { |
1520 | struct hme_softc *sc; |
1521 | struct ifnet *ifp; |
1522 | |
1523 | sc = device_private(self); |
1524 | ifp = &sc->sc_ethercom.ec_if; |
1525 | hme_stop(ifp, 1); |
1526 | |
1527 | return true; |
1528 | } |
1529 | |
1530 | /* |
1531 | * Set up the logical address filter. |
1532 | */ |
1533 | void |
1534 | hme_setladrf(struct hme_softc *sc) |
1535 | { |
1536 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
1537 | struct ether_multi *enm; |
1538 | struct ether_multistep step; |
1539 | struct ethercom *ec = &sc->sc_ethercom; |
1540 | bus_space_tag_t t = sc->sc_bustag; |
1541 | bus_space_handle_t mac = sc->sc_mac; |
1542 | uint32_t v; |
1543 | uint32_t crc; |
1544 | uint32_t hash[4]; |
1545 | |
1546 | /* Clear hash table */ |
1547 | hash[3] = hash[2] = hash[1] = hash[0] = 0; |
1548 | |
1549 | /* Get current RX configuration */ |
1550 | v = bus_space_read_4(t, mac, HME_MACI_RXCFG); |
1551 | |
1552 | if ((ifp->if_flags & IFF_PROMISC) != 0) { |
1553 | /* Turn on promiscuous mode; turn off the hash filter */ |
1554 | v |= HME_MAC_RXCFG_PMISC; |
1555 | v &= ~HME_MAC_RXCFG_HENABLE; |
1556 | ifp->if_flags |= IFF_ALLMULTI; |
1557 | goto chipit; |
1558 | } |
1559 | |
1560 | /* Turn off promiscuous mode; turn on the hash filter */ |
1561 | v &= ~HME_MAC_RXCFG_PMISC; |
1562 | v |= HME_MAC_RXCFG_HENABLE; |
1563 | |
1564 | /* |
1565 | * Set up multicast address filter by passing all multicast addresses |
1566 | * through a crc generator, and then using the high order 6 bits as an |
1567 | * index into the 64 bit logical address filter. The high order bit |
1568 | * selects the word, while the rest of the bits select the bit within |
1569 | * the word. |
1570 | */ |
1571 | |
1572 | ETHER_FIRST_MULTI(step, ec, enm); |
1573 | while (enm != NULL) { |
1574 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
1575 | /* |
1576 | * We must listen to a range of multicast addresses. |
1577 | * For now, just accept all multicasts, rather than |
1578 | * trying to set only those filter bits needed to match |
1579 | * the range. (At this time, the only use of address |
1580 | * ranges is for IP multicast routing, for which the |
1581 | * range is big enough to require all bits set.) |
1582 | */ |
1583 | hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; |
1584 | ifp->if_flags |= IFF_ALLMULTI; |
1585 | goto chipit; |
1586 | } |
1587 | |
1588 | crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); |
1589 | |
1590 | /* Just want the 6 most significant bits. */ |
1591 | crc >>= 26; |
1592 | |
1593 | /* Set the corresponding bit in the filter. */ |
1594 | hash[crc >> 4] |= 1 << (crc & 0xf); |
1595 | |
1596 | ETHER_NEXT_MULTI(step, enm); |
1597 | } |
1598 | |
1599 | ifp->if_flags &= ~IFF_ALLMULTI; |
1600 | |
1601 | chipit: |
1602 | /* Now load the hash table into the chip */ |
1603 | bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]); |
1604 | bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]); |
1605 | bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]); |
1606 | bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]); |
1607 | bus_space_write_4(t, mac, HME_MACI_RXCFG, v); |
1608 | } |
1609 | |
1610 | /* |
1611 | * Routines for accessing the transmit and receive buffers. |
1612 | * The various CPU and adapter configurations supported by this |
1613 | * driver require three different access methods for buffers |
1614 | * and descriptors: |
1615 | * (1) contig (contiguous data; no padding), |
1616 | * (2) gap2 (two bytes of data followed by two bytes of padding), |
1617 | * (3) gap16 (16 bytes of data followed by 16 bytes of padding). |
1618 | */ |
1619 | |
#if 0
/*
 * contig: contiguous data with no padding.
 *
 * Buffers may have any alignment.
 */

/* Copy `len' bytes from `from' into transmit buffer `ri'. */
void
hme_copytobuf_contig(struct hme_softc *sc, void *from, int ri, int len)
{
	volatile void *buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call memcpy() to do the work.
	 */
	memcpy(buf, from, len);
}

/* Copy `len' bytes from receive buffer `ri' into `to'. */
void
hme_copyfrombuf_contig(struct hme_softc *sc, void *to, int ri, int len)
{
	/*
	 * Note: the parameter was previously declared `boff' while the
	 * body referenced the undeclared `ri'; renamed to `ri' (matching
	 * hme_copytobuf_contig) so this compiles if ever enabled.
	 */
	volatile void *buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call memcpy() to do the work.
	 */
	memcpy(to, buf, len);
}
#endif
1649 | |