1 | /* $NetBSD: rrunner.c,v 1.82 2016/10/02 14:16:02 christos Exp $ */ |
2 | |
3 | /* |
4 | * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. |
5 | * All rights reserved. |
6 | * |
7 | * This code contributed to The NetBSD Foundation by Kevin M. Lahey |
8 | * of the Numerical Aerospace Simulation Facility, NASA Ames Research |
9 | * Center. |
10 | * |
11 | * Partially based on a HIPPI driver written by Essential Communications |
12 | * Corporation. Thanks to Jason Thorpe, Matt Jacob, and Fred Templin |
13 | * for invaluable advice and encouragement! |
14 | * |
15 | * Redistribution and use in source and binary forms, with or without |
16 | * modification, are permitted provided that the following conditions |
17 | * are met: |
18 | * 1. Redistributions of source code must retain the above copyright |
19 | * notice, this list of conditions and the following disclaimer. |
20 | * 2. Redistributions in binary form must reproduce the above copyright |
21 | * notice, this list of conditions and the following disclaimer in the |
22 | * documentation and/or other materials provided with the distribution. |
23 | * |
24 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
25 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
26 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
27 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
28 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ |
36 | |
37 | #include <sys/cdefs.h> |
38 | __KERNEL_RCSID(0, "$NetBSD: rrunner.c,v 1.82 2016/10/02 14:16:02 christos Exp $" ); |
39 | |
40 | #include "opt_inet.h" |
41 | |
42 | #include "esh.h" |
43 | |
44 | #include <sys/param.h> |
45 | #include <sys/systm.h> |
46 | #include <sys/mbuf.h> |
47 | #include <sys/buf.h> |
48 | #include <sys/bufq.h> |
49 | #include <sys/socket.h> |
50 | #include <sys/ioctl.h> |
51 | #include <sys/errno.h> |
52 | #include <sys/syslog.h> |
53 | #include <sys/select.h> |
54 | #include <sys/device.h> |
55 | #include <sys/proc.h> |
56 | #include <sys/kernel.h> |
57 | #include <sys/conf.h> |
58 | #include <sys/kauth.h> |
59 | |
60 | #include <uvm/uvm_extern.h> |
61 | |
62 | #include <net/if.h> |
63 | #include <net/if_dl.h> |
64 | #include <net/route.h> |
65 | |
66 | #include <net/if_hippi.h> |
67 | #include <net/if_media.h> |
68 | |
69 | #ifdef INET |
70 | #include <netinet/in.h> |
71 | #include <netinet/in_systm.h> |
72 | #include <netinet/in_var.h> |
73 | #include <netinet/ip.h> |
74 | #include <netinet/if_inarp.h> |
75 | #endif |
76 | |
77 | |
78 | #include <net/bpf.h> |
79 | #include <net/bpfdesc.h> |
80 | |
81 | #include <sys/cpu.h> |
82 | #include <sys/bus.h> |
83 | #include <sys/intr.h> |
84 | |
85 | #include <dev/ic/rrunnerreg.h> |
86 | #include <dev/ic/rrunnervar.h> |
87 | |
88 | /* |
89 | #define ESH_PRINTF |
90 | */ |
91 | |
92 | /* Autoconfig definition of driver back-end */ |
93 | extern struct cfdriver esh_cd; |
94 | |
95 | struct esh_softc *esh_softc_debug[22]; /* for gdb */ |
96 | |
97 | #ifdef DIAGNOSTIC |
98 | u_int32_t max_write_len; |
99 | #endif |
100 | |
101 | /* Network device driver and initialization framework routines */ |
102 | |
103 | void eshinit(struct esh_softc *); |
104 | int eshioctl(struct ifnet *, u_long, void *); |
105 | void eshreset(struct esh_softc *); |
106 | void eshstart(struct ifnet *); |
107 | static int eshstatus(struct esh_softc *); |
108 | void eshstop(struct esh_softc *); |
109 | void eshwatchdog(struct ifnet *); |
110 | |
111 | /* Routines to support FP operation */ |
112 | |
113 | dev_type_open(esh_fpopen); |
114 | dev_type_close(esh_fpclose); |
115 | dev_type_read(esh_fpread); |
116 | dev_type_write(esh_fpwrite); |
117 | #ifdef MORE_DONE |
118 | dev_type_mmap(esh_fpmmap); |
119 | #endif |
120 | dev_type_strategy(esh_fpstrategy); |
121 | |
/*
 * Character device switch for the raw Framing Protocol (FP)
 * interface (esh_fpopen/esh_fpclose/esh_fpread/esh_fpwrite below).
 * Only open/close/read/write (and mmap, when MORE_DONE is defined)
 * are implemented; all other entry points use the standard
 * null/no-op devsw stubs.
 */
const struct cdevsw esh_cdevsw = {
	.d_open = esh_fpopen,
	.d_close = esh_fpclose,
	.d_read = esh_fpread,
	.d_write = esh_fpwrite,
	.d_ioctl = nullioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nullpoll,
#ifdef MORE_DONE
	.d_mmap = esh_fpmmap,
#else
	.d_mmap = nommap,
#endif
	.d_kqfilter = nullkqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};
140 | |
/* General routines, not externally visible */
142 | |
143 | static struct mbuf *esh_adjust_mbufs(struct esh_softc *, struct mbuf *m); |
144 | static void esh_dma_sync(struct esh_softc *, void *, |
145 | int, int, int, int, int, int); |
146 | static void esh_fill_snap_ring(struct esh_softc *); |
147 | static void esh_init_snap_ring(struct esh_softc *); |
148 | static void esh_close_snap_ring(struct esh_softc *); |
149 | static void esh_read_snap_ring(struct esh_softc *, u_int16_t, int); |
150 | static void esh_fill_fp_ring(struct esh_softc *, struct esh_fp_ring_ctl *); |
151 | static void esh_flush_fp_ring(struct esh_softc *, |
152 | struct esh_fp_ring_ctl *, |
153 | struct esh_dmainfo *); |
154 | static void esh_init_fp_rings(struct esh_softc *); |
155 | static void esh_read_fp_ring(struct esh_softc *, u_int16_t, int, int); |
156 | static void esh_reset_runcode(struct esh_softc *); |
157 | static void esh_send(struct esh_softc *); |
158 | static void esh_send_cmd(struct esh_softc *, u_int8_t, u_int8_t, u_int8_t); |
159 | static u_int32_t esh_read_eeprom(struct esh_softc *, u_int32_t); |
160 | static void esh_write_addr(bus_space_tag_t, bus_space_handle_t, |
161 | bus_addr_t, bus_addr_t); |
162 | static int esh_write_eeprom(struct esh_softc *, u_int32_t, u_int32_t); |
163 | static void eshstart_cleanup(struct esh_softc *, u_int16_t, int); |
164 | |
165 | static struct esh_dmainfo *esh_new_dmainfo(struct esh_softc *); |
166 | static void esh_free_dmainfo(struct esh_softc *, struct esh_dmainfo *); |
167 | static int esh_generic_ioctl(struct esh_softc *, u_long, void *, u_long, |
168 | struct lwp *); |
169 | |
170 | #ifdef ESH_PRINTF |
171 | static int esh_check(struct esh_softc *); |
172 | #endif |
173 | |
174 | #define ESHUNIT(x) ((minor(x) & 0xff00) >> 8) |
175 | #define ESHULP(x) (minor(x) & 0x00ff) |
176 | |
177 | |
178 | /* |
179 | * Back-end attach and configure. Allocate DMA space and initialize |
180 | * all structures. |
181 | */ |
182 | |
183 | void |
184 | eshconfig(struct esh_softc *sc) |
185 | { |
186 | struct ifnet *ifp = &sc->sc_if; |
187 | bus_space_tag_t iot = sc->sc_iot; |
188 | bus_space_handle_t ioh = sc->sc_ioh; |
189 | u_int32_t misc_host_ctl; |
190 | u_int32_t misc_local_ctl; |
191 | u_int32_t ; |
192 | u_int32_t ula_tmp; |
193 | bus_size_t size; |
194 | int rseg; |
195 | int error; |
196 | int i; |
197 | |
198 | esh_softc_debug[device_unit(sc->sc_dev)] = sc; |
199 | sc->sc_flags = 0; |
200 | |
201 | TAILQ_INIT(&sc->sc_dmainfo_freelist); |
202 | sc->sc_dmainfo_freelist_count = 0; |
203 | |
204 | /* |
205 | * Allocate and divvy up some host side memory that can hold |
206 | * data structures that will be DMA'ed over to the NIC |
207 | */ |
208 | |
209 | sc->sc_dma_size = sizeof(struct rr_gen_info) + |
210 | sizeof(struct rr_ring_ctl) * RR_ULP_COUNT + |
211 | sizeof(struct rr_descr) * RR_SEND_RING_SIZE + |
212 | sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE + |
213 | sizeof(struct rr_event) * RR_EVENT_RING_SIZE; |
214 | |
215 | error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size, |
216 | 0, RR_DMA_BOUNDARY, &sc->sc_dmaseg, 1, |
217 | &rseg, BUS_DMA_NOWAIT); |
218 | if (error) { |
219 | aprint_error_dev(sc->sc_dev, "couldn't allocate space for host-side" |
220 | "data structures\n" ); |
221 | return; |
222 | } |
223 | if (rseg > 1) { |
224 | aprint_error_dev(sc->sc_dev, "contiguous memory not available\n" ); |
225 | goto bad_dmamem_map; |
226 | } |
227 | |
228 | error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg, rseg, |
229 | sc->sc_dma_size, (void **)&sc->sc_dma_addr, |
230 | BUS_DMA_NOWAIT | BUS_DMA_COHERENT); |
231 | if (error) { |
232 | aprint_error_dev(sc->sc_dev, |
233 | "couldn't map memory for host-side structures\n" ); |
234 | goto bad_dmamem_map; |
235 | } |
236 | |
237 | if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size, |
238 | 1, sc->sc_dma_size, RR_DMA_BOUNDARY, |
239 | BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT, |
240 | &sc->sc_dma)) { |
241 | aprint_error_dev(sc->sc_dev, "couldn't create DMA map\n" ); |
242 | goto bad_dmamap_create; |
243 | } |
244 | |
245 | if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma, sc->sc_dma_addr, |
246 | sc->sc_dma_size, NULL, BUS_DMA_NOWAIT)) { |
247 | aprint_error_dev(sc->sc_dev, "couldn't load DMA map\n" ); |
248 | goto bad_dmamap_load; |
249 | } |
250 | |
251 | memset(sc->sc_dma_addr, 0, sc->sc_dma_size); |
252 | |
253 | sc->sc_gen_info_dma = sc->sc_dma->dm_segs->ds_addr; |
254 | sc->sc_gen_info = (struct rr_gen_info *) sc->sc_dma_addr; |
255 | size = sizeof(struct rr_gen_info); |
256 | |
257 | sc->sc_recv_ring_table_dma = sc->sc_dma->dm_segs->ds_addr + size; |
258 | sc->sc_recv_ring_table = |
259 | (struct rr_ring_ctl *) (sc->sc_dma_addr + size); |
260 | size += sizeof(struct rr_ring_ctl) * RR_ULP_COUNT; |
261 | |
262 | sc->sc_send_ring_dma = sc->sc_dma->dm_segs->ds_addr + size; |
263 | sc->sc_send_ring = (struct rr_descr *) (sc->sc_dma_addr + size); |
264 | sc->sc2_send_ring = (struct rr2_descr *) (sc->sc_dma_addr + size); |
265 | size += sizeof(struct rr_descr) * RR_SEND_RING_SIZE; |
266 | |
267 | sc->sc_snap_recv_ring_dma = sc->sc_dma->dm_segs->ds_addr + size; |
268 | sc->sc_snap_recv_ring = (struct rr_descr *) (sc->sc_dma_addr + size); |
269 | sc->sc2_snap_recv_ring = (struct rr2_descr *) (sc->sc_dma_addr + size); |
270 | size += sizeof(struct rr_descr) * RR_SNAP_RECV_RING_SIZE; |
271 | |
272 | sc->sc_event_ring_dma = sc->sc_dma->dm_segs->ds_addr + size; |
273 | sc->sc_event_ring = (struct rr_event *) (sc->sc_dma_addr + size); |
274 | size += sizeof(struct rr_event) * RR_EVENT_RING_SIZE; |
275 | |
276 | #ifdef DIAGNOSTIC |
277 | if (size > sc->sc_dmaseg.ds_len) { |
278 | aprint_error_dev(sc->sc_dev, "bogus size calculation\n" ); |
279 | goto bad_other; |
280 | } |
281 | #endif |
282 | |
283 | /* |
284 | * Allocate DMA maps for transfers. We do this here and now |
285 | * so we won't have to wait for them in the middle of sending |
286 | * or receiving something. |
287 | */ |
288 | |
289 | if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX, |
290 | ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY, |
291 | BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT, |
292 | &sc->sc_send.ec_dma)) { |
293 | aprint_error_dev(sc->sc_dev, "failed bus_dmamap_create\n" ); |
294 | goto bad_other; |
295 | } |
296 | sc->sc_send.ec_offset = 0; |
297 | sc->sc_send.ec_descr = sc->sc_send_ring; |
298 | TAILQ_INIT(&sc->sc_send.ec_di_queue); |
299 | bufq_alloc(&sc->sc_send.ec_buf_queue, "fcfs" , 0); |
300 | |
301 | for (i = 0; i < RR_MAX_SNAP_RECV_RING_SIZE; i++) |
302 | if (bus_dmamap_create(sc->sc_dmat, RR_DMA_MAX, 1, RR_DMA_MAX, |
303 | RR_DMA_BOUNDARY, |
304 | BUS_DMA_ALLOCNOW | BUS_DMA_NOWAIT, |
305 | &sc->sc_snap_recv.ec_dma[i])) { |
306 | aprint_error_dev(sc->sc_dev, "failed bus_dmamap_create\n" ); |
307 | for (i--; i >= 0; i--) |
308 | bus_dmamap_destroy(sc->sc_dmat, |
309 | sc->sc_snap_recv.ec_dma[i]); |
310 | goto bad_ring_dmamap_create; |
311 | } |
312 | |
313 | /* |
314 | * If this is a coldboot, the NIC RunCode should be operational. |
315 | * If it is a warmboot, it may or may not be operational. |
316 | * Just to be sure, we'll stop the RunCode and reset everything. |
317 | */ |
318 | |
319 | /* Halt the processor (preserve NO_SWAP, if set) */ |
320 | |
321 | misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL); |
322 | bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL, |
323 | (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC); |
324 | |
325 | /* Make the EEPROM readable */ |
326 | |
327 | misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL); |
328 | bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, |
329 | misc_local_ctl & ~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM | |
330 | RR_LC_PARITY_ON)); |
331 | |
332 | /* Extract interesting information from the EEPROM: */ |
333 | |
334 | header_format = esh_read_eeprom(sc, RR_EE_HEADER_FORMAT); |
335 | if (header_format != RR_EE_HEADER_FORMAT_MAGIC) { |
336 | aprint_error_dev(sc->sc_dev, "bogus EEPROM header format value %x\n" , |
337 | header_format); |
338 | goto bad_other; |
339 | } |
340 | |
341 | /* |
342 | * As it is now, the runcode version in the EEPROM doesn't |
343 | * reflect the actual runcode version number. That is only |
344 | * available once the runcode starts up. We should probably |
345 | * change the firmware update code to modify this value, |
346 | * but Essential itself doesn't do it right now. |
347 | */ |
348 | |
349 | sc->sc_sram_size = 4 * esh_read_eeprom(sc, RR_EE_SRAM_SIZE); |
350 | sc->sc_runcode_start = esh_read_eeprom(sc, RR_EE_RUNCODE_START); |
351 | sc->sc_runcode_version = esh_read_eeprom(sc, RR_EE_RUNCODE_VERSION); |
352 | |
353 | sc->sc_pci_latency = esh_read_eeprom(sc, RR_EE_PCI_LATENCY); |
354 | sc->sc_pci_lat_gnt = esh_read_eeprom(sc, RR_EE_PCI_LAT_GNT); |
355 | |
356 | /* General tuning values */ |
357 | |
358 | sc->sc_tune.rt_mode_and_status = |
359 | esh_read_eeprom(sc, RR_EE_MODE_AND_STATUS); |
360 | sc->sc_tune.rt_conn_retry_count = |
361 | esh_read_eeprom(sc, RR_EE_CONN_RETRY_COUNT); |
362 | sc->sc_tune.rt_conn_retry_timer = |
363 | esh_read_eeprom(sc, RR_EE_CONN_RETRY_TIMER); |
364 | sc->sc_tune.rt_conn_timeout = |
365 | esh_read_eeprom(sc, RR_EE_CONN_TIMEOUT); |
366 | sc->sc_tune.rt_interrupt_timer = |
367 | esh_read_eeprom(sc, RR_EE_INTERRUPT_TIMER); |
368 | sc->sc_tune.rt_tx_timeout = |
369 | esh_read_eeprom(sc, RR_EE_TX_TIMEOUT); |
370 | sc->sc_tune.rt_rx_timeout = |
371 | esh_read_eeprom(sc, RR_EE_RX_TIMEOUT); |
372 | sc->sc_tune.rt_stats_timer = |
373 | esh_read_eeprom(sc, RR_EE_STATS_TIMER); |
374 | sc->sc_tune.rt_stats_timer = ESH_STATS_TIMER_DEFAULT; |
375 | |
376 | /* DMA tuning values */ |
377 | |
378 | sc->sc_tune.rt_pci_state = |
379 | esh_read_eeprom(sc, RR_EE_PCI_STATE); |
380 | sc->sc_tune.rt_dma_write_state = |
381 | esh_read_eeprom(sc, RR_EE_DMA_WRITE_STATE); |
382 | sc->sc_tune.rt_dma_read_state = |
383 | esh_read_eeprom(sc, RR_EE_DMA_READ_STATE); |
384 | sc->sc_tune.rt_driver_param = |
385 | esh_read_eeprom(sc, RR_EE_DRIVER_PARAM); |
386 | |
387 | /* |
388 | * Snag the ULA. The first two bytes are reserved. |
389 | * We don't really use it immediately, but it would be good to |
390 | * have for building IPv6 addresses, etc. |
391 | */ |
392 | |
393 | ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_HI); |
394 | sc->sc_ula[0] = (ula_tmp >> 8) & 0xff; |
395 | sc->sc_ula[1] = ula_tmp & 0xff; |
396 | |
397 | ula_tmp = esh_read_eeprom(sc, RR_EE_ULA_LO); |
398 | sc->sc_ula[2] = (ula_tmp >> 24) & 0xff; |
399 | sc->sc_ula[3] = (ula_tmp >> 16) & 0xff; |
400 | sc->sc_ula[4] = (ula_tmp >> 8) & 0xff; |
401 | sc->sc_ula[5] = ula_tmp & 0xff; |
402 | |
403 | /* Reset EEPROM readability */ |
404 | |
405 | bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl); |
406 | |
407 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
408 | ifp->if_softc = sc; |
409 | ifp->if_start = eshstart; |
410 | ifp->if_ioctl = eshioctl; |
411 | ifp->if_watchdog = eshwatchdog; |
412 | ifp->if_flags = IFF_SIMPLEX | IFF_NOTRAILERS | IFF_NOARP; |
413 | IFQ_SET_READY(&ifp->if_snd); |
414 | |
415 | if_attach(ifp); |
416 | hippi_ifattach(ifp, sc->sc_ula); |
417 | |
418 | sc->sc_misaligned_bufs = sc->sc_bad_lens = 0; |
419 | sc->sc_fp_rings = 0; |
420 | |
421 | return; |
422 | |
423 | bad_ring_dmamap_create: |
424 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_send.ec_dma); |
425 | bad_other: |
426 | bus_dmamap_unload(sc->sc_dmat, sc->sc_dma); |
427 | bad_dmamap_load: |
428 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma); |
429 | bad_dmamap_create: |
430 | bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_addr, sc->sc_dma_size); |
431 | bad_dmamem_map: |
432 | bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, rseg); |
433 | return; |
434 | } |
435 | |
436 | |
437 | /* |
438 | * Bring device up. |
439 | * |
440 | * Assume that the on-board processor has already been stopped, |
441 | * the rings have been cleared of valid buffers, and everything |
442 | * is pretty much as it was when the system started. |
443 | * |
444 | * Stop the processor (just for good measure), clear the SRAM, |
445 | * reload the boot code, and start it all up again, with the PC |
446 | * pointing at the boot code. Once the boot code has had a chance |
447 | * to come up, adjust all of the appropriate parameters, and send |
448 | * the 'start firmware' command. |
449 | * |
450 | * The NIC won't actually be up until it gets an interrupt with an |
451 | * event indicating the RunCode is up. |
452 | */ |
453 | |
void
eshinit(struct esh_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct rr_ring_ctl *ring;
	u_int32_t misc_host_ctl;
	u_int32_t misc_local_ctl;
	u_int32_t value;
	u_int32_t mode;

	/* If we're already doing an init, don't try again simultaneously */

	if ((sc->sc_flags & ESH_FL_INITIALIZING) != 0)
		return;
	sc->sc_flags = ESH_FL_INITIALIZING;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Halt the processor (preserve NO_SWAP, if set) */

	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
			  (misc_host_ctl & RR_MH_NO_SWAP)
			  | RR_MH_HALT_PROC | RR_MH_CLEAR_INT);

	/* Make the EEPROM readable */

	misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL,
			  misc_local_ctl & ~(RR_LC_FAST_PROM |
					     RR_LC_ADD_SRAM |
					     RR_LC_PARITY_ON));

	/* Reset DMA */

	bus_space_write_4(iot, ioh, RR_RX_STATE, RR_RS_RESET);
	bus_space_write_4(iot, ioh, RR_TX_STATE, 0);
	bus_space_write_4(iot, ioh, RR_DMA_READ_STATE, RR_DR_RESET);
	bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE, RR_DW_RESET);
	bus_space_write_4(iot, ioh, RR_PCI_STATE, 0);
	bus_space_write_4(iot, ioh, RR_TIMER, 0);
	bus_space_write_4(iot, ioh, RR_TIMER_REF, 0);

	/*
	 * Reset the assist register that the documentation suggests
	 * resetting.  Too bad that the docs don't mention anything
	 * else about the register!
	 */

	bus_space_write_4(iot, ioh, 0x15C, 1);

	/* Clear BIST, set the PC to the start of the code and let 'er rip */

	value = bus_space_read_4(iot, ioh, RR_PCI_BIST);
	bus_space_write_4(iot, ioh, RR_PCI_BIST, (value & ~0xff) | 8);

	sc->sc_bist_write(sc, 0);
	esh_reset_runcode(sc);

	bus_space_write_4(iot, ioh, RR_PROC_PC, sc->sc_runcode_start);
	bus_space_write_4(iot, ioh, RR_PROC_BREAKPT, 0x00000001);

	/* Un-halt the on-board processor so the RunCode can boot. */
	misc_host_ctl &= ~RR_MH_HALT_PROC;
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL, misc_host_ctl);

	/* XXX: should we sleep rather than delaying for 1ms!? */

	delay(1000);	/* Need 500 us, but we'll give it more */

	/* BIST result of 0 means the self-test passed. */
	value = sc->sc_bist_read(sc);
	if (value != 0) {
		aprint_error_dev(sc->sc_dev, "BIST is %d, not 0!\n",
		       value);
		goto bad_init;
	}

#ifdef ESH_PRINTF
	printf("%s: BIST is %x\n", device_xname(sc->sc_dev), value);
	eshstatus(sc);
#endif

	/* RunCode is up.  Initialize NIC */

	esh_write_addr(iot, ioh, RR_GEN_INFO_PTR, sc->sc_gen_info_dma);
	esh_write_addr(iot, ioh, RR_RECV_RING_PTR, sc->sc_recv_ring_table_dma);

	/* Reset the event/command producer-consumer indices. */
	sc->sc_event_consumer = 0;
	bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, sc->sc_event_consumer);
	sc->sc_event_producer = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
	sc->sc_cmd_producer = RR_INIT_CMD;
	sc->sc_cmd_consumer = 0;

	mode = bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS);
	mode |= (RR_MS_WARNINGS |
		 RR_MS_ERR_TERM |
		 RR_MS_NO_RESTART |
		 RR_MS_SWAP_DATA);
	mode &= ~RR_MS_PH_MODE;
	bus_space_write_4(iot, ioh, RR_MODE_AND_STATUS, mode);

#if 0
#ifdef ESH_PRINTF
	printf("eshinit: misc_local_ctl %x, SRAM size %d\n", misc_local_ctl,
	       sc->sc_sram_size);
#endif
	/*
	misc_local_ctl |= (RR_LC_FAST_PROM | RR_LC_PARITY_ON);
	*/
	if (sc->sc_sram_size > 256 * 1024) {
		misc_local_ctl |= RR_LC_ADD_SRAM;
	}
#endif

#ifdef ESH_PRINTF
	printf("eshinit: misc_local_ctl %x\n", misc_local_ctl);
#endif
	bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl);

	/* Set tuning parameters (values read from EEPROM in eshconfig) */

	bus_space_write_4(iot, ioh, RR_CONN_RETRY_COUNT,
			  sc->sc_tune.rt_conn_retry_count);
	bus_space_write_4(iot, ioh, RR_CONN_RETRY_TIMER,
			  sc->sc_tune.rt_conn_retry_timer);
	bus_space_write_4(iot, ioh, RR_CONN_TIMEOUT,
			  sc->sc_tune.rt_conn_timeout);
	bus_space_write_4(iot, ioh, RR_INTERRUPT_TIMER,
			  sc->sc_tune.rt_interrupt_timer);
	bus_space_write_4(iot, ioh, RR_TX_TIMEOUT,
			  sc->sc_tune.rt_tx_timeout);
	bus_space_write_4(iot, ioh, RR_RX_TIMEOUT,
			  sc->sc_tune.rt_rx_timeout);
	bus_space_write_4(iot, ioh, RR_STATS_TIMER,
			  sc->sc_tune.rt_stats_timer);
	bus_space_write_4(iot, ioh, RR_PCI_STATE,
			  sc->sc_tune.rt_pci_state);
	bus_space_write_4(iot, ioh, RR_DMA_WRITE_STATE,
			  sc->sc_tune.rt_dma_write_state);
	bus_space_write_4(iot, ioh, RR_DMA_READ_STATE,
			  sc->sc_tune.rt_dma_read_state);

	sc->sc_max_rings = bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS);

	/*
	 * The runcode reports its real version number only once it is
	 * running; the high 16 bits carry the major version.
	 */
	sc->sc_runcode_version =
		bus_space_read_4(iot, ioh, RR_RUNCODE_VERSION);
	sc->sc_version = sc->sc_runcode_version >> 16;
	if (sc->sc_version != 1 && sc->sc_version != 2) {
		aprint_error_dev(sc->sc_dev, "bad version number %d in runcode\n",
		       sc->sc_version);
		goto bad_init;
	}

	if (sc->sc_version == 1) {
		sc->sc_options = 0;
	} else {
		value = bus_space_read_4(iot, ioh, RR_ULA);
		sc->sc_options = value >> 16;
	}

	/* Long descriptors are not handled by this driver. */
	if (sc->sc_options & (RR_OP_LONG_TX | RR_OP_LONG_RX)) {
		aprint_error_dev(sc->sc_dev, "unsupported firmware -- long descriptors\n");
		goto bad_init;
	}

	printf("%s: startup runcode version %d.%d.%d, options %x\n",
	       device_xname(sc->sc_dev),
	       sc->sc_version,
	       (sc->sc_runcode_version >> 8) & 0xff,
	       sc->sc_runcode_version & 0xff,
	       sc->sc_options);

	/* Initialize the general ring information */

	memset(sc->sc_recv_ring_table, 0,
	      sizeof(struct rr_ring_ctl) * RR_ULP_COUNT);

	ring = &sc->sc_gen_info->ri_event_ring_ctl;
	ring->rr_ring_addr = sc->sc_event_ring_dma;
	ring->rr_entry_size = sizeof(struct rr_event);
	ring->rr_free_bufs = RR_EVENT_RING_SIZE / 4;
	ring->rr_entries = RR_EVENT_RING_SIZE;
	ring->rr_prod_index = 0;

	ring = &sc->sc_gen_info->ri_cmd_ring_ctl;
	ring->rr_free_bufs = 8;
	ring->rr_entry_size = sizeof(union rr_cmd);
	ring->rr_prod_index = RR_INIT_CMD;

	ring = &sc->sc_gen_info->ri_send_ring_ctl;
	ring->rr_ring_addr = sc->sc_send_ring_dma;
	if (sc->sc_version == 1) {
		ring->rr_free_bufs = RR_RR_DONT_COMPLAIN;
	} else {
		ring->rr_free_bufs = 0;
	}

	ring->rr_entries = RR_SEND_RING_SIZE;
	ring->rr_entry_size = sizeof(struct rr_descr);

	ring->rr_prod_index = sc->sc_send.ec_producer =
		sc->sc_send.ec_consumer = 0;
	sc->sc_send.ec_cur_mbuf = NULL;
	sc->sc_send.ec_cur_buf = NULL;

	sc->sc_snap_recv.ec_descr = sc->sc_snap_recv_ring;
	sc->sc_snap_recv.ec_consumer = sc->sc_snap_recv.ec_producer = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Set up the watchdog to make sure something happens! */

	sc->sc_watchdog = 0;
	ifp->if_timer = 5;

	/*
	 * Can't actually turn on interface until we see some events,
	 * so set initialized flag, but don't start sending.
	 */

	sc->sc_flags = ESH_FL_INITIALIZED;
	esh_send_cmd(sc, RR_CC_START_RUNCODE, 0, 0);
	return;

bad_init:
	/* Clear all state flags and wake anyone sleeping on the init. */
	sc->sc_flags = 0;
	wakeup((void *) sc);
	return;
}
686 | |
687 | |
688 | /* |
689 | * Code to handle the Framing Protocol (FP) interface to the esh. |
690 | * This will allow us to write directly to the wire, with no |
691 | * intervening memcpy's to slow us down. |
692 | */ |
693 | |
694 | int |
695 | esh_fpopen(dev_t dev, int oflags, int devtype, |
696 | struct lwp *l) |
697 | { |
698 | struct esh_softc *sc; |
699 | struct rr_ring_ctl *ring_ctl; |
700 | struct esh_fp_ring_ctl *recv; |
701 | int ulp = ESHULP(dev); |
702 | int error = 0; |
703 | bus_size_t size; |
704 | int rseg; |
705 | int s; |
706 | |
707 | sc = device_lookup_private(&esh_cd, ESHUNIT(dev)); |
708 | if (sc == NULL || ulp == HIPPI_ULP_802) |
709 | return (ENXIO); |
710 | |
711 | #ifdef ESH_PRINTF |
712 | printf("esh_fpopen: opening board %d, ulp %d\n" , |
713 | device_unit(sc->sc_dev), ulp); |
714 | #endif |
715 | |
716 | /* If the card is not up, initialize it. */ |
717 | |
718 | s = splnet(); |
719 | |
720 | if (sc->sc_fp_rings >= sc->sc_max_rings - 1) { |
721 | splx(s); |
722 | return (ENOSPC); |
723 | } |
724 | |
725 | if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) { |
726 | eshinit(sc); |
727 | if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) { |
728 | splx(s); |
729 | return EIO; |
730 | } |
731 | } |
732 | |
733 | if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) { |
734 | /* |
735 | * Wait for the runcode to indicate that it is up, |
736 | * while watching to make sure we haven't crashed. |
737 | */ |
738 | |
739 | error = 0; |
740 | while (error == 0 && |
741 | (sc->sc_flags & ESH_FL_INITIALIZED) != 0 && |
742 | (sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) { |
743 | error = tsleep((void *) sc, PCATCH | PRIBIO, |
744 | "eshinit" , 0); |
745 | #ifdef ESH_PRINTF |
746 | printf("esh_fpopen: tslept\n" ); |
747 | #endif |
748 | } |
749 | |
750 | if (error != 0) { |
751 | splx(s); |
752 | return error; |
753 | } |
754 | |
755 | if ((sc->sc_flags & ESH_FL_RUNCODE_UP) == 0) { |
756 | splx(s); |
757 | return EIO; |
758 | } |
759 | } |
760 | |
761 | |
762 | #ifdef ESH_PRINTF |
763 | printf("esh_fpopen: card up\n" ); |
764 | #endif |
765 | |
766 | /* Look at the ring descriptor to see if the ULP is in use */ |
767 | |
768 | ring_ctl = &sc->sc_recv_ring_table[ulp]; |
769 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, |
770 | (char *) ring_ctl - (char *) sc->sc_dma_addr, |
771 | sizeof(*ring_ctl), |
772 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
773 | if (ring_ctl->rr_entry_size != 0) { |
774 | splx(s); |
775 | return (EBUSY); |
776 | } |
777 | |
778 | #ifdef ESH_PRINTF |
779 | printf("esh_fpopen: ring %d okay\n" , ulp); |
780 | #endif |
781 | |
782 | /* |
783 | * Allocate the DMA space for the ring; space for the |
784 | * ring control blocks has already been staticly allocated. |
785 | */ |
786 | |
787 | recv = (struct esh_fp_ring_ctl *) |
788 | malloc(sizeof(*recv), M_DEVBUF, M_WAITOK|M_ZERO); |
789 | if (recv == NULL) |
790 | return(ENOMEM); |
791 | TAILQ_INIT(&recv->ec_queue); |
792 | |
793 | size = RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr); |
794 | error = bus_dmamem_alloc(sc->sc_dmat, size, 0, RR_DMA_BOUNDARY, |
795 | &recv->ec_dmaseg, 1, |
796 | &rseg, BUS_DMA_WAITOK); |
797 | |
798 | if (error) { |
799 | aprint_error_dev(sc->sc_dev, "couldn't allocate space for FP receive ring" |
800 | "data structures\n" ); |
801 | goto bad_fp_dmamem_alloc; |
802 | } |
803 | |
804 | if (rseg > 1) { |
805 | aprint_error_dev(sc->sc_dev, "contiguous memory not available for " |
806 | "FP receive ring\n" ); |
807 | goto bad_fp_dmamem_map; |
808 | } |
809 | |
810 | error = bus_dmamem_map(sc->sc_dmat, &recv->ec_dmaseg, rseg, |
811 | size, (void **) &recv->ec_descr, |
812 | BUS_DMA_WAITOK | BUS_DMA_COHERENT); |
813 | if (error) { |
814 | aprint_error_dev(sc->sc_dev, "couldn't map memory for FP receive ring\n" ); |
815 | goto bad_fp_dmamem_map; |
816 | } |
817 | |
818 | if (bus_dmamap_create(sc->sc_dmat, size, 1, size, RR_DMA_BOUNDARY, |
819 | BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, |
820 | &recv->ec_dma)) { |
821 | aprint_error_dev(sc->sc_dev, "couldn't create DMA map for FP receive ring\n" ); |
822 | goto bad_fp_dmamap_create; |
823 | } |
824 | |
825 | if (bus_dmamap_load(sc->sc_dmat, recv->ec_dma, recv->ec_descr, |
826 | size, NULL, BUS_DMA_WAITOK)) { |
827 | aprint_error_dev(sc->sc_dev, "couldn't load DMA map for FP receive ring\n" ); |
828 | goto bad_fp_dmamap_load; |
829 | } |
830 | |
831 | memset(recv->ec_descr, 0, size); |
832 | |
833 | /* |
834 | * Create the ring: |
835 | * |
836 | * XXX: HTF are we gonna deal with the fact that we don't know |
837 | * if the open succeeded until we get a response from |
838 | * the event handler? I guess we could go to sleep waiting |
839 | * for the interrupt, and get woken up by the eshintr |
840 | * case handling it. |
841 | */ |
842 | |
843 | ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr; |
844 | ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4; |
845 | ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE; |
846 | ring_ctl->rr_entry_size = sizeof(struct rr_descr); |
847 | ring_ctl->rr_prod_index = recv->ec_producer = recv->ec_consumer = 0; |
848 | ring_ctl->rr_mode = RR_RR_CHARACTER; |
849 | recv->ec_ulp = ulp; |
850 | recv->ec_index = -1; |
851 | |
852 | sc->sc_fp_recv[ulp] = recv; |
853 | |
854 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, |
855 | (char *) ring_ctl - (char *) sc->sc_dma_addr, |
856 | sizeof(*ring_ctl), |
857 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
858 | |
859 | bus_dmamap_sync(sc->sc_dmat, recv->ec_dma, 0, size, |
860 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
861 | |
862 | esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer); |
863 | |
864 | #ifdef ESH_PRINTF |
865 | printf("esh_fpopen: sent create ring cmd\n" ); |
866 | #endif |
867 | |
868 | while (recv->ec_index == -1) { |
869 | error = tsleep((void *) &recv->ec_ulp, PCATCH | PRIBIO, |
870 | "eshfpopen" , 0); |
871 | if (error != 0 || recv->ec_index == -1) { |
872 | goto bad_fp_ring_create; |
873 | } |
874 | } |
875 | #ifdef ESH_PRINTF |
876 | printf("esh_fpopen: created ring\n" ); |
877 | #endif |
878 | |
879 | /* |
880 | * Ring is created. Set up various pointers to the ring |
881 | * information, fill the ring, and get going... |
882 | */ |
883 | |
884 | sc->sc_fp_rings++; |
885 | splx(s); |
886 | return 0; |
887 | |
888 | bad_fp_ring_create: |
889 | #ifdef ESH_PRINTF |
890 | printf("esh_fpopen: bad ring create\n" ); |
891 | #endif |
892 | sc->sc_fp_recv[ulp] = NULL; |
893 | memset(ring_ctl, 0, sizeof(*ring_ctl)); |
894 | bus_dmamap_unload(sc->sc_dmat, recv->ec_dma); |
895 | bad_fp_dmamap_load: |
896 | bus_dmamap_destroy(sc->sc_dmat, recv->ec_dma); |
897 | bad_fp_dmamap_create: |
898 | bus_dmamem_unmap(sc->sc_dmat, (void *) recv->ec_descr, size); |
899 | bad_fp_dmamem_map: |
900 | bus_dmamem_free(sc->sc_dmat, &recv->ec_dmaseg, rseg); |
901 | bad_fp_dmamem_alloc: |
902 | free(recv, M_DEVBUF); |
903 | if (error == 0) |
904 | error = ENOMEM; |
905 | splx(s); |
906 | return (error); |
907 | } |
908 | |
909 | |
/*
 * esh_fpclose:  close the FP (raw HIPPI framing protocol) character
 * device whose ULP is encoded in the minor number.
 *
 * Sends a DISABLE_RING command to the card and sleeps until eshintr
 * signals completion (the RR_EC_RING_DISABLED event handler sets
 * ec_index to -1 and wakes us), then tears down the ring's DMA map,
 * descriptor memory, and driver bookkeeping.  Called at splnet.
 */
int
esh_fpclose(dev_t dev, int fflag, int devtype,
    struct lwp *l)
{
	struct esh_softc *sc;
	struct rr_ring_ctl *ring_ctl;
	struct esh_fp_ring_ctl *ring;
	int ulp = ESHULP(dev);
	int index;
	int error = 0;
	int s;

	/* The SNAP (802) ULP belongs to the network interface, not FP. */
	sc = device_lookup_private(&esh_cd, ESHUNIT(dev));
	if (sc == NULL || ulp == HIPPI_ULP_802)
		return (ENXIO);

	s = splnet();

	ring = sc->sc_fp_recv[ulp];
	ring_ctl = &sc->sc_recv_ring_table[ulp];
	index = ring->ec_index;

#ifdef ESH_PRINTF
	printf("esh_fpclose: closing unit %d, ulp %d\n" ,
	    device_unit(sc->sc_dev), ulp);
#endif
	assert(ring);
	assert(ring_ctl);

	/*
	 * Disable the ring, wait for notification, and get rid of DMA
	 * stuff and dynamically allocated memory.  Loop, waiting to
	 * learn that the ring has been disabled, or the card
	 * has been shut down.
	 */

	do {
		esh_send_cmd(sc, RR_CC_DISABLE_RING, ulp, ring->ec_producer);

		/* eshintr wakes us on &ring->ec_index when the card acks */
		error = tsleep((void *) &ring->ec_index, PCATCH | PRIBIO,
		    "esh_fpclose" , 0);
		if (error != 0 && error != EAGAIN) {
			aprint_error_dev(sc->sc_dev, "esh_fpclose: wait on ring disable bad\n" );
			/* force loop exit; proceed with teardown anyway */
			ring->ec_index = -1;
			break;
		}
	} while (ring->ec_index != -1 && sc->sc_flags != 0);

	/*
	 * XXX: Gotta unload the ring, removing old descriptors!
	 *      *Can* there be outstanding reads with a close issued!?
	 */

	/* Release DMA resources in reverse order of their creation in open. */
	bus_dmamap_unload(sc->sc_dmat, ring->ec_dma);
	bus_dmamap_destroy(sc->sc_dmat, ring->ec_dma);
	bus_dmamem_unmap(sc->sc_dmat, (void *) ring->ec_descr,
	    RR_FP_RECV_RING_SIZE * sizeof(struct rr_descr));
	bus_dmamem_free(sc->sc_dmat, &ring->ec_dmaseg, ring->ec_dma->dm_nsegs);
	free(ring, M_DEVBUF);
	memset(ring_ctl, 0, sizeof(*ring_ctl));
	sc->sc_fp_recv[ulp] = NULL;
	sc->sc_fp_recv_index[index] = NULL;

	/* Drop the "FP rings up" flag once the last FP ring is gone. */
	sc->sc_fp_rings--;
	if (sc->sc_fp_rings == 0)
		sc->sc_flags &= ~ESH_FL_FP_RING_UP;

	splx(s);
	return 0;
}
980 | |
981 | int |
982 | esh_fpread(dev_t dev, struct uio *uio, int ioflag) |
983 | { |
984 | struct lwp *l = curlwp; |
985 | struct proc *p = l->l_proc; |
986 | struct iovec *iovp; |
987 | struct esh_softc *sc; |
988 | struct esh_fp_ring_ctl *ring; |
989 | struct esh_dmainfo *di; |
990 | int ulp = ESHULP(dev); |
991 | int error; |
992 | int i; |
993 | int s; |
994 | |
995 | #ifdef ESH_PRINTF |
996 | printf("esh_fpread: dev %x\n" , dev); |
997 | #endif |
998 | |
999 | sc = device_lookup_private(&esh_cd, ESHUNIT(dev)); |
1000 | if (sc == NULL || ulp == HIPPI_ULP_802) |
1001 | return (ENXIO); |
1002 | |
1003 | s = splnet(); |
1004 | |
1005 | ring = sc->sc_fp_recv[ulp]; |
1006 | |
1007 | if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) { |
1008 | error = ENXIO; |
1009 | goto fpread_done; |
1010 | } |
1011 | |
1012 | /* Check for validity */ |
1013 | for (i = 0; i < uio->uio_iovcnt; i++) { |
1014 | /* Check for valid offsets and sizes */ |
1015 | if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 || |
1016 | (i < uio->uio_iovcnt - 1 && |
1017 | (uio->uio_iov[i].iov_len & 3) != 0)) { |
1018 | error = EFAULT; |
1019 | goto fpread_done; |
1020 | } |
1021 | } |
1022 | |
1023 | /* Lock down the pages */ |
1024 | for (i = 0; i < uio->uio_iovcnt; i++) { |
1025 | iovp = &uio->uio_iov[i]; |
1026 | error = uvm_vslock(p->p_vmspace, iovp->iov_base, iovp->iov_len, |
1027 | VM_PROT_WRITE); |
1028 | if (error) { |
1029 | /* Unlock what we've locked so far. */ |
1030 | for (--i; i >= 0; i--) { |
1031 | iovp = &uio->uio_iov[i]; |
1032 | uvm_vsunlock(p->p_vmspace, iovp->iov_base, |
1033 | iovp->iov_len); |
1034 | } |
1035 | goto fpread_done; |
1036 | } |
1037 | } |
1038 | |
1039 | /* |
1040 | * Perform preliminary DMA mapping and throw the buffers |
1041 | * onto the queue to be sent. |
1042 | */ |
1043 | |
1044 | di = esh_new_dmainfo(sc); |
1045 | if (di == NULL) { |
1046 | error = ENOMEM; |
1047 | goto fpread_done; |
1048 | } |
1049 | di->ed_buf = NULL; |
1050 | di->ed_error = 0; |
1051 | di->ed_read_len = 0; |
1052 | |
1053 | #ifdef ESH_PRINTF |
1054 | printf("esh_fpread: ulp %d, uio offset %qd, resid %d, iovcnt %d\n" , |
1055 | ulp, uio->uio_offset, uio->uio_resid, uio->uio_iovcnt); |
1056 | #endif |
1057 | |
1058 | error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma, |
1059 | uio, BUS_DMA_READ|BUS_DMA_WAITOK); |
1060 | if (error) { |
1061 | aprint_error_dev(sc->sc_dev, "esh_fpread: bus_dmamap_load_uio " |
1062 | "failed\terror code %d\n" , |
1063 | error); |
1064 | error = ENOBUFS; |
1065 | esh_free_dmainfo(sc, di); |
1066 | goto fpread_done; |
1067 | } |
1068 | |
1069 | bus_dmamap_sync(sc->sc_dmat, di->ed_dma, |
1070 | 0, di->ed_dma->dm_mapsize, |
1071 | BUS_DMASYNC_PREREAD); |
1072 | |
1073 | #ifdef ESH_PRINTF |
1074 | printf("esh_fpread: ulp %d, di %p, nsegs %d, uio len %d\n" , |
1075 | ulp, di, di->ed_dma->dm_nsegs, uio->uio_resid); |
1076 | #endif |
1077 | |
1078 | di->ed_flags |= ESH_DI_BUSY; |
1079 | |
1080 | TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list); |
1081 | esh_fill_fp_ring(sc, ring); |
1082 | |
1083 | while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) { |
1084 | error = tsleep((void *) di, PCATCH | PRIBIO, "esh_fpread" , 0); |
1085 | #ifdef ESH_PRINTF |
1086 | printf("esh_fpread: ulp %d, tslept %d\n" , ulp, error); |
1087 | #endif |
1088 | if (error) { |
1089 | /* |
1090 | * Remove the buffer entries from the ring; this |
1091 | * is gonna require a DISCARD_PKT command, and |
1092 | * will certainly disrupt things. This is why we |
1093 | * can have only one outstanding read on a ring |
1094 | * at a time. :-( |
1095 | */ |
1096 | |
1097 | printf("esh_fpread: was that a ^C!? error %d, ulp %d\n" , |
1098 | error, ulp); |
1099 | if (error == EINTR || error == ERESTART) |
1100 | error = 0; |
1101 | if ((di->ed_flags & ESH_DI_BUSY) != 0) { |
1102 | esh_flush_fp_ring(sc, ring, di); |
1103 | error = EINTR; |
1104 | break; |
1105 | } |
1106 | } |
1107 | } |
1108 | |
1109 | if (error == 0 && di->ed_error != 0) |
1110 | error = EIO; |
1111 | |
1112 | /* |
1113 | * How do we let the caller know how much has been read? |
1114 | * Adjust the uio_resid stuff!? |
1115 | */ |
1116 | |
1117 | assert(uio->uio_resid >= di->ed_read_len); |
1118 | |
1119 | uio->uio_resid -= di->ed_read_len; |
1120 | for (i = 0; i < uio->uio_iovcnt; i++) { |
1121 | iovp = &uio->uio_iov[i]; |
1122 | uvm_vsunlock(p->p_vmspace, iovp->iov_base, iovp->iov_len); |
1123 | } |
1124 | esh_free_dmainfo(sc, di); |
1125 | |
1126 | fpread_done: |
1127 | #ifdef ESH_PRINTF |
1128 | printf("esh_fpread: ulp %d, error %d\n" , ulp, error); |
1129 | #endif |
1130 | splx(s); |
1131 | return error; |
1132 | } |
1133 | |
1134 | |
1135 | int |
1136 | esh_fpwrite(dev_t dev, struct uio *uio, int ioflag) |
1137 | { |
1138 | struct lwp *l = curlwp; |
1139 | struct proc *p = l->l_proc; |
1140 | struct iovec *iovp; |
1141 | struct esh_softc *sc; |
1142 | struct esh_send_ring_ctl *ring; |
1143 | struct esh_dmainfo *di; |
1144 | int ulp = ESHULP(dev); |
1145 | int error; |
1146 | int len; |
1147 | int i; |
1148 | int s; |
1149 | |
1150 | #ifdef ESH_PRINTF |
1151 | printf("esh_fpwrite: dev %x\n" , dev); |
1152 | #endif |
1153 | |
1154 | sc = device_lookup_private(&esh_cd, ESHUNIT(dev)); |
1155 | if (sc == NULL || ulp == HIPPI_ULP_802) |
1156 | return (ENXIO); |
1157 | |
1158 | s = splnet(); |
1159 | |
1160 | ring = &sc->sc_send; |
1161 | |
1162 | if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) { |
1163 | error = ENXIO; |
1164 | goto fpwrite_done; |
1165 | } |
1166 | |
1167 | /* Check for validity */ |
1168 | for (i = 0; i < uio->uio_iovcnt; i++) { |
1169 | if (((u_long) uio->uio_iov[i].iov_base & 3) != 0 || |
1170 | (i < uio->uio_iovcnt - 1 && |
1171 | (uio->uio_iov[i].iov_len & 3) != 0)) { |
1172 | error = EFAULT; |
1173 | goto fpwrite_done; |
1174 | } |
1175 | } |
1176 | |
1177 | /* Lock down the pages */ |
1178 | for (i = 0; i < uio->uio_iovcnt; i++) { |
1179 | iovp = &uio->uio_iov[i]; |
1180 | error = uvm_vslock(p->p_vmspace, iovp->iov_base, iovp->iov_len, |
1181 | VM_PROT_READ); |
1182 | if (error) { |
1183 | /* Unlock what we've locked so far. */ |
1184 | for (--i; i >= 0; i--) { |
1185 | iovp = &uio->uio_iov[i]; |
1186 | uvm_vsunlock(p->p_vmspace, iovp->iov_base, |
1187 | iovp->iov_len); |
1188 | } |
1189 | goto fpwrite_done; |
1190 | } |
1191 | } |
1192 | |
1193 | /* |
1194 | * Perform preliminary DMA mapping and throw the buffers |
1195 | * onto the queue to be sent. |
1196 | */ |
1197 | |
1198 | di = esh_new_dmainfo(sc); |
1199 | if (di == NULL) { |
1200 | error = ENOMEM; |
1201 | goto fpwrite_done; |
1202 | } |
1203 | di->ed_buf = NULL; |
1204 | di->ed_error = 0; |
1205 | |
1206 | #ifdef ESH_PRINTF |
1207 | printf("esh_fpwrite: uio offset %qd, resid %d, iovcnt %d\n" , |
1208 | uio->uio_offset, uio->uio_resid, uio->uio_iovcnt); |
1209 | #endif |
1210 | |
1211 | error = bus_dmamap_load_uio(sc->sc_dmat, di->ed_dma, |
1212 | uio, BUS_DMA_WRITE|BUS_DMA_WAITOK); |
1213 | if (error) { |
1214 | aprint_error_dev(sc->sc_dev, "esh_fpwrite: bus_dmamap_load_uio " |
1215 | "failed\terror code %d\n" , |
1216 | error); |
1217 | error = ENOBUFS; |
1218 | esh_free_dmainfo(sc, di); |
1219 | goto fpwrite_done; |
1220 | } |
1221 | |
1222 | bus_dmamap_sync(sc->sc_dmat, di->ed_dma, |
1223 | 0, di->ed_dma->dm_mapsize, |
1224 | BUS_DMASYNC_PREWRITE); |
1225 | |
1226 | #ifdef ESH_PRINTF |
1227 | printf("esh_fpwrite: di %p, nsegs %d, uio len %d\n" , |
1228 | di, di->ed_dma->dm_nsegs, uio->uio_resid); |
1229 | #endif |
1230 | |
1231 | len = di->ed_dma->dm_mapsize; |
1232 | di->ed_flags |= ESH_DI_BUSY; |
1233 | |
1234 | TAILQ_INSERT_TAIL(&ring->ec_di_queue, di, ed_list); |
1235 | eshstart(&sc->sc_if); |
1236 | |
1237 | while ((di->ed_flags & ESH_DI_BUSY) != 0 && error == 0) { |
1238 | error = tsleep((void *) di, PRIBIO, "esh_fpwrite" , 0); |
1239 | #ifdef ESH_PRINTF |
1240 | printf("esh_fpwrite: tslept %d\n" , error); |
1241 | #endif |
1242 | if (error) { |
1243 | printf("esh_fpwrite: was that a ^C!? Shouldn't be! Error %d\n" , |
1244 | error); |
1245 | if (error == EINTR || error == ERESTART) |
1246 | error = 0; |
1247 | if ((di->ed_flags & ESH_DI_BUSY) != 0) { |
1248 | panic("interrupted eshwrite!" ); |
1249 | #if 0 |
1250 | /* Better do *something* here! */ |
1251 | esh_flush_send_ring(sc, di); |
1252 | #endif |
1253 | error = EINTR; |
1254 | break; |
1255 | } |
1256 | } |
1257 | } |
1258 | |
1259 | if (error == 0 && di->ed_error != 0) |
1260 | error = EIO; |
1261 | |
1262 | /* |
1263 | * How do we let the caller know how much has been written? |
1264 | * Adjust the uio_resid stuff!? |
1265 | */ |
1266 | |
1267 | uio->uio_resid -= len; |
1268 | uio->uio_offset += len; |
1269 | |
1270 | for (i = 0; i < uio->uio_iovcnt; i++) { |
1271 | iovp = &uio->uio_iov[i]; |
1272 | uvm_vsunlock(p->p_vmspace, iovp->iov_base, iovp->iov_len); |
1273 | } |
1274 | |
1275 | esh_free_dmainfo(sc, di); |
1276 | |
1277 | fpwrite_done: |
1278 | #ifdef ESH_PRINTF |
1279 | printf("esh_fpwrite: error %d\n" , error); |
1280 | #endif |
1281 | splx(s); |
1282 | return error; |
1283 | } |
1284 | |
1285 | void |
1286 | esh_fpstrategy(struct buf *bp) |
1287 | { |
1288 | struct esh_softc *sc; |
1289 | int ulp = ESHULP(bp->b_dev); |
1290 | int error = 0; |
1291 | int s; |
1292 | |
1293 | #ifdef ESH_PRINTF |
1294 | printf("esh_fpstrategy: starting, bcount %ld, flags %lx, dev %x\n" |
1295 | "\tunit %x, ulp %d\n" , |
1296 | bp->b_bcount, bp->b_flags, bp->b_dev, unit, ulp); |
1297 | #endif |
1298 | |
1299 | sc = device_lookup_private(&esh_cd, ESHUNIT(bp->b_dev)); |
1300 | |
1301 | s = splnet(); |
1302 | if (sc == NULL || ulp == HIPPI_ULP_802) { |
1303 | bp->b_error = ENXIO; |
1304 | goto done; |
1305 | } |
1306 | |
1307 | if (bp->b_bcount == 0) |
1308 | goto done; |
1309 | |
1310 | #define UP_FLAGS (ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP) |
1311 | |
1312 | if ((sc->sc_flags & UP_FLAGS) != UP_FLAGS) { |
1313 | bp->b_error = EBUSY; |
1314 | goto done; |
1315 | } |
1316 | #undef UP_FLAGS |
1317 | |
1318 | if (bp->b_flags & B_READ) { |
1319 | /* |
1320 | * Perform preliminary DMA mapping and throw the buffers |
1321 | * onto the queue to be sent. |
1322 | */ |
1323 | |
1324 | struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[ulp]; |
1325 | struct esh_dmainfo *di = esh_new_dmainfo(sc); |
1326 | |
1327 | if (di == NULL) { |
1328 | bp->b_error = ENOMEM; |
1329 | goto done; |
1330 | } |
1331 | di->ed_buf = bp; |
1332 | error = bus_dmamap_load(sc->sc_dmat, di->ed_dma, |
1333 | bp->b_data, bp->b_bcount, |
1334 | bp->b_proc, |
1335 | BUS_DMA_READ|BUS_DMA_WAITOK); |
1336 | if (error) { |
1337 | aprint_error_dev(sc->sc_dev, "esh_fpstrategy: " |
1338 | "bus_dmamap_load " |
1339 | "failed\terror code %d\n" , |
1340 | error); |
1341 | bp->b_error = ENOBUFS; |
1342 | esh_free_dmainfo(sc, di); |
1343 | goto done; |
1344 | } |
1345 | |
1346 | bus_dmamap_sync(sc->sc_dmat, di->ed_dma, |
1347 | 0, di->ed_dma->dm_mapsize, |
1348 | BUS_DMASYNC_PREREAD); |
1349 | |
1350 | #ifdef ESH_PRINTF |
1351 | printf("fpstrategy: di %p\n" , di); |
1352 | #endif |
1353 | |
1354 | TAILQ_INSERT_TAIL(&ring->ec_queue, di, ed_list); |
1355 | esh_fill_fp_ring(sc, ring); |
1356 | } else { |
1357 | /* |
1358 | * Queue up the buffer for future sending. If the card |
1359 | * isn't already transmitting, give it a kick. |
1360 | */ |
1361 | |
1362 | struct esh_send_ring_ctl *ring = &sc->sc_send; |
1363 | bufq_put(ring->ec_buf_queue, bp); |
1364 | #ifdef ESH_PRINTF |
1365 | printf("esh_fpstrategy: ready to call eshstart to write!\n" ); |
1366 | #endif |
1367 | eshstart(&sc->sc_if); |
1368 | } |
1369 | splx(s); |
1370 | return; |
1371 | |
1372 | done: |
1373 | splx(s); |
1374 | #ifdef ESH_PRINTF |
1375 | printf("esh_fpstrategy: failing, bp->b_error %d!\n" , |
1376 | bp->b_error); |
1377 | #endif |
1378 | biodone(bp); |
1379 | } |
1380 | |
1381 | /* |
1382 | * Handle interrupts. This is basicly event handling code; version two |
1383 | * firmware tries to speed things up by just telling us the location |
1384 | * of the producer and consumer indices, rather than sending us an event. |
1385 | */ |
1386 | |
/*
 * eshintr:  interrupt handler.  Drains the card's event ring, acting
 * on each event (link state, ring enable/disable, send/receive
 * completion, errors), then -- for version 2 firmware -- also compares
 * the consumer indices the RunCode reports in device registers against
 * the driver's, performing any send/receive completion work implied by
 * the difference.  Returns nonzero iff the interrupt was ours.
 */
int
eshintr(void *arg)
{
	struct esh_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_if;
	u_int32_t rc_offsets;
	u_int32_t misc_host_ctl;
	int rc_send_consumer = 0;	/* shut up compiler */
	int rc_snap_ring_consumer = 0;	/* ditto */
	u_int8_t fp_ring_consumer[RR_MAX_RECV_RING];
	int start_consumer;
	int ret = 0;

	/* okay/blah/sbuf/t support the ESH_PRINTF debug traces below. */
	int okay = 0;
	int blah = 0;
	char sbuf[100];
	char t[100];


	/* Check to see if this is our interrupt. */

	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	if ((misc_host_ctl & RR_MH_INTERRUPT) == 0)
		return 0;

	/* If we can't do anything with the interrupt, just drop it */

	if (sc->sc_flags == 0)
		return 1;

	rc_offsets = bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER);
	sc->sc_event_producer = rc_offsets & 0xff;
	if (sc->sc_version == 2) {
		int i;

		/*
		 * Version 2 firmware packs the send and SNAP-ring
		 * consumer indices into the event-producer register and
		 * reports the FP receive-ring consumers in a separate
		 * register block, four 8-bit values per 32-bit read.
		 */
		sbuf[0] = '\0';
		strlcat(sbuf, "rc: " , sizeof(sbuf));
		rc_send_consumer = (rc_offsets >> 8) & 0xff;
		rc_snap_ring_consumer = (rc_offsets >> 16) & 0xff;
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			rc_offsets =
				bus_space_read_4(iot, ioh,
						 RR_RUNCODE_RECV_CONS + i);
			/* XXX: should do this right! */
			NTOHL(rc_offsets);
			*((u_int32_t *) &fp_ring_consumer[i]) = rc_offsets;
			snprintf(t, sizeof(t), "%.8x|" , rc_offsets);
			strlcat(sbuf, t, sizeof(sbuf));
		}
	}
	start_consumer = sc->sc_event_consumer;

	/* Take care of synchronizing DMA with entries we read... */

	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Drain the event ring, one event at a time. */
	while (sc->sc_event_consumer != sc->sc_event_producer) {
		struct rr_event *event =
			&sc->sc_event_ring[sc->sc_event_consumer];

#ifdef ESH_PRINTF
		if (event->re_code != RR_EC_WATCHDOG &&
		    event->re_code != RR_EC_STATS_UPDATE &&
		    event->re_code != RR_EC_SET_CMD_CONSUMER) {
			printf("%s:  event code %x, ring %d, index %d\n" ,
			       device_xname(sc->sc_dev), event->re_code,
			       event->re_ring, event->re_index);
			if (okay == 0)
				printf("%s\n" , sbuf);
			okay = 1;
		}
#endif
		ret = 1;   /* some action was taken by card */

		switch(event->re_code) {
		case RR_EC_RUNCODE_UP:
			/* Firmware finished booting; bring rings back up. */
			printf("%s:  firmware up\n" , device_xname(sc->sc_dev));
			sc->sc_flags |= ESH_FL_RUNCODE_UP;
			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			if ((ifp->if_flags & IFF_UP) != 0)
				esh_init_snap_ring(sc);
			if (sc->sc_fp_rings > 0)
				esh_init_fp_rings(sc);

			/*
			 * XXX:  crank up FP rings that might be
			 *       in use after a reset!
			 */
			wakeup((void *) sc);
			break;

		case RR_EC_WATCHDOG:
			/*
			 * Record the watchdog event.
			 * This is checked by eshwatchdog
			 */

			sc->sc_watchdog = 1;
			break;

		case RR_EC_SET_CMD_CONSUMER:
			sc->sc_cmd_consumer = event->re_index;
			break;

		case RR_EC_LINK_ON:
			printf("%s:  link up\n" , device_xname(sc->sc_dev));
			sc->sc_flags |= ESH_FL_LINK_UP;

			esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0);
			esh_send_cmd(sc, RR_CC_UPDATE_STATS, 0, 0);
			if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
				/*
				 * Interface is now `running', with no
				 * output active.
				 */
				ifp->if_flags |= IFF_RUNNING;
				ifp->if_flags &= ~IFF_OACTIVE;

				/* Attempt to start output, if any. */
			}
			eshstart(ifp);
			break;

		case RR_EC_LINK_OFF:
			sc->sc_flags &= ~ESH_FL_LINK_UP;
			printf("%s:  link down\n" , device_xname(sc->sc_dev));
			break;

		/*
		 * These are all unexpected.  We need to handle all
		 * of them, though.
		 */

		case RR_EC_INVALID_CMD:
		case RR_EC_INTERNAL_ERROR:
		case RR2_EC_INTERNAL_ERROR:
		case RR_EC_BAD_SEND_RING:
		case RR_EC_BAD_SEND_BUF:
		case RR_EC_BAD_SEND_DESC:
		case RR_EC_RECV_RING_FLUSH:
		case RR_EC_RECV_ERROR_INFO:
		case RR_EC_BAD_RECV_BUF:
		case RR_EC_BAD_RECV_DESC:
		case RR_EC_BAD_RECV_RING:
		case RR_EC_UNIMPLEMENTED:
			/* Fatal firmware/driver disagreement; mark crashed. */
			aprint_error_dev(sc->sc_dev, "unexpected event %x;"
			    "shutting down interface\n" ,
			    event->re_code);
			ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
			sc->sc_flags = ESH_FL_CRASHED;
#ifdef ESH_PRINTF
			eshstatus(sc);
#endif
			break;

#define CALLOUT(a) case a:						\
	printf("%s:  Event " #a " received -- "				\
	       "ring %d index %d timestamp %x\n",			\
	       device_xname(sc->sc_dev), event->re_ring, event->re_index,	\
	       event->re_timestamp);					\
	break;

		CALLOUT(RR_EC_NO_RING_FOR_ULP);
		CALLOUT(RR_EC_REJECTING);	/* dropping packets */
#undef CALLOUT

			/* Send events */

		case RR_EC_PACKET_SENT:		/* not used in firmware 2.x */
			ifp->if_opackets++;
			/* FALLTHROUGH */

		case RR_EC_SET_SND_CONSUMER:
			assert(sc->sc_version == 1);
			/* FALLTHROUGH */

		case RR_EC_SEND_RING_LOW:
			eshstart_cleanup(sc, event->re_index, 0);
			break;


		case RR_EC_CONN_REJECT:
		case RR_EC_CAMPON_TIMEOUT:
		case RR_EC_CONN_TIMEOUT:
		case RR_EC_DISCONN_ERR:
		case RR_EC_INTERNAL_PARITY:
		case RR_EC_TX_IDLE:
		case RR_EC_SEND_LINK_OFF:
			/* Send-side error:  pass the code through cleanup. */
			eshstart_cleanup(sc, event->re_index, event->re_code);
			break;

			/* Receive events */

		case RR_EC_RING_ENABLED:
			if (event->re_ring == HIPPI_ULP_802) {
				rc_snap_ring_consumer = 0;	/* prevent read */
				sc->sc_flags |= ESH_FL_SNAP_RING_UP;
				esh_fill_snap_ring(sc);

				if (sc->sc_flags & ESH_FL_LINK_UP) {
					/*
					 * Interface is now `running', with no
					 * output active.
					 */
					ifp->if_flags |= IFF_RUNNING;
					ifp->if_flags &= ~IFF_OACTIVE;

					/* Attempt to start output, if any. */

					eshstart(ifp);
				}
#ifdef ESH_PRINTF
				if (event->re_index != 0)
					printf("ENABLE snap ring -- index %d instead of 0!\n" ,
					       event->re_index);
#endif
			} else {
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];

				sc->sc_flags |= ESH_FL_FP_RING_UP;
#ifdef ESH_PRINTF
				printf("eshintr:  FP ring %d up\n" ,
				       event->re_ring);
#endif

				/*
				 * Record the card-assigned index and wake
				 * the esh_fpopen sleeper waiting on ec_ulp.
				 */
				sc->sc_fp_recv_index[event->re_index] = ring;
				ring->ec_index = event->re_index;
				wakeup((void *) &ring->ec_ulp);
			}
			break;

		case RR_EC_RING_DISABLED:
#ifdef ESH_PRINTF
			printf("eshintr:  disabling ring %d\n" ,
			       event->re_ring);
#endif
			if (event->re_ring == HIPPI_ULP_802) {
				struct rr_ring_ctl *ring =
					sc->sc_recv_ring_table + HIPPI_ULP_802;
				memset(ring, 0, sizeof(*ring));
				sc->sc_flags &= ~ESH_FL_CLOSING_SNAP;
				sc->sc_flags &= ~ESH_FL_SNAP_RING_UP;
				/* Free any mbufs still posted to the ring. */
				while (sc->sc_snap_recv.ec_consumer
				       != sc->sc_snap_recv.ec_producer) {
					u_int16_t offset = sc->sc_snap_recv.ec_consumer;

					bus_dmamap_unload(sc->sc_dmat,
						sc->sc_snap_recv.ec_dma[offset]);
					m_free(sc->sc_snap_recv.ec_m[offset]);
					sc->sc_snap_recv.ec_m[offset] = NULL;
					sc->sc_snap_recv.ec_consumer =
						NEXT_RECV(sc->sc_snap_recv.ec_consumer);
				}
				sc->sc_snap_recv.ec_consumer =
					rc_snap_ring_consumer;
				sc->sc_snap_recv.ec_producer =
					rc_snap_ring_consumer;
				wakeup((void *) &sc->sc_snap_recv);
			} else {
				struct esh_fp_ring_ctl *recv =
					sc->sc_fp_recv[event->re_ring];
				assert(recv != NULL);
				recv->ec_consumer = recv->ec_producer =
					fp_ring_consumer[recv->ec_index];
				/* -1 tells esh_fpclose the ring is down. */
				recv->ec_index = -1;
				wakeup((void *) &recv->ec_index);
			}
			break;

		case RR_EC_RING_ENABLE_ERR:
			if (event->re_ring == HIPPI_ULP_802) {
				aprint_error_dev(sc->sc_dev, "unable to enable SNAP ring!?\n\t"
				       "shutting down interface\n" );
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef ESH_PRINTF
				eshstatus(sc);
#endif
			} else {
				/*
				 * If we just leave the ring index as-is,
				 * the driver will figure out that
				 * we failed to open the ring.
				 */
				wakeup((void *) &(sc->sc_fp_recv[event->re_ring]->ec_ulp));
			}
			break;

		case RR_EC_PACKET_DISCARDED:
			/*
			 * Determine the dmainfo for the current packet
			 * we just discarded and wake up the waiting
			 * process.
			 *
			 * This should never happen on the network ring!
			 */

			if (event->re_ring == HIPPI_ULP_802) {
				aprint_error_dev(sc->sc_dev, "discard on SNAP ring!?\n\t"
				       "shutting down interface\n" );
				ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
				sc->sc_flags = ESH_FL_CRASHED;
			} else {
				struct esh_fp_ring_ctl *ring =
					sc->sc_fp_recv[event->re_ring];
				struct esh_dmainfo *di =
					ring->ec_cur_dmainfo;

				if (di == NULL)
					di = ring->ec_dmainfo[ring->ec_producer];
				printf("eshintr:  DISCARD:  index %d,"
				       "ring prod %d, di %p, ring[index] %p\n" ,
				       event->re_index, ring->ec_producer, di,
				       ring->ec_dmainfo[event->re_index]);

				if (di == NULL)
					di = ring->ec_dmainfo[event->re_index];

				if (di == NULL) {
					printf("eshintr:  DISCARD:  NULL di, skipping...\n" );
					break;
				}

				/* Clear BUSY so the esh_fpread sleeper returns. */
				di->ed_flags &=
					~(ESH_DI_READING | ESH_DI_BUSY);
				wakeup((void *) &di->ed_flags);
			}
			break;

		case RR_EC_OUT_OF_BUF:
		case RR_EC_RECV_RING_OUT:
		case RR_EC_RECV_RING_LOW:
			break;

		case RR_EC_SET_RECV_CONSUMER:
		case RR_EC_PACKET_RECVED:
			if (event->re_ring == HIPPI_ULP_802)
				esh_read_snap_ring(sc, event->re_index, 0);
			else if (sc->sc_fp_recv[event->re_ring] != NULL)
				esh_read_fp_ring(sc, event->re_index, 0,
						 event->re_ring);
			break;

		case RR_EC_RECV_IDLE:
		case RR_EC_PARITY_ERR:
		case RR_EC_LLRC_ERR:
		case RR_EC_PKT_LENGTH_ERR:
		case RR_EC_IP_HDR_CKSUM_ERR:
		case RR_EC_DATA_CKSUM_ERR:
		case RR_EC_SHORT_BURST_ERR:
		case RR_EC_RECV_LINK_OFF:
		case RR_EC_FLAG_SYNC_ERR:
		case RR_EC_FRAME_ERR:
		case RR_EC_STATE_TRANS_ERR:
		case RR_EC_NO_READY_PULSE:
			if (event->re_ring == HIPPI_ULP_802) {
				esh_read_snap_ring(sc, event->re_index,
						   event->re_code);
			} else {
				struct esh_fp_ring_ctl *r;

				r = sc->sc_fp_recv[event->re_ring];
				if (r)
					r->ec_error = event->re_code;
			}
			break;

		/*
		 * Statistics events can be ignored for now.  They might become
		 * necessary if we have to deliver stats on demand, rather than
		 * just returning the statistics block of memory.
		 */

		case RR_EC_STATS_UPDATE:
		case RR_EC_STATS_RETRIEVED:
		case RR_EC_TRACE:
			break;

		default:
			aprint_error_dev(sc->sc_dev, "Bogus event code %x, "
			       "ring %d, index %d, timestamp %x\n" ,
			       event->re_code,
			       event->re_ring, event->re_index,
			       event->re_timestamp);
			break;
		}

		sc->sc_event_consumer = NEXT_EVENT(sc->sc_event_consumer);
	}

	/* Do the receive and send ring processing for version 2 RunCode */

	if (sc->sc_version == 2) {
		int i;
		if (sc->sc_send.ec_consumer != rc_send_consumer) {
			eshstart_cleanup(sc, rc_send_consumer, 0);
			ret = 1;
			blah++;
		}
		if (sc->sc_snap_recv.ec_consumer != rc_snap_ring_consumer &&
		    (sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0) {
			esh_read_snap_ring(sc, rc_snap_ring_consumer, 0);
			ret = 1;
			blah++;
		}
		for (i = 0; i < RR_MAX_RECV_RING; i++) {
			struct esh_fp_ring_ctl *r = sc->sc_fp_recv_index[i];

			if (r != NULL &&
			    r->ec_consumer != fp_ring_consumer[i]) {
#ifdef ESH_PRINTF
				printf("eshintr:  performed read on ring %d, index %d\n" ,
				       r->ec_ulp, i);
#endif
				blah++;
				esh_read_fp_ring(sc, fp_ring_consumer[i],
						 0, r->ec_ulp);
				fp_ring_consumer[i] = r->ec_consumer;
			}
		}
		if (blah != 0 && okay == 0) {
			okay = 1;
#ifdef ESH_PRINTF
			printf("%s\n" , sbuf);
#endif
		}
		/* Repack driver consumer indices for the write-back below. */
		rc_offsets = (sc->sc_snap_recv.ec_consumer << 16) |
			(sc->sc_send.ec_consumer << 8) | sc->sc_event_consumer;
	} else {
		rc_offsets = sc->sc_event_consumer;
	}

	esh_dma_sync(sc, sc->sc_event_ring,
		     start_consumer, sc->sc_event_producer,
		     RR_EVENT_RING_SIZE, sizeof(struct rr_event), 0,
		     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Write out new values for the FP segments... */

	if (sc->sc_version == 2) {
		int i;
		u_int32_t u;

		sbuf[0] = '\0';
		strlcat(sbuf, "drv: " , sizeof(sbuf));
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			/* XXX: should do this right! */
			u = *((u_int32_t *) &fp_ring_consumer[i]);
			snprintf(t, sizeof(t), "%.8x|" , u);
			strlcat(sbuf, t, sizeof(sbuf));
			NTOHL(u);
			bus_space_write_4(iot, ioh,
					  RR_DRIVER_RECV_CONS + i, u);
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n" , sbuf);
#endif

		sbuf[0] = '\0';
		strlcat(sbuf, "rcn: " , sizeof(sbuf));
		for (i = 0; i < RR_MAX_RECV_RING; i += 4) {
			u = bus_space_read_4(iot, ioh,
					     RR_RUNCODE_RECV_CONS + i);
			/* XXX: should do this right! */
			NTOHL(u);
			snprintf(t, sizeof(t), "%.8x|" , u);
			strlcat(sbuf, t, sizeof(sbuf));
		}
#ifdef ESH_PRINTF
		if (okay == 1)
			printf("%s\n" , sbuf);
#endif
	}

	/* Clear interrupt */
	bus_space_write_4(iot, ioh, RR_EVENT_CONSUMER, rc_offsets);

	return (ret);
}
1876 | |
1877 | |
1878 | /* |
1879 | * Start output on the interface. Always called at splnet(). |
1880 | * Check to see if there are any mbufs that didn't get sent the |
1881 | * last time this was called. If there are none, get more mbufs |
1882 | * and send 'em. |
1883 | * |
1884 | * For now, we only send one packet at a time. |
1885 | */ |
1886 | |
void
eshstart(struct ifnet *ifp)
{
	struct esh_softc *sc = ifp->if_softc;
	struct esh_send_ring_ctl *send = &sc->sc_send;
	struct mbuf *m = NULL;
	int error;

	/* Don't transmit if interface is busy or not running */

#ifdef ESH_PRINTF
	printf("eshstart: ready to look; flags %x\n", sc->sc_flags);
#endif

	/*
	 * Link must be up, the NIC initialized, and the runcode running
	 * before we may post any send descriptors.
	 */
#define LINK_UP_FLAGS (ESH_FL_LINK_UP | ESH_FL_INITIALIZED | ESH_FL_RUNCODE_UP)
	if ((sc->sc_flags & LINK_UP_FLAGS) != LINK_UP_FLAGS)
		return;
#undef LINK_UP_FLAGS

#ifdef ESH_PRINTF
	if (esh_check(sc))
		return;
#endif

	/* If we have sent the current packet, get another */

	/*
	 * Dequeue a new network mbuf only when nothing at all is
	 * pending:  no mbuf, no strategy buf, and no fpwrite dmainfo.
	 * Only one packet is in flight at a time.
	 */
	while ((sc->sc_flags & ESH_FL_SNAP_RING_UP) != 0 &&
	       (m = send->ec_cur_mbuf) == NULL && send->ec_cur_buf == NULL &&
	       send->ec_cur_dmainfo == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)		/* not really needed */
			break;

		if (ifp->if_bpf) {
			/*
			 * On output, the raw packet has a eight-byte CCI
			 * field prepended.  On input, there is no such field.
			 * The bpf expects the packet to look the same in both
			 * places, so we temporarily lop off the prepended CCI
			 * field here, then replace it.  Ugh.
			 *
			 * XXX: Need to use standard mbuf manipulation
			 * functions, first mbuf may be less than
			 * 8 bytes long.
			 */

			m->m_len -= 8;
			m->m_data += 8;
			m->m_pkthdr.len -= 8;
			bpf_mtap(ifp, m);
			m->m_len += 8;
			m->m_data -= 8;
			m->m_pkthdr.len += 8;
		}

		send->ec_len = m->m_pkthdr.len;
		/*
		 * Force word alignment for the NIC; this may replace
		 * the chain (and returns NULL on allocation failure,
		 * in which case we just try the next packet).
		 */
		m = send->ec_cur_mbuf = esh_adjust_mbufs(sc, m);
		if (m == NULL)
			continue;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, send->ec_dma,
		    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error)
			panic("%s: eshstart: "
			    "bus_dmamap_load_mbuf failed err %d\n",
			    device_xname(sc->sc_dev), error);
		send->ec_offset = 0;
	}

	/*
	 * If there are no network packets to send, see if there
	 * are any FP packets to send.
	 *
	 * XXX: Some users may disagree with these priorities;
	 * this reduces network latency by increasing FP latency...
	 * Note that it also means that FP packets can get
	 * locked out so that they *never* get sent, if the
	 * network constantly fills up the pipe.  Not good!
	 */

	if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
	    send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
	    send->ec_cur_dmainfo == NULL &&
	    bufq_peek(send->ec_buf_queue) != NULL) {
		struct buf *bp;

#ifdef ESH_PRINTF
		printf("eshstart: getting a buf from send->ec_queue %p\n",
		    send->ec_queue);
#endif

		bp = send->ec_cur_buf = bufq_get(send->ec_buf_queue);
		send->ec_offset = 0;
		send->ec_len = bp->b_bcount;

		/*
		 * Determine the DMA mapping for the buffer.
		 * If this is too large, what do we do!?
		 */

		error = bus_dmamap_load(sc->sc_dmat, send->ec_dma,
		    bp->b_data, bp->b_bcount,
		    bp->b_proc,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);

		if (error)
			panic("%s: eshstart: "
			    "bus_dmamap_load failed err %d\n",
			    device_xname(sc->sc_dev), error);
	}

	/*
	 * If there are no packets from strategy to send, see if there
	 * are any FP packets to send from fpwrite.
	 */

	if ((sc->sc_flags & ESH_FL_FP_RING_UP) != 0 &&
	    send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
	    send->ec_cur_dmainfo == NULL) {
		struct esh_dmainfo *di;

		di = TAILQ_FIRST(&send->ec_di_queue);
		if (di == NULL)
			return;
		TAILQ_REMOVE(&send->ec_di_queue, di, ed_list);

#ifdef ESH_PRINTF
		printf("eshstart: getting a di from send->ec_di_queue %p\n",
		    &send->ec_di_queue);
#endif

		send->ec_cur_dmainfo = di;
		send->ec_offset = 0;
		send->ec_len = di->ed_dma->dm_mapsize;
	}

	/* Nothing queued from any of the three sources:  we're done. */
	if (send->ec_cur_mbuf == NULL && send->ec_cur_buf == NULL &&
	    send->ec_cur_dmainfo == NULL)
		return;

	/*
	 * Sanity checks before touching the ring.
	 * NOTE(review): the second assert dereferences ec_cur_dmainfo
	 * when ec_dma has no segments; that assumes a dmainfo transfer
	 * is current in that case -- confirm.
	 */
	assert(send->ec_len);
	assert(send->ec_dma->dm_nsegs ||
	       send->ec_cur_dmainfo->ed_dma->dm_nsegs);
	assert(send->ec_cur_mbuf || send->ec_cur_buf || send->ec_cur_dmainfo);

	esh_send(sc);
	return;
}
2035 | |
2036 | |
2037 | /* |
2038 | * Put the buffers from the send dmamap into the descriptors and |
2039 | * send 'em off... |
2040 | */ |
2041 | |
static void
esh_send(struct esh_softc *sc)
{
	struct esh_send_ring_ctl *send = &sc->sc_send;
	u_int start_producer = send->ec_producer;
	bus_dmamap_t dma;

	/* FP (fpwrite) transfers carry their own dmamap. */
	if (send->ec_cur_dmainfo != NULL)
		dma = send->ec_cur_dmainfo->ed_dma;
	else
		dma = send->ec_dma;

#ifdef ESH_PRINTF
	printf("esh_send: producer %x consumer %x nsegs %d\n",
	    send->ec_producer, send->ec_consumer, dma->dm_nsegs);
#endif

	esh_dma_sync(sc, send->ec_descr, send->ec_producer, send->ec_consumer,
	    RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Emit one descriptor per DMA segment, stopping early if the
	 * ring fills (producer would catch up with the consumer).
	 * ec_offset remembers how far into the map we got so a later
	 * call can continue the same packet.
	 */
	while (NEXT_SEND(send->ec_producer) != send->ec_consumer &&
	       send->ec_offset < dma->dm_nsegs) {
		int offset = send->ec_producer;

		send->ec_descr[offset].rd_buffer_addr =
		    dma->dm_segs[send->ec_offset].ds_addr;
		send->ec_descr[offset].rd_length =
		    dma->dm_segs[send->ec_offset].ds_len;
		send->ec_descr[offset].rd_control = 0;

		if (send->ec_offset == 0) {
			/* Start of the dmamap... */
			send->ec_descr[offset].rd_control |=
			    RR_CT_PACKET_START;
		}

		if (send->ec_offset + 1 == dma->dm_nsegs) {
			/* ...and the last segment ends the packet. */
			send->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
		}

		send->ec_offset++;
		send->ec_producer = NEXT_SEND(send->ec_producer);
	}

	/*
	 * XXX: we could optimize the dmamap_sync to just get what we've
	 * just set up, rather than the whole buffer...
	 */

	bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	esh_dma_sync(sc, send->ec_descr,
	    start_producer, send->ec_consumer,
	    RR_SEND_RING_SIZE, sizeof(struct rr_descr), 1,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef ESH_PRINTF
	if (send->ec_offset != dma->dm_nsegs)
		printf("eshstart: couldn't fit packet in send ring!\n");
#endif

	/*
	 * Hand the new producer index to the NIC.  Firmware version 1
	 * only takes it via the command ring; later firmware has a
	 * dedicated register.
	 */
	if (sc->sc_version == 1) {
		esh_send_cmd(sc, RR_CC_SET_SEND_PRODUCER,
		    0, send->ec_producer);
	} else {
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    RR_SEND_PRODUCER, send->ec_producer);
	}
	return;
}
2113 | |
2114 | |
2115 | /* |
2116 | * Cleanup for the send routine. When the NIC sends us an event to |
2117 | * let us know that it has consumed our buffers, we need to free the |
2118 | * buffers, and possibly send another packet. |
2119 | */ |
2120 | |
static void
eshstart_cleanup(struct esh_softc *sc, u_int16_t consumer, int error)
{
	struct esh_send_ring_ctl *send = &sc->sc_send;
	int start_consumer = send->ec_consumer;
	bus_dmamap_t dma;

	/* FP (fpwrite) transfers carry their own dmamap. */
	if (send->ec_cur_dmainfo != NULL)
		dma = send->ec_cur_dmainfo->ed_dma;
	else
		dma = send->ec_dma;

#ifdef ESH_PRINTF
	printf("eshstart_cleanup: consumer %x, send->consumer %x\n",
	    consumer, send->ec_consumer);
#endif

	/* Pick up the NIC's updates to the descriptors we reclaim. */
	esh_dma_sync(sc, send->ec_descr,
	    send->ec_consumer, consumer,
	    RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Walk every descriptor the NIC has consumed since last time. */
	while (send->ec_consumer != consumer) {
		assert(dma->dm_nsegs);
		assert(send->ec_cur_mbuf || send->ec_cur_buf ||
		       send->ec_cur_dmainfo);

		if (send->ec_descr[send->ec_consumer].rd_control &
		    RR_CT_PACKET_END) {
/*
 * NOTE(review): "ESH_PRINT" differs from the "ESH_PRINTF" used
 * everywhere else in this file, so this debug printf is never
 * compiled in -- confirm whether that is intentional.
 */
#ifdef ESH_PRINT
			printf("eshstart_cleanup: dmamap_sync mapsize %d\n",
			    send->ec_dma->dm_mapsize);
#endif
			/*
			 * End of a packet:  finish the DMA and release
			 * whichever of the three transfer types was in
			 * flight (mbuf, fpwrite dmainfo, or strategy buf).
			 */
			bus_dmamap_sync(sc->sc_dmat, dma, 0, dma->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dma);
			if (send->ec_cur_mbuf) {
				m_freem(send->ec_cur_mbuf);
				send->ec_cur_mbuf = NULL;
			} else if (send->ec_cur_dmainfo) {
				send->ec_cur_dmainfo->ed_flags &= ~ESH_DI_BUSY;
				/* Report a sticky ring error, if any. */
				send->ec_cur_dmainfo->ed_error =
				    (send->ec_error ? send->ec_error : error);
				send->ec_error = 0;
				wakeup((void *) send->ec_cur_dmainfo);
				send->ec_cur_dmainfo = NULL;
			} else if (send->ec_cur_buf) {
				biodone(send->ec_cur_buf);
				send->ec_cur_buf = NULL;
			} else {
				panic("%s: eshstart_cleanup: "
				    "no current mbuf, buf, or dmainfo!\n",
				    device_xname(sc->sc_dev));
			}

			/*
			 * Version 1 of the firmware sent an event each
			 * time it sent out a packet.  Later versions do not
			 * (which results in a considerable speedup), so we
			 * have to keep track here.
			 */

			if (sc->sc_version != 1)
				sc->sc_if.if_opackets++;
		}
		if (error != 0)
			send->ec_error = error;

		send->ec_consumer = NEXT_SEND(send->ec_consumer);
	}

	esh_dma_sync(sc, send->ec_descr,
	    start_consumer, consumer,
	    RR_SEND_RING_SIZE, sizeof(struct rr_descr), 0,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Ring space freed up; try to queue the next packet. */
	eshstart(&sc->sc_if);
}
2199 | |
2200 | |
2201 | /* |
2202 | * XXX: Ouch: The NIC can only send word-aligned buffers, and only |
2203 | * the last buffer in the packet can have a length that is not |
2204 | * a multiple of four! |
2205 | * |
2206 | * Here we traverse the packet, pick out the bogus mbufs, and fix 'em |
2207 | * if possible. The fix is amazingly expensive, so we sure hope that |
 * this is a rare occurrence (it seems to be).
2209 | */ |
2210 | |
static struct mbuf *
esh_adjust_mbufs(struct esh_softc *sc, struct mbuf *m)
{
	struct mbuf *m0, *n, *n0;
	u_int32_t write_len;

	write_len = m->m_pkthdr.len;
#ifdef DIAGNOSTIC
	if (write_len > max_write_len)
		max_write_len = write_len;
#endif

	/* Walk the chain; n0 trails n so we can splice around fixes. */
	for (n0 = n = m; n; n = n->m_next) {
		/* Unlink and free zero-length mbufs. */
		while (n && n->m_len == 0) {
			m0 = m_free(n);
			if (n == m)
				n = n0 = m = m0;
			else
				n = n0->m_next = m0;
		}
		if (n == NULL)
			break;

		/*
		 * An mbuf is unacceptable to the NIC if its data is
		 * not word-aligned, or if it is not last in the chain
		 * and its length is not a multiple of four.
		 * NOTE(review): mtod(n, long) assumes long can hold a
		 * pointer (true for NetBSD's ILP32/LP64 ABIs);
		 * uintptr_t would be the portable spelling.
		 */
		if (mtod(n, long) & 3 || (n->m_next && n->m_len & 3)) {
			/* Gotta clean it up */
			struct mbuf *o;
			u_int32_t len;

			sc->sc_misaligned_bufs++;
			MGETHDR(o, M_DONTWAIT, MT_DATA);
			if (!o)
				goto bogosity;

			MCLGET(o, M_DONTWAIT);
			if (!(o->m_flags & M_EXT)) {
				m0 = m_free(o);
				goto bogosity;
			}

			/*
			 * XXX: Copy as much as we can into the
			 * cluster.  For now we can't have more
			 * than a cluster in there.  May change.
			 * I'd prefer not to get this
			 * down-n-dirty, but we have to be able
			 * to do this kind of funky copy.
			 */

			len = min(MCLBYTES, write_len);
#ifdef DIAGNOSTIC
			assert(n->m_len <= len);
			assert(len <= MCLBYTES);
#endif

			/*
			 * Copy the data into the aligned cluster, trim
			 * the copied bytes from the old chain, and link
			 * the cluster in front of what remains.
			 */
			m_copydata(n, 0, len, mtod(o, void *));
			o->m_pkthdr.len = len;
			m_adj(n, len);
			o->m_len = len;
			o->m_next = n;

			if (n == m)
				m = o;
			else
				n0->m_next = o;
			n = o;
		}
		n0 = n;
		write_len -= n->m_len;
	}
	return m;

bogosity:
	/* Allocation failed:  toss the whole packet. */
	aprint_error_dev(sc->sc_dev, "esh_adjust_mbuf: unable to allocate cluster for "
	    "mbuf %p, len %x\n",
	    mtod(m, void *), m->m_len);
	m_freem(m);
	return NULL;
}
2289 | |
2290 | |
2291 | /* |
2292 | * Read in the current valid entries from the ring and forward |
2293 | * them to the upper layer protocols. It is possible that we |
2294 | * haven't received the whole packet yet, in which case we just |
2295 | * add each of the buffers into the packet until we have the whole |
2296 | * thing. |
2297 | */ |
2298 | |
2299 | static void |
2300 | esh_read_snap_ring(struct esh_softc *sc, u_int16_t consumer, int error) |
2301 | { |
2302 | struct ifnet *ifp = &sc->sc_if; |
2303 | struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv; |
2304 | int start_consumer = recv->ec_consumer; |
2305 | u_int16_t control; |
2306 | |
2307 | if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0) |
2308 | return; |
2309 | |
2310 | if (error) |
2311 | recv->ec_error = error; |
2312 | |
2313 | esh_dma_sync(sc, recv->ec_descr, |
2314 | start_consumer, consumer, |
2315 | RR_SNAP_RECV_RING_SIZE, |
2316 | sizeof(struct rr_descr), 0, |
2317 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
2318 | |
2319 | while (recv->ec_consumer != consumer) { |
2320 | u_int16_t offset = recv->ec_consumer; |
2321 | struct mbuf *m; |
2322 | |
2323 | m = recv->ec_m[offset]; |
2324 | m->m_len = recv->ec_descr[offset].rd_length; |
2325 | control = recv->ec_descr[offset].rd_control; |
2326 | bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, m->m_len, |
2327 | BUS_DMASYNC_POSTREAD); |
2328 | bus_dmamap_unload(sc->sc_dmat, recv->ec_dma[offset]); |
2329 | |
2330 | #ifdef ESH_PRINTF |
2331 | printf("esh_read_snap_ring: offset %x addr %p len %x flags %x\n" , |
2332 | offset, mtod(m, void *), m->m_len, control); |
2333 | #endif |
2334 | if (control & RR_CT_PACKET_START || !recv->ec_cur_mbuf) { |
2335 | if (recv->ec_cur_pkt) { |
2336 | m_freem(recv->ec_cur_pkt); |
2337 | recv->ec_cur_pkt = NULL; |
2338 | printf("%s: possible skipped packet!\n" , |
2339 | device_xname(sc->sc_dev)); |
2340 | } |
2341 | recv->ec_cur_pkt = recv->ec_cur_mbuf = m; |
2342 | /* allocated buffers all have pkthdrs... */ |
2343 | m_set_rcvif(m, ifp); |
2344 | m->m_pkthdr.len = m->m_len; |
2345 | } else { |
2346 | if (!recv->ec_cur_pkt) |
2347 | panic("esh_read_snap_ring: no cur_pkt" ); |
2348 | |
2349 | recv->ec_cur_mbuf->m_next = m; |
2350 | recv->ec_cur_mbuf = m; |
2351 | recv->ec_cur_pkt->m_pkthdr.len += m->m_len; |
2352 | } |
2353 | |
2354 | recv->ec_m[offset] = NULL; |
2355 | recv->ec_descr[offset].rd_length = 0; |
2356 | recv->ec_descr[offset].rd_buffer_addr = 0; |
2357 | |
2358 | /* Note that we can START and END on the same buffer */ |
2359 | |
2360 | if (control & RR_CT_PACKET_END) { /* XXX: RR2_ matches */ |
2361 | m = recv->ec_cur_pkt; |
2362 | if (!error && !recv->ec_error) { |
2363 | /* |
2364 | * We have a complete packet, send it up |
2365 | * the stack... |
2366 | */ |
2367 | ifp->if_ipackets++; |
2368 | |
2369 | /* |
2370 | * Check if there's a BPF listener on this |
2371 | * interface. If so, hand off the raw packet |
2372 | * to BPF. |
2373 | */ |
2374 | bpf_mtap(ifp, m); |
2375 | if ((ifp->if_flags & IFF_RUNNING) == 0) { |
2376 | m_freem(m); |
2377 | } else { |
2378 | m = m_pullup(m, |
2379 | sizeof(struct hippi_header)); |
2380 | if_percpuq_enqueue(ifp->if_percpuq, m); |
2381 | } |
2382 | } else { |
2383 | ifp->if_ierrors++; |
2384 | recv->ec_error = 0; |
2385 | m_freem(m); |
2386 | } |
2387 | recv->ec_cur_pkt = recv->ec_cur_mbuf = NULL; |
2388 | } |
2389 | |
2390 | recv->ec_descr[offset].rd_control = 0; |
2391 | recv->ec_consumer = NEXT_RECV(recv->ec_consumer); |
2392 | } |
2393 | |
2394 | esh_dma_sync(sc, recv->ec_descr, |
2395 | start_consumer, consumer, |
2396 | RR_SNAP_RECV_RING_SIZE, |
2397 | sizeof(struct rr_descr), 0, |
2398 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2399 | |
2400 | esh_fill_snap_ring(sc); |
2401 | } |
2402 | |
2403 | |
2404 | /* |
2405 | * Add the SNAP (IEEE 802) receive ring to the NIC. It is possible |
2406 | * that we are doing this after resetting the card, in which case |
2407 | * the structures have already been filled in and we may need to |
2408 | * resume sending data. |
2409 | */ |
2410 | |
2411 | static void |
2412 | esh_init_snap_ring(struct esh_softc *sc) |
2413 | { |
2414 | struct rr_ring_ctl *ring = sc->sc_recv_ring_table + HIPPI_ULP_802; |
2415 | |
2416 | if ((sc->sc_flags & ESH_FL_CLOSING_SNAP) != 0) { |
2417 | aprint_error_dev(sc->sc_dev, "can't reopen SNAP ring until ring disable is completed\n" ); |
2418 | return; |
2419 | } |
2420 | |
2421 | if (ring->rr_entry_size == 0) { |
2422 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, |
2423 | (char *) ring - (char *) sc->sc_dma_addr, |
2424 | sizeof(*ring), |
2425 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
2426 | |
2427 | ring->rr_ring_addr = sc->sc_snap_recv_ring_dma; |
2428 | ring->rr_free_bufs = RR_SNAP_RECV_RING_SIZE / 4; |
2429 | ring->rr_entries = RR_SNAP_RECV_RING_SIZE; |
2430 | ring->rr_entry_size = sizeof(struct rr_descr); |
2431 | ring->rr_prod_index = 0; |
2432 | sc->sc_snap_recv.ec_producer = 0; |
2433 | sc->sc_snap_recv.ec_consumer = 0; |
2434 | ring->rr_mode = RR_RR_IP; |
2435 | |
2436 | bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, |
2437 | (char *) ring - (char *) sc->sc_dma_addr, |
2438 | sizeof(ring), |
2439 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2440 | esh_send_cmd(sc, RR_CC_ENABLE_RING, HIPPI_ULP_802, |
2441 | sc->sc_snap_recv.ec_producer); |
2442 | } else { |
2443 | printf("%s: snap receive ring already initialized!\n" , |
2444 | device_xname(sc->sc_dev)); |
2445 | } |
2446 | } |
2447 | |
2448 | static void |
2449 | esh_close_snap_ring(struct esh_softc *sc) |
2450 | { |
2451 | #ifdef ESH_PRINTF |
2452 | printf("esh_close_snap_ring: starting\n" ); |
2453 | #endif |
2454 | |
2455 | if ((sc->sc_flags & ESH_FL_SNAP_RING_UP) == 0) |
2456 | return; |
2457 | |
2458 | sc->sc_flags |= ESH_FL_CLOSING_SNAP; |
2459 | esh_send_cmd(sc, RR_CC_DISABLE_RING, HIPPI_ULP_802, 0); |
2460 | |
2461 | /* Disable event will trigger the rest of the cleanup. */ |
2462 | } |
2463 | |
2464 | /* |
2465 | * Fill in the snap ring with more mbuf buffers so that we can |
2466 | * receive traffic. |
2467 | */ |
2468 | |
static void
esh_fill_snap_ring(struct esh_softc *sc)
{
	struct esh_snap_ring_ctl *recv = &sc->sc_snap_recv;
	int start_producer = recv->ec_producer;
	int error;

	esh_dma_sync(sc, recv->ec_descr,
	    recv->ec_producer, recv->ec_consumer,
	    RR_SNAP_RECV_RING_SIZE,
	    sizeof(struct rr_descr), 1,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Add one cluster mbuf per free ring slot; stop quietly if
	 * allocation or DMA load fails -- the ring just stays short
	 * until the next fill.
	 */
	while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
		int offset = recv->ec_producer;
		struct mbuf *m;

		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (!m)
			break;
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			break;
		}

		error = bus_dmamap_load(sc->sc_dmat, recv->ec_dma[offset],
		    mtod(m, void *), MCLBYTES,
		    NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: esh_fill_recv_ring: bus_dmamap_load "
			    "failed\toffset %x, error code %d\n",
			    device_xname(sc->sc_dev), offset, error);
			m_free(m);
			break;
		}

		/*
		 * In this implementation, we should only see one segment
		 * per DMA.
		 */

		assert(recv->ec_dma[offset]->dm_nsegs == 1);

		/*
		 * Load into the descriptors.
		 */

		/* Firmware v1 wants the ULP number in each descriptor. */
		recv->ec_descr[offset].rd_ring =
		    (sc->sc_version == 1) ? HIPPI_ULP_802 : 0;
		recv->ec_descr[offset].rd_buffer_addr =
		    recv->ec_dma[offset]->dm_segs->ds_addr;
		recv->ec_descr[offset].rd_length =
		    recv->ec_dma[offset]->dm_segs->ds_len;
		recv->ec_descr[offset].rd_control = 0;

		bus_dmamap_sync(sc->sc_dmat, recv->ec_dma[offset], 0, MCLBYTES,
		    BUS_DMASYNC_PREREAD);

		recv->ec_m[offset] = m;

		recv->ec_producer = NEXT_RECV(recv->ec_producer);
	}

	esh_dma_sync(sc, recv->ec_descr,
	    start_producer, recv->ec_consumer,
	    RR_SNAP_RECV_RING_SIZE,
	    sizeof(struct rr_descr), 1,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Publish the new producer index to the NIC. */
	if (sc->sc_version == 1)
		esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, HIPPI_ULP_802,
		    recv->ec_producer);
	else
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    RR_SNAP_RECV_PRODUCER, recv->ec_producer);
}
2546 | |
2547 | static void |
2548 | esh_init_fp_rings(struct esh_softc *sc) |
2549 | { |
2550 | struct esh_fp_ring_ctl *recv; |
2551 | struct rr_ring_ctl *ring_ctl; |
2552 | int ulp; |
2553 | |
2554 | for (ulp = 0; ulp < RR_ULP_COUNT; ulp++) { |
2555 | ring_ctl = &sc->sc_recv_ring_table[ulp]; |
2556 | recv = sc->sc_fp_recv[ulp]; |
2557 | |
2558 | if (recv == NULL) |
2559 | continue; |
2560 | |
2561 | ring_ctl->rr_ring_addr = recv->ec_dma->dm_segs->ds_addr; |
2562 | ring_ctl->rr_free_bufs = RR_FP_RECV_RING_SIZE / 4; |
2563 | ring_ctl->rr_entries = RR_FP_RECV_RING_SIZE; |
2564 | ring_ctl->rr_entry_size = sizeof(struct rr_descr); |
2565 | ring_ctl->rr_prod_index = 0; |
2566 | ring_ctl->rr_mode = RR_RR_CHARACTER; |
2567 | recv->ec_producer = 0; |
2568 | recv->ec_consumer = 0; |
2569 | recv->ec_index = -1; |
2570 | |
2571 | esh_send_cmd(sc, RR_CC_ENABLE_RING, ulp, recv->ec_producer); |
2572 | } |
2573 | } |
2574 | |
2575 | static void |
2576 | esh_read_fp_ring(struct esh_softc *sc, u_int16_t consumer, int error, int ulp) |
2577 | { |
2578 | struct esh_fp_ring_ctl *recv = sc->sc_fp_recv[ulp]; |
2579 | int start_consumer = recv->ec_consumer; |
2580 | u_int16_t control; |
2581 | |
2582 | #ifdef ESH_PRINTF |
2583 | printf("esh_read_fp_ring: ulp %d, consumer %d, producer %d, old consumer %d\n" , |
2584 | recv->ec_ulp, consumer, recv->ec_producer, recv->ec_consumer); |
2585 | #endif |
2586 | if ((sc->sc_flags & ESH_FL_FP_RING_UP) == 0) |
2587 | return; |
2588 | |
2589 | if (error != 0) |
2590 | recv->ec_error = error; |
2591 | |
2592 | esh_dma_sync(sc, recv->ec_descr, |
2593 | start_consumer, consumer, |
2594 | RR_FP_RECV_RING_SIZE, |
2595 | sizeof(struct rr_descr), 0, |
2596 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
2597 | |
2598 | while (recv->ec_consumer != consumer) { |
2599 | u_int16_t offset = recv->ec_consumer; |
2600 | |
2601 | control = recv->ec_descr[offset].rd_control; |
2602 | |
2603 | if (control & RR_CT_PACKET_START) { |
2604 | if (recv->ec_read_len) { |
2605 | recv->ec_error = 0; |
2606 | printf("%s: ulp %d: possible skipped FP packet!\n" , |
2607 | device_xname(sc->sc_dev), recv->ec_ulp); |
2608 | } |
2609 | recv->ec_seen_end = 0; |
2610 | recv->ec_read_len = 0; |
2611 | } |
2612 | if (recv->ec_seen_end == 0) |
2613 | recv->ec_read_len += recv->ec_descr[offset].rd_length; |
2614 | |
2615 | #if NOT_LAME |
2616 | recv->ec_descr[offset].rd_length = 0; |
2617 | recv->ec_descr[offset].rd_buffer_addr = 0; |
2618 | #endif |
2619 | |
2620 | #ifdef ESH_PRINTF |
2621 | printf("esh_read_fp_ring: offset %d addr %d len %d flags %x, total %d\n" , |
2622 | offset, recv->ec_descr[offset].rd_buffer_addr, |
2623 | recv->ec_descr[offset].rd_length, control, recv->ec_read_len); |
2624 | #endif |
2625 | /* Note that we can START and END on the same buffer */ |
2626 | |
2627 | if ((control & RR_CT_PACKET_END) == RR_CT_PACKET_END) { |
2628 | if (recv->ec_dmainfo[offset] != NULL) { |
2629 | struct esh_dmainfo *di = |
2630 | recv->ec_dmainfo[offset]; |
2631 | |
2632 | recv->ec_dmainfo[offset] = NULL; |
2633 | bus_dmamap_sync(sc->sc_dmat, di->ed_dma, |
2634 | 0, recv->ec_read_len, |
2635 | BUS_DMASYNC_POSTREAD); |
2636 | bus_dmamap_unload(sc->sc_dmat, di->ed_dma); |
2637 | |
2638 | if (!error && !recv->ec_error) { |
2639 | /* |
2640 | * XXX: we oughta do this right, with full |
2641 | * BPF support and the rest... |
2642 | */ |
2643 | if (di->ed_buf != NULL) { |
2644 | di->ed_buf->b_resid = |
2645 | di->ed_buf->b_bcount - |
2646 | recv->ec_read_len; |
2647 | } else { |
2648 | di->ed_read_len = |
2649 | recv->ec_read_len; |
2650 | } |
2651 | } else { |
2652 | if (di->ed_buf != NULL) { |
2653 | di->ed_buf->b_resid = |
2654 | di->ed_buf->b_bcount; |
2655 | di->ed_buf->b_error = EIO; |
2656 | } else { |
2657 | di->ed_error = EIO; |
2658 | recv->ec_error = 0; |
2659 | } |
2660 | } |
2661 | |
2662 | #ifdef ESH_PRINTF |
2663 | printf("esh_read_fp_ring: ulp %d, read %d, resid %ld\n" , |
2664 | recv->ec_ulp, recv->ec_read_len, (di->ed_buf ? di->ed_buf->b_resid : di->ed_read_len)); |
2665 | #endif |
2666 | di->ed_flags &= |
2667 | ~(ESH_DI_BUSY | ESH_DI_READING); |
2668 | if (di->ed_buf != NULL) |
2669 | biodone(di->ed_buf); |
2670 | else |
2671 | wakeup((void *) di); |
2672 | recv->ec_read_len = 0; |
2673 | } else { |
2674 | #ifdef ESH_PRINTF |
2675 | printf("esh_read_fp_ring: ulp %d, seen end at %d\n" , |
2676 | recv->ec_ulp, offset); |
2677 | #endif |
2678 | recv->ec_seen_end = 1; |
2679 | } |
2680 | } |
2681 | |
2682 | #if NOT_LAME |
2683 | recv->ec_descr[offset].rd_control = 0; |
2684 | #endif |
2685 | recv->ec_consumer = NEXT_RECV(recv->ec_consumer); |
2686 | } |
2687 | |
2688 | esh_dma_sync(sc, recv->ec_descr, |
2689 | start_consumer, consumer, |
2690 | RR_SNAP_RECV_RING_SIZE, |
2691 | sizeof(struct rr_descr), 0, |
2692 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2693 | |
2694 | esh_fill_fp_ring(sc, recv); |
2695 | } |
2696 | |
2697 | |
static void
esh_fill_fp_ring(struct esh_softc *sc, struct esh_fp_ring_ctl *recv)
{
	struct esh_dmainfo *di = recv->ec_cur_dmainfo;
	int start_producer = recv->ec_producer;

#ifdef ESH_PRINTF
	printf("esh_fill_fp_ring: ulp %d, di %p, producer %d\n",
	    recv->ec_ulp, di, start_producer);
#endif

	/*
	 * NOTE(review): these syncs pass RR_SNAP_RECV_RING_SIZE although
	 * this is an FP ring (esh_init_fp_rings sets rr_entries to
	 * RR_FP_RECV_RING_SIZE).  Harmless only if the two sizes are
	 * equal -- confirm against the header definitions.
	 */
	esh_dma_sync(sc, recv->ec_descr,
	    recv->ec_producer, recv->ec_consumer,
	    RR_SNAP_RECV_RING_SIZE,
	    sizeof(struct rr_descr), 1,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Post one descriptor per segment of the current reader's
	 * dmamap, picking up a new dmainfo from the queue when the
	 * previous one has been fully posted.
	 */
	while (NEXT_RECV(recv->ec_producer) != recv->ec_consumer) {
		int offset = recv->ec_producer;

		if (di == NULL) {
			/*
			 * Must allow only one reader at a time; see
			 * esh_flush_fp_ring().
			 */

			if (offset != start_producer)
				goto fp_fill_done;

			di = TAILQ_FIRST(&recv->ec_queue);
			if (di == NULL)
				goto fp_fill_done;
			TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = di;
			di->ed_flags |= ESH_DI_READING;
#ifdef ESH_PRINTF
			printf("\toffset %d nsegs %d\n",
			    recv->ec_offset, di->ed_dma->dm_nsegs);
#endif
		}

		/*
		 * Load into the descriptors.
		 */

		recv->ec_descr[offset].rd_ring = 0;
		recv->ec_descr[offset].rd_buffer_addr =
		    di->ed_dma->dm_segs[recv->ec_offset].ds_addr;
		recv->ec_descr[offset].rd_length =
		    di->ed_dma->dm_segs[recv->ec_offset].ds_len;
		recv->ec_descr[offset].rd_control = 0;
		recv->ec_dmainfo[offset] = NULL;

		if (recv->ec_offset == 0) {
			/* Start of the dmamap... */
			recv->ec_descr[offset].rd_control |=
			    RR_CT_PACKET_START;
		}

		assert(recv->ec_offset < di->ed_dma->dm_nsegs);

		recv->ec_offset++;
		if (recv->ec_offset == di->ed_dma->dm_nsegs) {
			/*
			 * Last segment:  mark the END and stash the
			 * dmainfo so esh_read_fp_ring() can complete it.
			 */
			recv->ec_descr[offset].rd_control |= RR_CT_PACKET_END;
			recv->ec_dmainfo[offset] = di;
			di = NULL;
			recv->ec_offset = 0;
			recv->ec_cur_dmainfo = NULL;
		}

		recv->ec_producer = NEXT_RECV(recv->ec_producer);
	}

fp_fill_done:
	esh_dma_sync(sc, recv->ec_descr,
	    start_producer, recv->ec_consumer,
	    RR_SNAP_RECV_RING_SIZE,
	    sizeof(struct rr_descr), 1,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);


	if (sc->sc_version == 1) {
		esh_send_cmd(sc, RR_CC_SET_RECV_PRODUCER, recv->ec_ulp,
		    recv->ec_producer);
	} else {
		/*
		 * Later firmware packs four 8-bit FP producer indices
		 * into each 32-bit producer register; rebuild the whole
		 * word from the sibling rings that share it.
		 */
		union {
			u_int32_t producer;
			u_int8_t indices[4];
		} v;
		int which;
		int i;
		struct esh_fp_ring_ctl *r;

		which = (recv->ec_index / 4) * 4;
#if BAD_PRODUCER
		v.producer = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
		    RR_RECVS_PRODUCER + which);
		NTOHL(v.producer);
#endif
		for (i = 0; i < 4; i++) {
			r = sc->sc_fp_recv_index[i + which];
			if (r != NULL)
				v.indices[i] = r->ec_producer;
			else
				v.indices[i] = 0;
		}
#ifdef ESH_PRINTF
		printf("esh_fill_fp_ring: ulp %d, updating producer %d: %.8x\n",
		    recv->ec_ulp, which, v.producer);
#endif
		HTONL(v.producer);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    RR_RECVS_PRODUCER + which, v.producer);
	}
#ifdef ESH_PRINTF
	printf("esh_fill_fp_ring: ulp %d, final producer %d\n",
	    recv->ec_ulp, recv->ec_producer);
#endif
}
2818 | |
2819 | /* |
2820 | * When a read is interrupted, we need to flush the buffers out of |
2821 | * the ring; otherwise, a driver error could lock a process up, |
2822 | * with no way to exit. |
2823 | */ |
2824 | |
static void
esh_flush_fp_ring(struct esh_softc *sc, struct esh_fp_ring_ctl *recv, struct esh_dmainfo *di)
{
	int error = 0;

	/*
	 * If the read request hasn't yet made it to the top of the queue,
	 * just remove it from the queue, and return.
	 */

	if ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING) {
		TAILQ_REMOVE(&recv->ec_queue, di, ed_list);
		return;
	}

#ifdef ESH_PRINTF
	printf("esh_flush_fp_ring: di->ed_flags %x, ulp %d, producer %x\n",
	    di->ed_flags, recv->ec_ulp, recv->ec_producer);
#endif

	/* Now we gotta get tough.  Issue a discard packet command */

	esh_send_cmd(sc, RR_CC_DISCARD_PKT, recv->ec_ulp,
	    recv->ec_producer - 1);

	/* Wait for it to finish */

	/*
	 * NOTE(review): this condition loops only while ESH_DI_READING
	 * is already CLEAR, yet the intent stated above is to wait for
	 * the discard to complete (which presumably clears the flag in
	 * the interrupt path).  One would expect "== ESH_DI_READING"
	 * here -- confirm against the completion path before changing.
	 */
	while ((di->ed_flags & ESH_DI_READING) != ESH_DI_READING &&
	       error == 0) {
		error = tsleep((void *) &di->ed_flags, PRIBIO,
		    "esh_flush_fp_ring", hz);
		printf("esh_flush_fp_ring: di->ed_flags %x, error %d\n",
		    di->ed_flags, error);
		/*
		 * What do I do if this times out or gets interrupted?
		 * Reset the card?  I could get an interrupt before
		 * giving it a chance to check.  Perhaps I oughta wait
		 * awhile?  What about not giving the user a chance
		 * to interrupt, and just expecting a quick answer?
		 * That way I could reset the card if it doesn't
		 * come back right away!
		 */
		if (error != 0) {
			eshreset(sc);
			break;
		}
	}

	/* XXX: Do we need to clear out the dmainfo pointers */
}
2875 | |
2876 | |
2877 | int |
2878 | eshioctl(struct ifnet *ifp, u_long cmd, void *data) |
2879 | { |
2880 | int error = 0; |
2881 | struct esh_softc *sc = ifp->if_softc; |
2882 | struct ifaddr *ifa = (struct ifaddr *)data; |
2883 | struct ifdrv *ifd = (struct ifdrv *) data; |
2884 | u_long len; |
2885 | int s; |
2886 | |
2887 | s = splnet(); |
2888 | |
2889 | while (sc->sc_flags & ESH_FL_EEPROM_BUSY) { |
2890 | error = tsleep(&sc->sc_flags, PCATCH | PRIBIO, |
2891 | "esheeprom" , 0); |
2892 | if (error != 0) |
2893 | goto ioctl_done; |
2894 | } |
2895 | |
2896 | switch (cmd) { |
2897 | |
2898 | case SIOCINITIFADDR: |
2899 | ifp->if_flags |= IFF_UP; |
2900 | if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) { |
2901 | eshinit(sc); |
2902 | if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) { |
2903 | error = EIO; |
2904 | goto ioctl_done; |
2905 | } |
2906 | } |
2907 | |
2908 | if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP)) |
2909 | == ESH_FL_RUNCODE_UP) { |
2910 | while (sc->sc_flags & ESH_FL_CLOSING_SNAP) { |
2911 | error = tsleep((void *) &sc->sc_snap_recv, |
2912 | PRIBIO, "esh_closing_fp_ring" , |
2913 | hz); |
2914 | if (error != 0) |
2915 | goto ioctl_done; |
2916 | } |
2917 | esh_init_snap_ring(sc); |
2918 | } |
2919 | |
2920 | switch (ifa->ifa_addr->sa_family) { |
2921 | #ifdef INET |
2922 | case AF_INET: |
2923 | /* The driver doesn't really care about IP addresses */ |
2924 | break; |
2925 | #endif |
2926 | default: |
2927 | break; |
2928 | } |
2929 | break; |
2930 | |
2931 | case SIOCSIFFLAGS: |
2932 | if ((error = ifioctl_common(ifp, cmd, data)) != 0) |
2933 | break; |
2934 | if ((ifp->if_flags & IFF_UP) == 0 && |
2935 | (ifp->if_flags & IFF_RUNNING) != 0) { |
2936 | /* |
2937 | * If interface is marked down and it is running, then |
2938 | * stop it. |
2939 | */ |
2940 | |
2941 | ifp->if_flags &= ~IFF_RUNNING; |
2942 | esh_close_snap_ring(sc); |
2943 | while (sc->sc_flags & ESH_FL_CLOSING_SNAP) { |
2944 | error = tsleep((void *) &sc->sc_snap_recv, |
2945 | PRIBIO, "esh_closing_fp_ring" , |
2946 | hz); |
2947 | if (error != 0) |
2948 | goto ioctl_done; |
2949 | } |
2950 | |
2951 | } else if ((ifp->if_flags & IFF_UP) != 0 && |
2952 | (ifp->if_flags & IFF_RUNNING) == 0) { |
2953 | |
2954 | if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) { |
2955 | eshinit(sc); |
2956 | if ((sc->sc_flags & ESH_FL_INITIALIZED) == 0) { |
2957 | error = EIO; |
2958 | goto ioctl_done; |
2959 | } |
2960 | } |
2961 | |
2962 | if ((sc->sc_flags & (ESH_FL_RUNCODE_UP | ESH_FL_SNAP_RING_UP)) == ESH_FL_RUNCODE_UP) { |
2963 | while (sc->sc_flags & ESH_FL_CLOSING_SNAP) { |
2964 | error = tsleep((void *) &sc->sc_snap_recv, PRIBIO, "esh_closing_fp_ring" , hz); |
2965 | if (error != 0) |
2966 | goto ioctl_done; |
2967 | } |
2968 | esh_init_snap_ring(sc); |
2969 | } |
2970 | } |
2971 | break; |
2972 | |
2973 | case SIOCSDRVSPEC: /* Driver-specific configuration calls */ |
2974 | cmd = ifd->ifd_cmd; |
2975 | len = ifd->ifd_len; |
2976 | data = ifd->ifd_data; |
2977 | |
2978 | esh_generic_ioctl(sc, cmd, data, len, NULL); |
2979 | break; |
2980 | |
2981 | default: |
2982 | error = ether_ioctl(ifp, cmd, data); |
2983 | break; |
2984 | } |
2985 | |
2986 | ioctl_done: |
2987 | splx(s); |
2988 | return (error); |
2989 | } |
2990 | |
2991 | |
2992 | static int |
2993 | esh_generic_ioctl(struct esh_softc *sc, u_long cmd, void *data, |
2994 | u_long len, struct lwp *l) |
2995 | { |
2996 | struct ifnet *ifp = &sc->sc_if; |
2997 | struct rr_eeprom rr_eeprom; |
2998 | bus_space_tag_t iot = sc->sc_iot; |
2999 | bus_space_handle_t ioh = sc->sc_ioh; |
3000 | u_int32_t misc_host_ctl; |
3001 | u_int32_t misc_local_ctl; |
3002 | u_int32_t address; |
3003 | u_int32_t value; |
3004 | u_int32_t offset; |
3005 | u_int32_t length; |
3006 | int error = 0; |
3007 | int i; |
3008 | |
3009 | /* |
3010 | * If we have a LWP pointer, check to make sure that the |
3011 | * user is privileged before performing any destruction operations. |
3012 | */ |
3013 | |
3014 | if (l != NULL) { |
3015 | switch (cmd) { |
3016 | case EIOCGTUNE: |
3017 | case EIOCGEEPROM: |
3018 | case EIOCGSTATS: |
3019 | break; |
3020 | |
3021 | default: |
3022 | error = kauth_authorize_network(l->l_cred, |
3023 | KAUTH_NETWORK_INTERFACE, |
3024 | KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, |
3025 | ifp, KAUTH_ARG(cmd), NULL); |
3026 | if (error) |
3027 | return (error); |
3028 | } |
3029 | } |
3030 | |
3031 | switch (cmd) { |
3032 | case EIOCGTUNE: |
3033 | if (len != sizeof(struct rr_tuning)) |
3034 | error = EMSGSIZE; |
3035 | else { |
3036 | error = copyout((void *) &sc->sc_tune, data, |
3037 | sizeof(struct rr_tuning)); |
3038 | } |
3039 | break; |
3040 | |
3041 | case EIOCSTUNE: |
3042 | if ((ifp->if_flags & IFF_UP) == 0) { |
3043 | if (len != sizeof(struct rr_tuning)) { |
3044 | error = EMSGSIZE; |
3045 | } else { |
3046 | error = copyin(data, (void *) &sc->sc_tune, |
3047 | sizeof(struct rr_tuning)); |
3048 | } |
3049 | } else { |
3050 | error = EBUSY; |
3051 | } |
3052 | break; |
3053 | |
3054 | case EIOCGSTATS: |
3055 | if (len != sizeof(struct rr_stats)) |
3056 | error = EMSGSIZE; |
3057 | else |
3058 | error = copyout((void *) &sc->sc_gen_info->ri_stats, |
3059 | data, sizeof(struct rr_stats)); |
3060 | break; |
3061 | |
3062 | case EIOCGEEPROM: |
3063 | case EIOCSEEPROM: |
3064 | if ((ifp->if_flags & IFF_UP) != 0) { |
3065 | error = EBUSY; |
3066 | break; |
3067 | } |
3068 | |
3069 | if (len != sizeof(struct rr_eeprom)) { |
3070 | error = EMSGSIZE; |
3071 | break; |
3072 | } |
3073 | |
3074 | error = copyin(data, (void *) &rr_eeprom, sizeof(rr_eeprom)); |
3075 | if (error != 0) |
3076 | break; |
3077 | |
3078 | offset = rr_eeprom.ifr_offset; |
3079 | length = rr_eeprom.ifr_length; |
3080 | |
3081 | if (length > RR_EE_MAX_LEN * sizeof(u_int32_t)) { |
3082 | error = EFBIG; |
3083 | break; |
3084 | } |
3085 | |
3086 | if (offset + length > RR_EE_MAX_LEN * sizeof(u_int32_t)) { |
3087 | error = EFAULT; |
3088 | break; |
3089 | } |
3090 | |
3091 | if (offset % 4 || length % 4) { |
3092 | error = EIO; |
3093 | break; |
3094 | } |
3095 | |
3096 | /* Halt the processor (preserve NO_SWAP, if set) */ |
3097 | |
3098 | misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL); |
3099 | bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL, |
3100 | (misc_host_ctl & RR_MH_NO_SWAP) | |
3101 | RR_MH_HALT_PROC); |
3102 | |
3103 | /* Make the EEPROM accessible */ |
3104 | |
3105 | misc_local_ctl = bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL); |
3106 | value = misc_local_ctl & |
3107 | ~(RR_LC_FAST_PROM | RR_LC_ADD_SRAM | RR_LC_PARITY_ON); |
3108 | if (cmd == EIOCSEEPROM) /* make writable! */ |
3109 | value |= RR_LC_WRITE_PROM; |
3110 | bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, value); |
3111 | |
3112 | if (cmd == EIOCSEEPROM) { |
3113 | printf("%s: writing EEPROM\n" , device_xname(sc->sc_dev)); |
3114 | sc->sc_flags |= ESH_FL_EEPROM_BUSY; |
3115 | } |
3116 | |
3117 | /* Do that EEPROM voodoo that you do so well... */ |
3118 | |
3119 | address = offset * RR_EE_BYTE_LEN; |
3120 | for (i = 0; i < length; i += 4) { |
3121 | if (cmd == EIOCGEEPROM) { |
3122 | value = esh_read_eeprom(sc, address); |
3123 | address += RR_EE_WORD_LEN; |
3124 | if (copyout(&value, |
3125 | (char *) rr_eeprom.ifr_buffer + i, |
3126 | sizeof(u_int32_t)) != 0) { |
3127 | error = EFAULT; |
3128 | break; |
3129 | } |
3130 | } else { |
3131 | if (copyin((char *) rr_eeprom.ifr_buffer + i, |
3132 | &value, sizeof(u_int32_t)) != 0) { |
3133 | error = EFAULT; |
3134 | break; |
3135 | } |
3136 | if (esh_write_eeprom(sc, address, |
3137 | value) != 0) { |
3138 | error = EIO; |
3139 | break; |
3140 | } |
3141 | |
3142 | /* |
3143 | * Have to give up control now and |
3144 | * then, so sleep for a clock tick. |
3145 | * Might be good to figure out how |
3146 | * long a tick is, so that we could |
3147 | * intelligently chose the frequency |
3148 | * of these pauses. |
3149 | */ |
3150 | |
3151 | if (i % 40 == 0) { |
3152 | tsleep(&sc->sc_flags, |
3153 | PRIBIO, "eshweeprom" , 1); |
3154 | } |
3155 | |
3156 | address += RR_EE_WORD_LEN; |
3157 | } |
3158 | } |
3159 | |
3160 | bus_space_write_4(iot, ioh, RR_MISC_LOCAL_CTL, misc_local_ctl); |
3161 | if (cmd == EIOCSEEPROM) { |
3162 | sc->sc_flags &= ~ESH_FL_EEPROM_BUSY; |
3163 | wakeup(&sc->sc_flags); |
3164 | printf("%s: done writing EEPROM\n" , |
3165 | device_xname(sc->sc_dev)); |
3166 | } |
3167 | break; |
3168 | |
3169 | case EIOCRESET: |
3170 | eshreset(sc); |
3171 | break; |
3172 | |
3173 | default: |
3174 | error = EINVAL; |
3175 | break; |
3176 | } |
3177 | |
3178 | return error; |
3179 | } |
3180 | |
3181 | |
/*
 * Reset the interface:  stop the hardware, then reinitialize it.
 * Runs at splnet to keep the interrupt path out while we do so.
 */
void
eshreset(struct esh_softc *sc)
{
	int spl;

	spl = splnet();
	eshstop(sc);
	eshinit(sc);
	splx(spl);
}
3192 | |
3193 | /* |
3194 | * The NIC expects a watchdog command every 10 seconds. If it doesn't |
3195 | * get the watchdog, it figures the host is dead and stops. When it does |
3196 | * get the command, it'll generate a watchdog event to let the host know |
3197 | * that it is still alive. We watch for this. |
3198 | */ |
3199 | |
3200 | void |
3201 | eshwatchdog(struct ifnet *ifp) |
3202 | { |
3203 | struct esh_softc *sc = ifp->if_softc; |
3204 | |
3205 | if (!sc->sc_watchdog) { |
3206 | printf("%s: watchdog timer expired. " |
3207 | "Should reset interface!\n" , |
3208 | device_xname(sc->sc_dev)); |
3209 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
3210 | eshstatus(sc); |
3211 | #if 0 |
3212 | eshstop(sc); /* DON'T DO THIS, it'll clear data we |
3213 | could use to debug it! */ |
3214 | #endif |
3215 | } else { |
3216 | sc->sc_watchdog = 0; |
3217 | |
3218 | esh_send_cmd(sc, RR_CC_WATCHDOG, 0, 0); |
3219 | ifp->if_timer = 5; |
3220 | } |
3221 | } |
3222 | |
3223 | |
3224 | /* |
3225 | * Stop the NIC and throw away packets that have started to be sent, |
3226 | * but didn't make it all the way. Re-adjust the various queue |
3227 | * pointers to account for this. |
3228 | */ |
3229 | |
void
eshstop(struct esh_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t misc_host_ctl;
	int i;

	/* Nothing to tear down if we never came up. */
	if (!(sc->sc_flags & ESH_FL_INITIALIZED))
		return;

	/* Just shut it all down. This isn't pretty, but it works */

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma, 0, sc->sc_dma_size,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Halt the NIC processor, preserving the NO_SWAP setting. */
	misc_host_ctl = bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL);
	bus_space_write_4(iot, ioh, RR_MISC_HOST_CTL,
			  (misc_host_ctl & RR_MH_NO_SWAP) | RR_MH_HALT_PROC);
	sc->sc_flags = 0;
	ifp->if_timer = 0;  /* turn off watchdog timer */

	/*
	 * Drain the SNAP receive ring:  unload each outstanding DMA map,
	 * free its mbuf, and wake anyone sleeping on ring space.
	 */
	while (sc->sc_snap_recv.ec_consumer
               != sc->sc_snap_recv.ec_producer) {
		u_int16_t offset = sc->sc_snap_recv.ec_consumer;

		bus_dmamap_unload(sc->sc_dmat,
				  sc->sc_snap_recv.ec_dma[offset]);
		m_free(sc->sc_snap_recv.ec_m[offset]);
		sc->sc_snap_recv.ec_m[offset] = NULL;
		sc->sc_snap_recv.ec_consumer =
			NEXT_RECV(sc->sc_snap_recv.ec_consumer);
		wakeup((void *) &sc->sc_snap_recv);
	}

	/* Handle FP rings */

	for (i = 0; i < RR_ULP_COUNT; i++) {
		struct esh_fp_ring_ctl *ring = sc->sc_fp_recv[i];
		struct esh_dmainfo *di = NULL;

		if (ring == NULL)
			continue;

		/* Get rid of outstanding buffers */

		esh_dma_sync(sc, ring->ec_descr,
			     ring->ec_consumer, ring->ec_producer,
			     RR_FP_RECV_RING_SIZE, sizeof(struct rr_descr), 0,
			     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Find the first ring entry with a dmainfo attached. */
		while (ring->ec_consumer != ring->ec_producer) {
			di = ring->ec_dmainfo[ring->ec_consumer];
			if (di != NULL)
				break;
			ring->ec_consumer = NEXT_RECV(ring->ec_consumer);
		}
		if (di == NULL)
			di = ring->ec_cur_dmainfo;

		/*
		 * Fail the outstanding transfer with EIO and wake both
		 * sleep channels associated with it.
		 */
		if (di != NULL) {
			bus_dmamap_unload(sc->sc_dmat, di->ed_dma);
			di->ed_error = EIO;
			di->ed_flags = 0;
			wakeup((void *) &di->ed_flags);	/* packet discard */
			wakeup((void *) di);		/* wait on read */
		}
		wakeup((void *) &ring->ec_ulp);		/* ring create */
		wakeup((void *) &ring->ec_index);	/* ring disable */
	}

	/* XXX: doesn't clear bufs being sent */

	/*
	 * Fail whatever was on the send side:  exactly one of mbuf, buf,
	 * or dmainfo can be current at a time.
	 */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_send.ec_dma);
	if (sc->sc_send.ec_cur_mbuf) {
		m_freem(sc->sc_send.ec_cur_mbuf);
	} else if (sc->sc_send.ec_cur_buf) {
		struct buf *bp = sc->sc_send.ec_cur_buf;

		/* Complete the buffer I/O with an error. */
		bp->b_resid = bp->b_bcount;
		bp->b_error = EIO;
		biodone(bp);
	} else if (sc->sc_send.ec_cur_dmainfo) {
		struct esh_dmainfo *di = sc->sc_send.ec_cur_dmainfo;

		di->ed_flags &= ~ESH_DI_BUSY;
		di->ed_error = EIO;
		wakeup((void *) di);
	}
	sc->sc_send.ec_cur_mbuf = NULL;
	sc->sc_send.ec_cur_buf = NULL;
	sc->sc_send.ec_cur_dmainfo = NULL;

	/*
	 * Clear out the index values, since they'll be useless
	 * when we restart.
	 */

	memset(sc->sc_fp_recv_index, 0,
	       sizeof(struct esh_fp_ring_ctl *) * RR_MAX_RECV_RING);

	/* Be sure to wake up any other processes waiting on driver action. */

	wakeup(sc);		/* Wait on initialization */
	wakeup(&sc->sc_flags);	/* Wait on EEPROM write */

	/*
	 * XXX: I have to come up with a way to avoid handling interrupts
	 *      received before this shuts down the card, but processed
	 *      afterwards!
	 */
}
3343 | |
3344 | /* |
3345 | * Read a value from the eeprom. This expects that the NIC has already |
3346 | * been tweaked to put it into the right state for reading from the |
3347 | * EEPROM -- the HALT bit is set in the MISC_HOST_CTL register, |
3348 | * and the FAST_PROM, ADD_SRAM, and PARITY flags have been cleared |
3349 | * in the MISC_LOCAL_CTL register. |
3350 | * |
3351 | * The EEPROM layout is a little weird. There is a valid byte every |
3352 | * eight bytes. Words are then smeared out over 32 bytes. |
3353 | * All addresses listed here are the actual starting addresses. |
3354 | */ |
3355 | |
3356 | static u_int32_t |
3357 | esh_read_eeprom(struct esh_softc *sc, u_int32_t addr) |
3358 | { |
3359 | int i; |
3360 | u_int32_t tmp; |
3361 | u_int32_t value = 0; |
3362 | |
3363 | /* If the offset hasn't been added, add it. Otherwise pass through */ |
3364 | |
3365 | if (!(addr & RR_EE_OFFSET)) |
3366 | addr += RR_EE_OFFSET; |
3367 | |
3368 | for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) { |
3369 | bus_space_write_4(sc->sc_iot, sc->sc_ioh, |
3370 | RR_WINDOW_BASE, addr); |
3371 | tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh, |
3372 | RR_WINDOW_DATA); |
3373 | value = (value << 8) | ((tmp >> 24) & 0xff); |
3374 | } |
3375 | return value; |
3376 | } |
3377 | |
3378 | |
3379 | /* |
3380 | * Write a value to the eeprom. Just like esh_read_eeprom, this routine |
3381 | * expects that the NIC has already been tweaked to put it into the right |
3382 | * state for reading from the EEPROM. Things are further complicated |
3383 | * in that we need to read each byte after we write it to ensure that |
3384 | * the new value has been successfully written. It can take as long |
3385 | * as 1ms (!) to write a byte. |
3386 | */ |
3387 | |
static int
esh_write_eeprom(struct esh_softc *sc, u_int32_t addr, u_int32_t value)
{
	int i, j;
	u_int32_t shifted_value, tmp = 0;

	/* If the offset hasn't been added, add it. Otherwise pass through */

	if (!(addr & RR_EE_OFFSET))
		addr += RR_EE_OFFSET;

	/* Write the word one byte at a time, MSB first. */
	for (i = 0; i < 4; i++, addr += RR_EE_BYTE_LEN) {
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				  RR_WINDOW_BASE, addr);

		/*
		 * Get the byte out of value, starting with the top, and
		 * put it into the top byte of the word to write.
		 */

		shifted_value = ((value >> ((3 - i) * 8)) & 0xff) << 24;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, RR_WINDOW_DATA,
				  shifted_value);
		/* Poll until the read-back matches what we wrote. */
		for (j = 0; j < 50; j++) {
			tmp = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
					       RR_WINDOW_DATA);
			if (tmp == shifted_value)
				break;
			delay(500);	/* 500us * up to 50 retries ~= 25ms per byte */
		}
		/* Byte never verified:  give up and report failure. */
		if (tmp != shifted_value)
			return -1;
	}

	return 0;
}
3424 | |
3425 | |
3426 | /* |
3427 | * Send a command to the NIC. If there is no room in the command ring, |
3428 | * panic. |
3429 | */ |
3430 | |
static void
esh_send_cmd(struct esh_softc *sc, u_int8_t cmd, u_int8_t ring, u_int8_t index)
{
	union rr_cmd c;

/*
 * Advance the producer through the 16-slot command ring.  Note that
 * this subtracts one modulo 16 -- the producer walks downward through
 * the ring (presumably matching the NIC's consumption order; the
 * hardware side is not visible here).
 */
#define NEXT_CMD(i) (((i) + 0x10 - 1) & 0x0f)

	/* Pack the command, ring, and index into one 32-bit word. */
	c.l = 0;
	c.b.rc_code = cmd;
	c.b.rc_ring = ring;
	c.b.rc_index = index;

	/* Post the command into the current producer slot. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			  RR_COMMAND_RING + sizeof(c) * sc->sc_cmd_producer,
			  c.l);

#ifdef ESH_PRINTF
	/* avoid annoying messages when possible */
	if (cmd != RR_CC_WATCHDOG)
		printf("esh_send_cmd: cmd %x ring %d index %d slot %x\n" ,
		       cmd, ring, index, sc->sc_cmd_producer);
#endif

	sc->sc_cmd_producer = NEXT_CMD(sc->sc_cmd_producer);
}
3456 | |
3457 | |
3458 | /* |
3459 | * Write an address to the device. |
3460 | * XXX: This belongs in bus-dependent land! |
3461 | */ |
3462 | |
3463 | static void |
3464 | esh_write_addr(bus_space_tag_t iot, bus_space_handle_t ioh, bus_addr_t addr, bus_addr_t value) |
3465 | { |
3466 | bus_space_write_4(iot, ioh, addr, 0); |
3467 | bus_space_write_4(iot, ioh, addr + sizeof(u_int32_t), value); |
3468 | } |
3469 | |
3470 | |
3471 | /* Copy the RunCode from EEPROM to SRAM. Ughly. */ |
3472 | |
3473 | static void |
3474 | esh_reset_runcode(struct esh_softc *sc) |
3475 | { |
3476 | bus_space_tag_t iot = sc->sc_iot; |
3477 | bus_space_handle_t ioh = sc->sc_ioh; |
3478 | u_int32_t value; |
3479 | u_int32_t len; |
3480 | u_int32_t i; |
3481 | u_int32_t segments; |
3482 | u_int32_t ee_addr; |
3483 | u_int32_t rc_addr; |
3484 | u_int32_t sram_addr; |
3485 | |
3486 | /* Zero the SRAM */ |
3487 | |
3488 | for (i = 0; i < sc->sc_sram_size; i += 4) { |
3489 | bus_space_write_4(iot, ioh, RR_WINDOW_BASE, i); |
3490 | bus_space_write_4(iot, ioh, RR_WINDOW_DATA, 0); |
3491 | } |
3492 | |
3493 | /* Find the address of the segment description section */ |
3494 | |
3495 | rc_addr = esh_read_eeprom(sc, RR_EE_RUNCODE_SEGMENTS); |
3496 | segments = esh_read_eeprom(sc, rc_addr); |
3497 | |
3498 | for (i = 0; i < segments; i++) { |
3499 | rc_addr += RR_EE_WORD_LEN; |
3500 | sram_addr = esh_read_eeprom(sc, rc_addr); |
3501 | rc_addr += RR_EE_WORD_LEN; |
3502 | len = esh_read_eeprom(sc, rc_addr); |
3503 | rc_addr += RR_EE_WORD_LEN; |
3504 | ee_addr = esh_read_eeprom(sc, rc_addr); |
3505 | |
3506 | while (len--) { |
3507 | value = esh_read_eeprom(sc, ee_addr); |
3508 | bus_space_write_4(iot, ioh, RR_WINDOW_BASE, sram_addr); |
3509 | bus_space_write_4(iot, ioh, RR_WINDOW_DATA, value); |
3510 | |
3511 | ee_addr += RR_EE_WORD_LEN; |
3512 | sram_addr += 4; |
3513 | } |
3514 | } |
3515 | } |
3516 | |
3517 | |
3518 | /* |
3519 | * Perform bus DMA syncing operations on various rings. |
3520 | * We have to worry about our relative position in the ring, |
3521 | * and whether the ring has wrapped. All of this code should take |
3522 | * care of those worries. |
3523 | */ |
3524 | |
static void
esh_dma_sync(struct esh_softc *sc, void *mem, int start, int end, int entries, int size, int do_equal, int ops)
{
	/* Byte offset of this ring within the shared DMA region. */
	int offset = (char *)mem - (char *)sc->sc_dma_addr;

	if (start < end) {
		/* No wrap:  sync the contiguous entries [start, end). */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
				offset + start * size,
				(end - start) * size, ops);
	} else if (do_equal || start != end) {
		/*
		 * Wrapped (or treated as a full ring when do_equal and
		 * start == end):  sync the head [0, end) and the tail
		 * [start, entries) as two separate operations.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
				offset,
				end * size, ops);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma,
				offset + start * size,
				(entries - start) * size, ops);
	}
}
3543 | |
3544 | |
3545 | static struct esh_dmainfo * |
3546 | esh_new_dmainfo(struct esh_softc *sc) |
3547 | { |
3548 | struct esh_dmainfo *di; |
3549 | int s; |
3550 | |
3551 | s = splnet(); |
3552 | |
3553 | di = TAILQ_FIRST(&sc->sc_dmainfo_freelist); |
3554 | if (di != NULL) { |
3555 | TAILQ_REMOVE(&sc->sc_dmainfo_freelist, di, ed_list); |
3556 | sc->sc_dmainfo_freelist_count--; |
3557 | splx(s); |
3558 | return di; |
3559 | } |
3560 | |
3561 | /* None sitting around, so build one now... */ |
3562 | |
3563 | di = (struct esh_dmainfo *) malloc(sizeof(*di), M_DEVBUF, |
3564 | M_WAITOK|M_ZERO); |
3565 | assert(di != NULL); |
3566 | |
3567 | if (bus_dmamap_create(sc->sc_dmat, ESH_MAX_NSEGS * RR_DMA_MAX, |
3568 | ESH_MAX_NSEGS, RR_DMA_MAX, RR_DMA_BOUNDARY, |
3569 | BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, |
3570 | &di->ed_dma)) { |
3571 | printf("%s: failed dmainfo bus_dmamap_create\n" , |
3572 | device_xname(sc->sc_dev)); |
3573 | free(di, M_DEVBUF); |
3574 | di = NULL; |
3575 | } |
3576 | |
3577 | splx(s); |
3578 | return di; |
3579 | } |
3580 | |
3581 | static void |
3582 | esh_free_dmainfo(struct esh_softc *sc, struct esh_dmainfo *di) |
3583 | { |
3584 | int s = splnet(); |
3585 | |
3586 | assert(di != NULL); |
3587 | di->ed_buf = NULL; |
3588 | TAILQ_INSERT_TAIL(&sc->sc_dmainfo_freelist, di, ed_list); |
3589 | sc->sc_dmainfo_freelist_count++; |
3590 | #ifdef ESH_PRINTF |
3591 | printf("esh_free_dmainfo: freelist count %d\n" , sc->sc_dmainfo_freelist_count); |
3592 | #endif |
3593 | |
3594 | splx(s); |
3595 | } |
3596 | |
3597 | |
3598 | /* ------------------------- debugging functions --------------------------- */ |
3599 | |
3600 | /* |
3601 | * Print out status information about the NIC and the driver. |
3602 | */ |
3603 | |
static int
eshstatus(struct esh_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;

	/* XXX: This looks pathetic, and should be improved! */

	/* Dump the main diagnostic registers. */
	printf("%s: status -- fail1 %x fail2 %x\n" ,
	       device_xname(sc->sc_dev),
	       bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL1),
	       bus_space_read_4(iot, ioh, RR_RUNCODE_FAIL2));
	printf("\tmisc host ctl %x misc local ctl %x\n" ,
	       bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL),
	       bus_space_read_4(iot, ioh, RR_MISC_LOCAL_CTL));
	printf("\toperating mode %x event producer %x\n" ,
	       bus_space_read_4(iot, ioh, RR_MODE_AND_STATUS),
	       bus_space_read_4(iot, ioh, RR_EVENT_PRODUCER));
	printf("\tPC %x max rings %x\n" ,
	       bus_space_read_4(iot, ioh, RR_PROC_PC),
	       bus_space_read_4(iot, ioh, RR_MAX_RECV_RINGS));
	printf("\tHIPPI tx state %x rx state %x\n" ,
	       bus_space_read_4(iot, ioh, RR_TX_STATE),
	       bus_space_read_4(iot, ioh, RR_RX_STATE));
	printf("\tDMA write state %x read state %x\n" ,
	       bus_space_read_4(iot, ioh, RR_DMA_WRITE_STATE),
	       bus_space_read_4(iot, ioh, RR_DMA_READ_STATE));
	/* 64-bit DMA addresses are stored as two 32-bit register words. */
	printf("\tDMA write addr %x%x read addr %x%x\n" ,
	       bus_space_read_4(iot, ioh, RR_WRITE_HOST),
	       bus_space_read_4(iot, ioh, RR_WRITE_HOST + 4),
	       bus_space_read_4(iot, ioh, RR_READ_HOST),
	       bus_space_read_4(iot, ioh, RR_READ_HOST + 4));

	/* Print only the non-zero entries of the NIC statistics block. */
	for (i = 0; i < 64; i++)
		if (sc->sc_gen_info->ri_stats.rs_stats[i])
			printf("stat %x is %x\n" , i * 4,
			       sc->sc_gen_info->ri_stats.rs_stats[i]);

	return 0;
}
3645 | |
3646 | |
3647 | #ifdef ESH_PRINTF |
3648 | |
3649 | /* Check to make sure that the NIC is still running */ |
3650 | |
3651 | static int |
3652 | esh_check(struct esh_softc *sc) |
3653 | { |
3654 | bus_space_tag_t iot = sc->sc_iot; |
3655 | bus_space_handle_t ioh = sc->sc_ioh; |
3656 | |
3657 | if (bus_space_read_4(iot, ioh, RR_MISC_HOST_CTL) & RR_MH_HALT_PROC) { |
3658 | printf("esh_check: NIC stopped\n" ); |
3659 | eshstatus(sc); |
3660 | return 1; |
3661 | } else { |
3662 | return 0; |
3663 | } |
3664 | } |
3665 | #endif |
3666 | |
3667 | |