| 1 | /* $NetBSD: i82557.c,v 1.145 2016/06/10 13:27:13 ozaki-r Exp $ */ |
| 2 | |
| 3 | /*- |
| 4 | * Copyright (c) 1997, 1998, 1999, 2001, 2002 The NetBSD Foundation, Inc. |
| 5 | * All rights reserved. |
| 6 | * |
| 7 | * This code is derived from software contributed to The NetBSD Foundation |
| 8 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
| 9 | * NASA Ames Research Center. |
| 10 | * |
| 11 | * Redistribution and use in source and binary forms, with or without |
| 12 | * modification, are permitted provided that the following conditions |
| 13 | * are met: |
| 14 | * 1. Redistributions of source code must retain the above copyright |
| 15 | * notice, this list of conditions and the following disclaimer. |
| 16 | * 2. Redistributions in binary form must reproduce the above copyright |
| 17 | * notice, this list of conditions and the following disclaimer in the |
| 18 | * documentation and/or other materials provided with the distribution. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
| 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
| 22 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
| 23 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
| 24 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 30 | * POSSIBILITY OF SUCH DAMAGE. |
| 31 | */ |
| 32 | |
| 33 | /* |
| 34 | * Copyright (c) 1995, David Greenman |
| 35 | * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org> |
| 36 | * All rights reserved. |
| 37 | * |
| 38 | * Redistribution and use in source and binary forms, with or without |
| 39 | * modification, are permitted provided that the following conditions |
| 40 | * are met: |
| 41 | * 1. Redistributions of source code must retain the above copyright |
| 42 | * notice unmodified, this list of conditions, and the following |
| 43 | * disclaimer. |
| 44 | * 2. Redistributions in binary form must reproduce the above copyright |
| 45 | * notice, this list of conditions and the following disclaimer in the |
| 46 | * documentation and/or other materials provided with the distribution. |
| 47 | * |
| 48 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
| 52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 58 | * SUCH DAMAGE. |
| 59 | * |
| 60 | * Id: if_fxp.c,v 1.113 2001/05/17 23:50:24 jlemon |
| 61 | */ |
| 62 | |
| 63 | /* |
| 64 | * Device driver for the Intel i82557 fast Ethernet controller, |
| 65 | * and its successors, the i82558 and i82559. |
| 66 | */ |
| 67 | |
| 68 | #include <sys/cdefs.h> |
| 69 | __KERNEL_RCSID(0, "$NetBSD: i82557.c,v 1.145 2016/06/10 13:27:13 ozaki-r Exp $"); |
| 70 | |
| 71 | #include <sys/param.h> |
| 72 | #include <sys/systm.h> |
| 73 | #include <sys/callout.h> |
| 74 | #include <sys/mbuf.h> |
| 75 | #include <sys/malloc.h> |
| 76 | #include <sys/kernel.h> |
| 77 | #include <sys/socket.h> |
| 78 | #include <sys/ioctl.h> |
| 79 | #include <sys/errno.h> |
| 80 | #include <sys/device.h> |
| 81 | #include <sys/syslog.h> |
| 82 | #include <sys/proc.h> |
| 83 | |
| 84 | #include <machine/endian.h> |
| 85 | |
| 86 | #include <sys/rndsource.h> |
| 87 | |
| 88 | #include <net/if.h> |
| 89 | #include <net/if_dl.h> |
| 90 | #include <net/if_media.h> |
| 91 | #include <net/if_ether.h> |
| 92 | |
| 93 | #include <netinet/in.h> |
| 94 | #include <netinet/in_systm.h> |
| 95 | #include <netinet/ip.h> |
| 96 | #include <netinet/tcp.h> |
| 97 | #include <netinet/udp.h> |
| 98 | |
| 99 | #include <net/bpf.h> |
| 100 | |
| 101 | #include <sys/bus.h> |
| 102 | #include <sys/intr.h> |
| 103 | |
| 104 | #include <dev/mii/miivar.h> |
| 105 | |
| 106 | #include <dev/ic/i82557reg.h> |
| 107 | #include <dev/ic/i82557var.h> |
| 108 | |
| 109 | #include <dev/microcode/i8255x/rcvbundl.h> |
| 110 | |
| 111 | /* |
| 112 | * NOTE! On the Alpha, we have an alignment constraint. The |
| 113 | * card DMAs the packet immediately following the RFA. However, |
| 114 | * the first thing in the packet is a 14-byte Ethernet header. |
| 115 | * This means that the packet is misaligned. To compensate, |
| 116 | * we actually offset the RFA 2 bytes into the cluster. This |
| 117 | * aligns the packet after the Ethernet header at a 32-bit |
| 118 | * boundary. HOWEVER! This means that the RFA is misaligned! |
| 119 | */ |
| 120 | #define RFA_ALIGNMENT_FUDGE 2 |
| 121 | |
| 122 | /* |
| 123 | * The configuration byte map has several undefined fields which |
| 124 | * must be one or must be zero. Set up a template for these bits |
| 125 | * only (assuming an i82557 chip), leaving the actual configuration |
| 126 | * for fxp_init(). |
| 127 | * |
| 128 | * See the definition of struct fxp_cb_config for the bit definitions. |
| 129 | */ |
| 130 | const uint8_t fxp_cb_config_template[] = { |
| 131 | 0x0, 0x0, /* cb_status */ |
| 132 | 0x0, 0x0, /* cb_command */ |
| 133 | 0x0, 0x0, 0x0, 0x0, /* link_addr */ |
| 134 | 0x0, /* 0 */ |
| 135 | 0x0, /* 1 */ |
| 136 | 0x0, /* 2 */ |
| 137 | 0x0, /* 3 */ |
| 138 | 0x0, /* 4 */ |
| 139 | 0x0, /* 5 */ |
| 140 | 0x32, /* 6 */ |
| 141 | 0x0, /* 7 */ |
| 142 | 0x0, /* 8 */ |
| 143 | 0x0, /* 9 */ |
| 144 | 0x6, /* 10 */ |
| 145 | 0x0, /* 11 */ |
| 146 | 0x0, /* 12 */ |
| 147 | 0x0, /* 13 */ |
| 148 | 0xf2, /* 14 */ |
| 149 | 0x48, /* 15 */ |
| 150 | 0x0, /* 16 */ |
| 151 | 0x40, /* 17 */ |
| 152 | 0xf0, /* 18 */ |
| 153 | 0x0, /* 19 */ |
| 154 | 0x3f, /* 20 */ |
| 155 | 0x5, /* 21 */ |
| 156 | 0x0, /* 22 */ |
| 157 | 0x0, /* 23 */ |
| 158 | 0x0, /* 24 */ |
| 159 | 0x0, /* 25 */ |
| 160 | 0x0, /* 26 */ |
| 161 | 0x0, /* 27 */ |
| 162 | 0x0, /* 28 */ |
| 163 | 0x0, /* 29 */ |
| 164 | 0x0, /* 30 */ |
| 165 | 0x0, /* 31 */ |
| 166 | }; |
| 167 | |
| 168 | void fxp_mii_initmedia(struct fxp_softc *); |
| 169 | void fxp_mii_mediastatus(struct ifnet *, struct ifmediareq *); |
| 170 | |
| 171 | void fxp_80c24_initmedia(struct fxp_softc *); |
| 172 | int fxp_80c24_mediachange(struct ifnet *); |
| 173 | void fxp_80c24_mediastatus(struct ifnet *, struct ifmediareq *); |
| 174 | |
| 175 | void fxp_start(struct ifnet *); |
| 176 | int fxp_ioctl(struct ifnet *, u_long, void *); |
| 177 | void fxp_watchdog(struct ifnet *); |
| 178 | int fxp_init(struct ifnet *); |
| 179 | void fxp_stop(struct ifnet *, int); |
| 180 | |
| 181 | void fxp_txintr(struct fxp_softc *); |
| 182 | int fxp_rxintr(struct fxp_softc *); |
| 183 | |
| 184 | void fxp_rx_hwcksum(struct fxp_softc *, struct mbuf *, |
| 185 | const struct fxp_rfa *, u_int); |
| 186 | |
| 187 | void fxp_rxdrain(struct fxp_softc *); |
| 188 | int fxp_add_rfabuf(struct fxp_softc *, bus_dmamap_t, int); |
| 189 | int fxp_mdi_read(device_t, int, int); |
| 190 | void fxp_statchg(struct ifnet *); |
| 191 | void fxp_mdi_write(device_t, int, int, int); |
| 192 | void fxp_autosize_eeprom(struct fxp_softc *); |
| 193 | void fxp_read_eeprom(struct fxp_softc *, uint16_t *, int, int); |
| 194 | void fxp_write_eeprom(struct fxp_softc *, uint16_t *, int, int); |
| 195 | void fxp_eeprom_update_cksum(struct fxp_softc *); |
| 196 | void fxp_get_info(struct fxp_softc *, uint8_t *); |
| 197 | void fxp_tick(void *); |
| 198 | void fxp_mc_setup(struct fxp_softc *); |
| 199 | void fxp_load_ucode(struct fxp_softc *); |
| 200 | |
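| | /* |
| | * If non-zero, received packets small enough to fit in an mbuf header |
| | * (<= MHLEN) are copied into a freshly allocated mbuf and the original |
| | * receive buffer is recycled instead of being passed up the stack. |
| | */ |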
| 201 | int fxp_copy_small = 0; |
| 202 | |
| 203 | /* |
| 204 | * Variables for the interrupt-mitigating microcode. |
| 205 | */ |
| 206 | int fxp_int_delay = 1000; /* usec */ |
| 207 | int fxp_bundle_max = 6; /* packets */ |
| 208 | |
| 209 | struct fxp_phytype { |
| 210 | int fp_phy; /* type of PHY, -1 for MII at the end. */ |
| 211 | void (*fp_init)(struct fxp_softc *); |
| 212 | } fxp_phytype_table[] = { |
| 213 | { FXP_PHY_80C24, fxp_80c24_initmedia }, |
| 214 | { -1, fxp_mii_initmedia }, |
| 215 | }; |
| 216 | |
| 217 | /* |
| 218 | * Set initial transmit threshold at 64 (512 bytes). This is |
| 219 | * increased by 64 (512 bytes) at a time, to a maximum of 192 |
| 220 | * (1536 bytes), if an underrun occurs. |
| 221 | */ |
| 222 | static int tx_threshold = 64; |
| 223 | |
| 224 | /* |
| 225 | * Wait for the previous command to be accepted (but not necessarily |
| 226 | * completed). |
| 227 | */ |
| 228 | static inline void |
| 229 | fxp_scb_wait(struct fxp_softc *sc) |
| 230 | { |
| 231 | int i = 10000; |
| 232 | |
| 233 | while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) |
| 234 | delay(2); |
| 235 | if (i == 0) |
| 236 | log(LOG_WARNING, |
| 237 | "%s: WARNING: SCB timed out!\n" , device_xname(sc->sc_dev)); |
| 238 | } |
| 239 | |
| 240 | /* |
| 241 | * Submit a command to the i82557. |
| 242 | */ |
| 243 | static inline void |
| 244 | fxp_scb_cmd(struct fxp_softc *sc, uint8_t cmd) |
| 245 | { |
| 246 | |
| 247 | CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd); |
| 248 | } |
| 249 | |
| 250 | /* |
| 251 | * Finish attaching an i82557 interface. Called by bus-specific front-end. |
| 252 | */ |
| 253 | void |
| 254 | fxp_attach(struct fxp_softc *sc) |
| 255 | { |
| 256 | uint8_t enaddr[ETHER_ADDR_LEN]; |
| 257 | struct ifnet *ifp; |
| 258 | bus_dma_segment_t seg; |
| 259 | int rseg, i, error; |
| 260 | struct fxp_phytype *fp; |
| 261 | |
| 262 | callout_init(&sc->sc_callout, 0); |
| 263 | |
| 264 | /* |
| 265 | * Enable use of extended RFDs and IPCBs for 82550 and later chips. |
| 266 | * Note: to use IPCB we need extended TXCB support too, and |
| 267 | * these feature flags should be set in each bus attachment. |
| 268 | */ |
| 269 | if (sc->sc_flags & FXPF_EXT_RFA) { |
| 270 | sc->sc_txcmd = htole16(FXP_CB_COMMAND_IPCBXMIT); |
| 271 | sc->sc_rfa_size = RFA_EXT_SIZE; |
| 272 | } else { |
| 273 | sc->sc_txcmd = htole16(FXP_CB_COMMAND_XMIT); |
| 274 | sc->sc_rfa_size = RFA_SIZE; |
| 275 | } |
| 276 | |
| 277 | /* |
| 278 | * Allocate the control data structures, and create and load the |
| 279 | * DMA map for it. |
| 280 | */ |
| 281 | if ((error = bus_dmamem_alloc(sc->sc_dmat, |
| 282 | sizeof(struct fxp_control_data), PAGE_SIZE, 0, &seg, 1, &rseg, |
| 283 | 0)) != 0) { |
| 284 | aprint_error_dev(sc->sc_dev, |
| 285 | "unable to allocate control data, error = %d\n" , |
| 286 | error); |
| 287 | goto fail_0; |
| 288 | } |
| 289 | |
| 290 | if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, |
| 291 | sizeof(struct fxp_control_data), (void **)&sc->sc_control_data, |
| 292 | BUS_DMA_COHERENT)) != 0) { |
| 293 | aprint_error_dev(sc->sc_dev, |
| 294 | "unable to map control data, error = %d\n" , error); |
| 295 | goto fail_1; |
| 296 | } |
| 297 | sc->sc_cdseg = seg; |
| 298 | sc->sc_cdnseg = rseg; |
| 299 | |
| 300 | memset(sc->sc_control_data, 0, sizeof(struct fxp_control_data)); |
| 301 | |
| 302 | if ((error = bus_dmamap_create(sc->sc_dmat, |
| 303 | sizeof(struct fxp_control_data), 1, |
| 304 | sizeof(struct fxp_control_data), 0, 0, &sc->sc_dmamap)) != 0) { |
| 305 | aprint_error_dev(sc->sc_dev, |
| 306 | "unable to create control data DMA map, error = %d\n" , |
| 307 | error); |
| 308 | goto fail_2; |
| 309 | } |
| 310 | |
| 311 | if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, |
| 312 | sc->sc_control_data, sizeof(struct fxp_control_data), NULL, |
| 313 | 0)) != 0) { |
| 314 | aprint_error_dev(sc->sc_dev, |
| 315 | "can't load control data DMA map, error = %d\n" , |
| 316 | error); |
| 317 | goto fail_3; |
| 318 | } |
| 319 | |
| 320 | /* |
| 321 | * Create the transmit buffer DMA maps. |
| 322 | */ |
| 323 | for (i = 0; i < FXP_NTXCB; i++) { |
| 324 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, |
| 325 | (sc->sc_flags & FXPF_EXT_RFA) ? |
| 326 | FXP_IPCB_NTXSEG : FXP_NTXSEG, |
| 327 | MCLBYTES, 0, 0, &FXP_DSTX(sc, i)->txs_dmamap)) != 0) { |
| 328 | aprint_error_dev(sc->sc_dev, |
| 329 | "unable to create tx DMA map %d, error = %d\n" , |
| 330 | i, error); |
| 331 | goto fail_4; |
| 332 | } |
| 333 | } |
| 334 | |
| 335 | /* |
| 336 | * Create the receive buffer DMA maps. |
| 337 | */ |
| 338 | for (i = 0; i < FXP_NRFABUFS; i++) { |
| 339 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, |
| 340 | MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) { |
| 341 | aprint_error_dev(sc->sc_dev, |
| 342 | "unable to create rx DMA map %d, error = %d\n" , |
| 343 | i, error); |
| 344 | goto fail_5; |
| 345 | } |
| 346 | } |
| 347 | |
| 348 | /* Initialize MAC address and media structures. */ |
| 349 | fxp_get_info(sc, enaddr); |
| 350 | |
| 351 | aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", |
| 352 | ether_sprintf(enaddr)); |
| 353 | |
| 354 | ifp = &sc->sc_ethercom.ec_if; |
| 355 | |
| 356 | /* |
| 357 | * Get info about our media interface, and initialize it. Note |
| 358 | * the table terminates itself with a phy of -1, indicating |
| 359 | * that we're using MII. |
| 360 | */ |
| 361 | for (fp = fxp_phytype_table; fp->fp_phy != -1; fp++) |
| 362 | if (fp->fp_phy == sc->phy_primary_device) |
| 363 | break; |
| 364 | (*fp->fp_init)(sc); |
| 365 | |
| 366 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
| 367 | ifp->if_softc = sc; |
| 368 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
| 369 | ifp->if_ioctl = fxp_ioctl; |
| 370 | ifp->if_start = fxp_start; |
| 371 | ifp->if_watchdog = fxp_watchdog; |
| 372 | ifp->if_init = fxp_init; |
| 373 | ifp->if_stop = fxp_stop; |
| 374 | IFQ_SET_READY(&ifp->if_snd); |
| 375 | |
| 376 | if (sc->sc_flags & FXPF_EXT_RFA) { |
| 377 | /* |
| 378 | * Enable hardware cksum support by EXT_RFA and IPCB. |
| 379 | * |
| 380 | * IFCAP_CSUM_IPv4_Tx seems to have a problem, |
| 381 | * at least on i82550 rev. 12. |
| 382 | * Specifically, it doesn't set the IPv4 checksum properly |
| 383 | * when sending UDP (and probably TCP) packets with |
| 384 | * a 20-byte IPv4 header + 1 or 2 bytes of data, |
| 385 | * though ICMP packets seem to work. |
| 386 | * The FreeBSD driver has related comments. |
| 387 | * We've added a workaround to handle the bug by padding |
| 388 | * such packets manually. |
| 389 | */ |
| 390 | ifp->if_capabilities = |
| 391 | IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | |
| 392 | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | |
| 393 | IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; |
| 394 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; |
| 395 | } else if (sc->sc_flags & FXPF_82559_RXCSUM) { |
| 396 | ifp->if_capabilities = |
| 397 | IFCAP_CSUM_TCPv4_Rx | |
| 398 | IFCAP_CSUM_UDPv4_Rx; |
| 399 | } |
| 400 | |
| 401 | /* |
| 402 | * We can support 802.1Q VLAN-sized frames. |
| 403 | */ |
| 404 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
| 405 | |
| 406 | /* |
| 407 | * Attach the interface. |
| 408 | */ |
| 409 | if_attach(ifp); |
| 410 | ether_ifattach(ifp, enaddr); |
| 411 | rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), |
| 412 | RND_TYPE_NET, RND_FLAG_DEFAULT); |
| 413 | |
| 414 | #ifdef FXP_EVENT_COUNTERS |
| 415 | evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC, |
| 416 | NULL, device_xname(sc->sc_dev), "txstall"); |
| 417 | evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR, |
| 418 | NULL, device_xname(sc->sc_dev), "txintr"); |
| 419 | evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, |
| 420 | NULL, device_xname(sc->sc_dev), "rxintr"); |
| 421 | if (sc->sc_flags & FXPF_FC) { |
| 422 | evcnt_attach_dynamic(&sc->sc_ev_txpause, EVCNT_TYPE_MISC, |
| 423 | NULL, device_xname(sc->sc_dev), "txpause"); |
| 424 | evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_MISC, |
| 425 | NULL, device_xname(sc->sc_dev), "rxpause"); |
| 426 | } |
| 427 | #endif /* FXP_EVENT_COUNTERS */ |
| 428 | |
| 429 | /* The attach is successful. */ |
| 430 | sc->sc_flags |= FXPF_ATTACHED; |
| 431 | |
| 432 | return; |
| 433 | |
| 434 | /* |
| 435 | * Free any resources we've allocated during the failed attach |
| 436 | * attempt. Do this in reverse order and fall through. |
| 437 | */ |
| 438 | fail_5: |
| 439 | for (i = 0; i < FXP_NRFABUFS; i++) { |
| 440 | if (sc->sc_rxmaps[i] != NULL) |
| 441 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmaps[i]); |
| 442 | } |
| 443 | fail_4: |
| 444 | for (i = 0; i < FXP_NTXCB; i++) { |
| 445 | if (FXP_DSTX(sc, i)->txs_dmamap != NULL) |
| 446 | bus_dmamap_destroy(sc->sc_dmat, |
| 447 | FXP_DSTX(sc, i)->txs_dmamap); |
| 448 | } |
| 449 | bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); |
| 450 | fail_3: |
| 451 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); |
| 452 | fail_2: |
| 453 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, |
| 454 | sizeof(struct fxp_control_data)); |
| 455 | fail_1: |
| 456 | bus_dmamem_free(sc->sc_dmat, &seg, rseg); |
| 457 | fail_0: |
| 458 | return; |
| 459 | } |
| 460 | |
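| | /* |
| | * Initialize media for chips whose PHY is attached via MII. |
| | */ |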
| 461 | void |
| 462 | fxp_mii_initmedia(struct fxp_softc *sc) |
| 463 | { |
| 464 | int flags; |
| 465 | |
| 466 | sc->sc_flags |= FXPF_MII; |
| 467 | |
| 468 | sc->sc_mii.mii_ifp = &sc->sc_ethercom.ec_if; |
| 469 | sc->sc_mii.mii_readreg = fxp_mdi_read; |
| 470 | sc->sc_mii.mii_writereg = fxp_mdi_write; |
| 471 | sc->sc_mii.mii_statchg = fxp_statchg; |
| 472 | |
| 473 | sc->sc_ethercom.ec_mii = &sc->sc_mii; |
| 474 | ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange, |
| 475 | fxp_mii_mediastatus); |
| 476 | |
| 477 | flags = MIIF_NOISOLATE; |
| 478 | if (sc->sc_flags & FXPF_FC) |
| 479 | flags |= MIIF_FORCEANEG|MIIF_DOPAUSE; |
| 480 | /* |
| 481 | * The i82557 wedges if all of its PHYs are isolated! |
| 482 | */ |
| 483 | mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, |
| 484 | MII_OFFSET_ANY, flags); |
| 485 | if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { |
| 486 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); |
| 487 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); |
| 488 | } else |
| 489 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); |
| 490 | } |
| 491 | |
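| | /* |
| | * Initialize media for boards using the Seeq 80c24 media interface. |
| | */ |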
| 492 | void |
| 493 | fxp_80c24_initmedia(struct fxp_softc *sc) |
| 494 | { |
| 495 | |
| 496 | /* |
| 497 | * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter |
| 498 | * doesn't have a programming interface of any sort. The |
| 499 | * media is sensed automatically based on how the link partner |
| 500 | * is configured. This is, in essence, manual configuration. |
| 501 | */ |
| 502 | aprint_normal_dev(sc->sc_dev, |
| 503 | "Seeq 80c24 AutoDUPLEX media interface present\n" ); |
| 504 | ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_80c24_mediachange, |
| 505 | fxp_80c24_mediastatus); |
| 506 | ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL); |
| 507 | ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); |
| 508 | } |
| 509 | |
| 510 | /* |
| 511 | * Read chip info from the EEPROM: size, primary PHY type, and MAC address. |
| 512 | */ |
| 513 | void |
| 514 | fxp_get_info(struct fxp_softc *sc, uint8_t *enaddr) |
| 515 | { |
| 516 | uint16_t data, myea[ETHER_ADDR_LEN / 2]; |
| 517 | |
| 518 | /* |
| 519 | * Reset to a stable state. |
| 520 | */ |
| 521 | CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); |
| 522 | DELAY(100); |
| 523 | |
| 524 | sc->sc_eeprom_size = 0; |
| 525 | fxp_autosize_eeprom(sc); |
| 526 | if (sc->sc_eeprom_size == 0) { |
| 527 | aprint_error_dev(sc->sc_dev, "failed to detect EEPROM size\n"); |
| 528 | sc->sc_eeprom_size = 6; /* XXX panic here? */ |
| 529 | } |
| 530 | #ifdef DEBUG |
| 531 | aprint_debug_dev(sc->sc_dev, "detected %d word EEPROM\n", |
| 532 | 1 << sc->sc_eeprom_size); |
| 533 | #endif |
| 534 | |
| 535 | /* |
| 536 | * Get info about the primary PHY |
| 537 | */ |
| 538 | fxp_read_eeprom(sc, &data, 6, 1); |
| 539 | sc->phy_primary_device = |
| 540 | (data & FXP_PHY_DEVICE_MASK) >> FXP_PHY_DEVICE_SHIFT; |
| 541 | |
| 542 | /* |
| 543 | * Read MAC address. |
| 544 | */ |
| 545 | fxp_read_eeprom(sc, myea, 0, 3); |
| 546 | enaddr[0] = myea[0] & 0xff; |
| 547 | enaddr[1] = myea[0] >> 8; |
| 548 | enaddr[2] = myea[1] & 0xff; |
| 549 | enaddr[3] = myea[1] >> 8; |
| 550 | enaddr[4] = myea[2] & 0xff; |
| 551 | enaddr[5] = myea[2] >> 8; |
| 552 | |
| 553 | /* |
| 554 | * Systems based on the ICH2/ICH2-M chip from Intel, as well |
| 555 | * as some i82559 designs, have a defect where the chip can |
| 556 | * cause a PCI protocol violation if it receives a CU_RESUME |
| 557 | * command when it is entering the IDLE state. |
| 558 | * |
| 559 | * The work-around is to disable Dynamic Standby Mode, so that |
| 560 | * the chip never deasserts #CLKRUN, and always remains in the |
| 561 | * active state. |
| 562 | * |
| 563 | * Unfortunately, the only way to disable Dynamic Standby is |
| 564 | * to frob an EEPROM setting and reboot (the EEPROM setting |
| 565 | * is only consulted when the PCI bus comes out of reset). |
| 566 | * |
| 567 | * See Intel 82801BA/82801BAM Specification Update, Errata #30. |
| 568 | */ |
| 569 | if (sc->sc_flags & FXPF_HAS_RESUME_BUG) { |
| 570 | fxp_read_eeprom(sc, &data, 10, 1); |
| 571 | if (data & 0x02) { /* STB enable */ |
| 572 | aprint_error_dev(sc->sc_dev, "WARNING: " |
| 573 | "Disabling dynamic standby mode in EEPROM " |
| 574 | "to work around a\n" ); |
| 575 | aprint_normal_dev(sc->sc_dev, |
| 576 | "WARNING: hardware bug. You must reset " |
| 577 | "the system before using this\n" ); |
| 578 | aprint_normal_dev(sc->sc_dev, "WARNING: interface.\n" ); |
| 579 | data &= ~0x02; |
| 580 | fxp_write_eeprom(sc, &data, 10, 1); |
| 581 | aprint_normal_dev(sc->sc_dev, "new EEPROM ID: 0x%04x\n" , |
| 582 | data); |
| 583 | fxp_eeprom_update_cksum(sc); |
| 584 | } |
| 585 | } |
| 586 | |
| 587 | /* Receiver lock-up workaround detection. (FXPF_RECV_WORKAROUND) */ |
| 588 | /* Due to false positives we make it conditional on setting link1 */ |
| 589 | fxp_read_eeprom(sc, &data, 3, 1); |
| 590 | if ((data & 0x03) != 0x03) { |
| 591 | aprint_verbose_dev(sc->sc_dev, |
| 592 | "May need receiver lock-up workaround\n" ); |
| 593 | } |
| 594 | } |
| 595 | |
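| | /* |
| | * Shift the low `len' bits of `data' out to the EEPROM, MSB first, |
| | * toggling the EESK clock once per bit. |
| | */ |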
| 596 | static void |
| 597 | fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int len) |
| 598 | { |
| 599 | uint16_t reg; |
| 600 | int x; |
| 601 | |
| 602 | for (x = 1 << (len - 1); x != 0; x >>= 1) { |
| 603 | DELAY(40); |
| 604 | if (data & x) |
| 605 | reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; |
| 606 | else |
| 607 | reg = FXP_EEPROM_EECS; |
| 608 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); |
| 609 | DELAY(40); |
| 610 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, |
| 611 | reg | FXP_EEPROM_EESK); |
| 612 | DELAY(40); |
| 613 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); |
| 614 | } |
| 615 | DELAY(40); |
| 616 | } |
| 617 | |
| 618 | /* |
| 619 | * Figure out EEPROM size. |
| 620 | * |
| 621 | * 559's can have either 64-word or 256-word EEPROMs, the 558 |
| 622 | * datasheet only talks about 64-word EEPROMs, and the 557 datasheet |
| 623 | * talks about the existence of 16 to 256 word EEPROMs. |
| 624 | * |
| 625 | * The only known sizes are 64 and 256, where the 256 version is used |
| 626 | * by CardBus cards to store CIS information. |
| 627 | * |
| 628 | * The address is shifted in msb-to-lsb, and after the last |
| 629 | * address-bit the EEPROM is supposed to output a `dummy zero' bit, |
| 630 | * after which follows the actual data. We try to detect this zero, by |
| 631 | * probing the data-out bit in the EEPROM control register just after |
| 632 | * having shifted in a bit. If the bit is zero, we assume we've |
| 633 | * shifted enough address bits. The data-out pin should be tri-stated |
| 634 | * before this, which should read back as a logical one. |
| 635 | * |
| 636 | * Other ways to do this would be to try to read a register with known |
| 637 | * contents with a varying number of address bits, but no such |
| 638 | * register seems to be available. The high bits of register 10 are 01 |
| 639 | * on the 558 and 559, but apparently not on the 557. |
| 640 | * |
| 641 | * The Linux driver computes a checksum on the EEPROM data, but the |
| 642 | * value of this checksum is not very well documented. |
| 643 | */ |
| 644 | |
| 645 | void |
| 646 | fxp_autosize_eeprom(struct fxp_softc *sc) |
| 647 | { |
| 648 | int x; |
| 649 | |
| 650 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
| 651 | DELAY(40); |
| 652 | |
| 653 | /* Shift in read opcode. */ |
| 654 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3); |
| 655 | |
| 656 | /* |
| 657 | * Shift in address, wait for the dummy zero following a correct |
| 658 | * address shift. |
| 659 | */ |
| 660 | for (x = 1; x <= 8; x++) { |
| 661 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
| 662 | DELAY(40); |
| 663 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, |
| 664 | FXP_EEPROM_EECS | FXP_EEPROM_EESK); |
| 665 | DELAY(40); |
| 666 | if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & |
| 667 | FXP_EEPROM_EEDO) == 0) |
| 668 | break; |
| 669 | DELAY(40); |
| 670 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
| 671 | DELAY(40); |
| 672 | } |
| 673 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
| 674 | DELAY(40); |
| 675 | if (x != 6 && x != 8) { |
| 676 | #ifdef DEBUG |
| 677 | printf("%s: strange EEPROM size (%d)\n", |
| 678 | device_xname(sc->sc_dev), 1 << x); |
| 679 | #endif |
| 680 | } else |
| 681 | sc->sc_eeprom_size = x; |
| 682 | } |
| 683 | |
| 684 | /* |
| 685 | * Read from the serial EEPROM. Basically, you manually shift in |
| 686 | * the read opcode (one bit at a time) and then shift in the address, |
| 687 | * and then you shift out the data (all of this one bit at a time). |
| 688 | * The word size is 16 bits, so you have to provide the address for |
| 689 | * every 16 bits of data. |
| 690 | */ |
| 691 | void |
| 692 | fxp_read_eeprom(struct fxp_softc *sc, uint16_t *data, int offset, int words) |
| 693 | { |
| 694 | uint16_t reg; |
| 695 | int i, x; |
| 696 | |
| 697 | for (i = 0; i < words; i++) { |
| 698 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
| 699 | |
| 700 | /* Shift in read opcode. */ |
| 701 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3); |
| 702 | |
| 703 | /* Shift in address. */ |
| 704 | fxp_eeprom_shiftin(sc, i + offset, sc->sc_eeprom_size); |
| 705 | |
| 706 | reg = FXP_EEPROM_EECS; |
| 707 | data[i] = 0; |
| 708 | |
| 709 | /* Shift out data. */ |
| 710 | for (x = 16; x > 0; x--) { |
| 711 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, |
| 712 | reg | FXP_EEPROM_EESK); |
| 713 | DELAY(40); |
| 714 | if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & |
| 715 | FXP_EEPROM_EEDO) |
| 716 | data[i] |= (1 << (x - 1)); |
| 717 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); |
| 718 | DELAY(40); |
| 719 | } |
| 720 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
| 721 | DELAY(40); |
| 722 | } |
| 723 | } |
| 724 | |
| 725 | /* |
| 726 | * Write data to the serial EEPROM. |
| 727 | */ |
| 728 | void |
| 729 | fxp_write_eeprom(struct fxp_softc *sc, uint16_t *data, int offset, int words) |
| 730 | { |
| 731 | int i, j; |
| 732 | |
| 733 | for (i = 0; i < words; i++) { |
| 734 | /* Erase/write enable. */ |
| 735 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
| 736 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_ERASE, 3); |
| 737 | fxp_eeprom_shiftin(sc, 0x3 << (sc->sc_eeprom_size - 2), |
| 738 | sc->sc_eeprom_size); |
| 739 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
| 740 | DELAY(4); |
| 741 | |
| 742 | /* Shift in write opcode, address, data. */ |
| 743 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
| 744 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3); |
| 745 | fxp_eeprom_shiftin(sc, i + offset, sc->sc_eeprom_size); |
| 746 | fxp_eeprom_shiftin(sc, data[i], 16); |
| 747 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
| 748 | DELAY(4); |
| 749 | |
| 750 | /* Wait for the EEPROM to finish up. */ |
| 751 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
| 752 | DELAY(4); |
| 753 | for (j = 0; j < 1000; j++) { |
| 754 | if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & |
| 755 | FXP_EEPROM_EEDO) |
| 756 | break; |
| 757 | DELAY(50); |
| 758 | } |
| 759 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
| 760 | DELAY(4); |
| 761 | |
| 762 | /* Erase/write disable. */ |
| 763 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); |
| 764 | fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_ERASE, 3); |
| 765 | fxp_eeprom_shiftin(sc, 0, sc->sc_eeprom_size); |
| 766 | CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); |
| 767 | DELAY(4); |
| 768 | } |
| 769 | } |
| 770 | |
| 771 | /* |
| 772 | * Update the checksum of the EEPROM. |
| 773 | */ |
| 774 | void |
| 775 | fxp_eeprom_update_cksum(struct fxp_softc *sc) |
| 776 | { |
| 777 | int i; |
| 778 | uint16_t data, cksum; |
| 779 | |
| 780 | cksum = 0; |
| 781 | for (i = 0; i < (1 << sc->sc_eeprom_size) - 1; i++) { |
| 782 | fxp_read_eeprom(sc, &data, i, 1); |
| 783 | cksum += data; |
| 784 | } |
| 785 | i = (1 << sc->sc_eeprom_size) - 1; |
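| | /* The checksum word is chosen so that all EEPROM words sum to 0xbaba. */ |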
| 786 | cksum = 0xbaba - cksum; |
| 787 | fxp_read_eeprom(sc, &data, i, 1); |
| 788 | fxp_write_eeprom(sc, &cksum, i, 1); |
| 789 | log(LOG_INFO, "%s: EEPROM checksum @ 0x%x: 0x%04x -> 0x%04x\n", |
| 790 | device_xname(sc->sc_dev), i, data, cksum); |
| 791 | } |
| 792 | |
| 793 | /* |
| 794 | * Start packet transmission on the interface. |
| 795 | */ |
| 796 | void |
| 797 | fxp_start(struct ifnet *ifp) |
| 798 | { |
| 799 | struct fxp_softc *sc = ifp->if_softc; |
| 800 | struct mbuf *m0, *m; |
| 801 | struct fxp_txdesc *txd; |
| 802 | struct fxp_txsoft *txs; |
| 803 | bus_dmamap_t dmamap; |
| 804 | int error, lasttx, nexttx, opending, seg, nsegs, len; |
| 805 | |
| 806 | /* |
| 807 | * If we want a re-init, bail out now. |
| 808 | */ |
| 809 | if (sc->sc_flags & FXPF_WANTINIT) { |
| 810 | ifp->if_flags |= IFF_OACTIVE; |
| 811 | return; |
| 812 | } |
| 813 | |
| 814 | if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) |
| 815 | return; |
| 816 | |
| 817 | /* |
| 818 | * Remember the previous txpending and the current lasttx. |
| 819 | */ |
| 820 | opending = sc->sc_txpending; |
| 821 | lasttx = sc->sc_txlast; |
| 822 | |
| 823 | /* |
| 824 | * Loop through the send queue, setting up transmit descriptors |
| 825 | * until we drain the queue, or use up all available transmit |
| 826 | * descriptors. |
| 827 | */ |
| 828 | for (;;) { |
| 829 | struct fxp_tbd *tbdp; |
| 830 | int csum_flags; |
| 831 | |
| 832 | /* |
| 833 | * Grab a packet off the queue. |
| 834 | */ |
| 835 | IFQ_POLL(&ifp->if_snd, m0); |
| 836 | if (m0 == NULL) |
| 837 | break; |
| 838 | m = NULL; |
| 839 | |
| 840 | if (sc->sc_txpending == FXP_NTXCB - 1) { |
| 841 | FXP_EVCNT_INCR(&sc->sc_ev_txstall); |
| 842 | break; |
| 843 | } |
| 844 | |
| 845 | /* |
| 846 | * Get the next available transmit descriptor. |
| 847 | */ |
| 848 | nexttx = FXP_NEXTTX(sc->sc_txlast); |
| 849 | txd = FXP_CDTX(sc, nexttx); |
| 850 | txs = FXP_DSTX(sc, nexttx); |
| 851 | dmamap = txs->txs_dmamap; |
| 852 | |
| 853 | /* |
| 854 | * Load the DMA map. If this fails, the packet either |
| 855 | * didn't fit in the allotted number of frags, or we were |
| 856 | * short on resources. In this case, we'll copy and try |
| 857 | * again. |
| 858 | */ |
| 859 | if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, |
| 860 | BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { |
| 861 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
| 862 | if (m == NULL) { |
| 863 | log(LOG_ERR, "%s: unable to allocate Tx mbuf\n", |
| 864 | device_xname(sc->sc_dev)); |
| 865 | break; |
| 866 | } |
| 867 | MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); |
| 868 | if (m0->m_pkthdr.len > MHLEN) { |
| 869 | MCLGET(m, M_DONTWAIT); |
| 870 | if ((m->m_flags & M_EXT) == 0) { |
| 871 | log(LOG_ERR, "%s: unable to allocate " |
| 872 | "Tx cluster\n" , |
| 873 | device_xname(sc->sc_dev)); |
| 874 | m_freem(m); |
| 875 | break; |
| 876 | } |
| 877 | } |
| 878 | m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); |
| 879 | m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; |
| 880 | error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, |
| 881 | m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); |
| 882 | if (error) { |
| 883 | log(LOG_ERR, "%s: unable to load Tx buffer, " |
| 884 | "error = %d\n" , |
| 885 | device_xname(sc->sc_dev), error); |
| 886 | break; |
| 887 | } |
| 888 | } |
| 889 | |
| 890 | IFQ_DEQUEUE(&ifp->if_snd, m0); |
| 891 | csum_flags = m0->m_pkthdr.csum_flags; |
| 892 | if (m != NULL) { |
| 893 | m_freem(m0); |
| 894 | m0 = m; |
| 895 | } |
| 896 | |
| 897 | /* Initialize the fraglist. */ |
| 898 | tbdp = txd->txd_tbd; |
| 899 | len = m0->m_pkthdr.len; |
| 900 | nsegs = dmamap->dm_nsegs; |
| 901 | if (sc->sc_flags & FXPF_EXT_RFA) |
| 902 | tbdp++; |
| 903 | for (seg = 0; seg < nsegs; seg++) { |
| 904 | tbdp[seg].tb_addr = |
| 905 | htole32(dmamap->dm_segs[seg].ds_addr); |
| 906 | tbdp[seg].tb_size = |
| 907 | htole32(dmamap->dm_segs[seg].ds_len); |
| 908 | } |
| 909 | if (__predict_false(len <= FXP_IP4CSUMTX_PADLEN && |
| 910 | (csum_flags & M_CSUM_IPv4) != 0)) { |
| 911 | /* |
| 912 | * Pad short packets to avoid ip4csum-tx bug. |
| 913 | * |
| 914 | * XXX Should we still consider if such short |
| 915 | * (36 bytes or less) packets might already |
| 916 | * occupy FXP_IPCB_NTXSEG (15) fragments here? |
| 917 | */ |
| 918 | KASSERT(nsegs < FXP_IPCB_NTXSEG); |
| 919 | nsegs++; |
| 920 | tbdp[seg].tb_addr = htole32(FXP_CDTXPADADDR(sc)); |
| 921 | tbdp[seg].tb_size = |
| 922 | htole32(FXP_IP4CSUMTX_PADLEN + 1 - len); |
| 923 | } |
| 924 | |
| 925 | /* Sync the DMA map. */ |
| 926 | bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, |
| 927 | BUS_DMASYNC_PREWRITE); |
| 928 | |
| 929 | /* |
| 930 | * Store a pointer to the packet so we can free it later. |
| 931 | */ |
| 932 | txs->txs_mbuf = m0; |
| 933 | |
| 934 | /* |
| 935 | * Initialize the transmit descriptor. |
| 936 | */ |
| 937 | /* BIG_ENDIAN: no need to swap to store 0 */ |
| 938 | txd->txd_txcb.cb_status = 0; |
| 939 | txd->txd_txcb.cb_command = |
| 940 | sc->sc_txcmd | htole16(FXP_CB_COMMAND_SF); |
| 941 | txd->txd_txcb.tx_threshold = tx_threshold; |
| 942 | txd->txd_txcb.tbd_number = nsegs; |
| 943 | |
| 944 | KASSERT((csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) == 0); |
| 945 | if (sc->sc_flags & FXPF_EXT_RFA) { |
| 946 | struct m_tag *vtag; |
| 947 | struct fxp_ipcb *ipcb; |
| 948 | /* |
| 949 | * Deal with TCP/IP checksum offload. Note that |
| 950 | * in order for TCP checksum offload to work, |
| 951 | * the pseudo header checksum must have already |
| 952 | * been computed and stored in the checksum field |
| 953 | * in the TCP header. The stack should have |
| 954 | * already done this for us. |
| 955 | */ |
| 956 | ipcb = &txd->txd_u.txdu_ipcb; |
| 957 | memset(ipcb, 0, sizeof(*ipcb)); |
| 958 | /* |
| 959 | * always do hardware parsing. |
| 960 | */ |
| 961 | ipcb->ipcb_ip_activation_high = |
| 962 | FXP_IPCB_HARDWAREPARSING_ENABLE; |
| 963 | /* |
| 964 | * ip checksum offloading. |
| 965 | */ |
| 966 | if (csum_flags & M_CSUM_IPv4) { |
| 967 | ipcb->ipcb_ip_schedule |= |
| 968 | FXP_IPCB_IP_CHECKSUM_ENABLE; |
| 969 | } |
| 970 | /* |
| 971 | * TCP/UDP checksum offloading. |
| 972 | */ |
| 973 | if (csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) { |
| 974 | ipcb->ipcb_ip_schedule |= |
| 975 | FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; |
| 976 | } |
| 977 | |
| 978 | /* |
| 979 | * request VLAN tag insertion if needed. |
| 980 | */ |
| 981 | vtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0); |
| 982 | if (vtag) { |
| 983 | ipcb->ipcb_vlan_id = |
| 984 | htobe16(*(u_int *)(vtag + 1)); |
| 985 | ipcb->ipcb_ip_activation_high |= |
| 986 | FXP_IPCB_INSERTVLAN_ENABLE; |
| 987 | } |
| 988 | } else { |
| 989 | KASSERT((csum_flags & |
| 990 | (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) == 0); |
| 991 | } |
| 992 | |
| 993 | FXP_CDTXSYNC(sc, nexttx, |
| 994 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
| 995 | |
| 996 | /* Advance the tx pointer. */ |
| 997 | sc->sc_txpending++; |
| 998 | sc->sc_txlast = nexttx; |
| 999 | |
| 1000 | /* |
| 1001 | * Pass packet to bpf if there is a listener. |
| 1002 | */ |
| 1003 | bpf_mtap(ifp, m0); |
| 1004 | } |
| 1005 | |
| 1006 | if (sc->sc_txpending == FXP_NTXCB - 1) { |
| 1007 | /* No more slots; notify upper layer. */ |
| 1008 | ifp->if_flags |= IFF_OACTIVE; |
| 1009 | } |
| 1010 | |
| 1011 | if (sc->sc_txpending != opending) { |
| 1012 | /* |
| 1013 | * We enqueued packets. If the transmitter was idle, |
| 1014 | * reset the txdirty pointer. |
| 1015 | */ |
| 1016 | if (opending == 0) |
| 1017 | sc->sc_txdirty = FXP_NEXTTX(lasttx); |
| 1018 | |
| 1019 | /* |
| 1020 | * Cause the chip to interrupt and suspend command |
| 1021 | * processing once the last packet we've enqueued |
| 1022 | * has been transmitted. |
| 1023 | * |
| 1024 | * To avoid a race between updating status bits |
| 1025 | * by the fxp chip and clearing command bits |
| 1026 | * by this function on machines which don't have |
| 1027 | * atomic methods to clear/set bits in memory |
| 1028 | * smaller than 32 bits (both cb_status and cb_command |
| 1029 | * members are uint16_t and in the same 32-bit word), |
| 1030 | * we have to prepare a dummy TX descriptor which has a |
| 1031 | * NOP command and just causes a TX completion interrupt. |
| 1032 | */ |
| 1033 | sc->sc_txpending++; |
| 1034 | sc->sc_txlast = FXP_NEXTTX(sc->sc_txlast); |
| 1035 | txd = FXP_CDTX(sc, sc->sc_txlast); |
| 1036 | /* BIG_ENDIAN: no need to swap to store 0 */ |
| 1037 | txd->txd_txcb.cb_status = 0; |
| 1038 | txd->txd_txcb.cb_command = htole16(FXP_CB_COMMAND_NOP | |
| 1039 | FXP_CB_COMMAND_I | FXP_CB_COMMAND_S); |
| 1040 | FXP_CDTXSYNC(sc, sc->sc_txlast, |
| 1041 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
| 1042 | |
| 1043 | /* |
| 1044 | * The entire packet chain is set up. Clear the suspend bit |
| 1045 | * on the command prior to the first packet we set up. |
| 1046 | */ |
| 1047 | FXP_CDTXSYNC(sc, lasttx, |
| 1048 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
| 1049 | FXP_CDTX(sc, lasttx)->txd_txcb.cb_command &= |
| 1050 | htole16(~FXP_CB_COMMAND_S); |
| 1051 | FXP_CDTXSYNC(sc, lasttx, |
| 1052 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
| 1053 | |
| 1054 | /* |
| 1055 | * Issue a Resume command in case the chip was suspended. |
| 1056 | */ |
| 1057 | fxp_scb_wait(sc); |
| 1058 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); |
| 1059 | |
| 1060 | /* Set a watchdog timer in case the chip flakes out. */ |
| 1061 | ifp->if_timer = 5; |
| 1062 | } |
| 1063 | } |
| 1064 | |
| 1065 | /* |
| 1066 | * Process interface interrupts. |
| 1067 | */ |
| 1068 | int |
| 1069 | fxp_intr(void *arg) |
| 1070 | { |
| 1071 | struct fxp_softc *sc = arg; |
| 1072 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
| 1073 | bus_dmamap_t rxmap; |
| 1074 | int claimed = 0, rnr; |
| 1075 | uint8_t statack; |
| 1076 | |
| 1077 | if (!device_is_active(sc->sc_dev) || sc->sc_enabled == 0) |
| 1078 | return (0); |
| 1079 | /* |
| 1080 | * If the interface isn't running, don't try to |
| 1081 | * service the interrupt; just ack it and bail. |
| 1082 | */ |
| 1083 | if ((ifp->if_flags & IFF_RUNNING) == 0) { |
| 1084 | statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK); |
| 1085 | if (statack) { |
| 1086 | claimed = 1; |
| 1087 | CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); |
| 1088 | } |
| 1089 | return (claimed); |
| 1090 | } |
| 1091 | |
| 1092 | while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) { |
| 1093 | claimed = 1; |
| 1094 | |
| 1095 | /* |
| 1096 | * First ACK all the interrupts in this pass. |
| 1097 | */ |
| 1098 | CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); |
| 1099 | |
| 1100 | /* |
| 1101 | * Process receiver interrupts. If a no-resource (RNR) |
| 1102 | * condition exists, get whatever packets we can and |
| 1103 | * re-start the receiver. |
| 1104 | */ |
| 1105 | rnr = (statack & (FXP_SCB_STATACK_RNR | FXP_SCB_STATACK_SWI)) ? |
| 1106 | 1 : 0; |
| 1107 | if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR | |
| 1108 | FXP_SCB_STATACK_SWI)) { |
| 1109 | FXP_EVCNT_INCR(&sc->sc_ev_rxintr); |
| 1110 | rnr |= fxp_rxintr(sc); |
| 1111 | } |
| 1112 | |
| 1113 | /* |
| 1114 | * Free any finished transmit mbuf chains. |
| 1115 | */ |
| 1116 | if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) { |
| 1117 | FXP_EVCNT_INCR(&sc->sc_ev_txintr); |
| 1118 | fxp_txintr(sc); |
| 1119 | |
| 1120 | /* |
| 1121 | * Try to get more packets going. |
| 1122 | */ |
| 1123 | fxp_start(ifp); |
| 1124 | |
| 1125 | if (sc->sc_txpending == 0) { |
| 1126 | /* |
| 1127 | * Tell them that they can re-init now. |
| 1128 | */ |
| 1129 | if (sc->sc_flags & FXPF_WANTINIT) |
| 1130 | wakeup(sc); |
| 1131 | } |
| 1132 | } |
| 1133 | |
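| | /* |
| | * On a receiver-not-ready condition, abort the receive unit and |
| | * restart it at the first buffer still on the receive queue. |
| | */ |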
| 1134 | if (rnr) { |
| 1135 | fxp_scb_wait(sc); |
| 1136 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_ABORT); |
| 1137 | rxmap = M_GETCTX(sc->sc_rxq.ifq_head, bus_dmamap_t); |
| 1138 | fxp_scb_wait(sc); |
| 1139 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, |
| 1140 | rxmap->dm_segs[0].ds_addr + |
| 1141 | RFA_ALIGNMENT_FUDGE); |
| 1142 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); |
| 1143 | } |
| 1144 | } |
| 1145 | |
| 1146 | if (claimed) |
| 1147 | rnd_add_uint32(&sc->rnd_source, statack); |
| 1148 | return (claimed); |
| 1149 | } |
| 1150 | |
| 1151 | /* |
| 1152 | * Handle transmit completion interrupts. |
| 1153 | */ |
| 1154 | void |
| 1155 | fxp_txintr(struct fxp_softc *sc) |
| 1156 | { |
| 1157 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
| 1158 | struct fxp_txdesc *txd; |
| 1159 | struct fxp_txsoft *txs; |
| 1160 | int i; |
| 1161 | uint16_t txstat; |
| 1162 | |
| 1163 | ifp->if_flags &= ~IFF_OACTIVE; |
| 1164 | for (i = sc->sc_txdirty; sc->sc_txpending != 0; |
| 1165 | i = FXP_NEXTTX(i), sc->sc_txpending--) { |
| 1166 | txd = FXP_CDTX(sc, i); |
| 1167 | txs = FXP_DSTX(sc, i); |
| 1168 | |
| 1169 | FXP_CDTXSYNC(sc, i, |
| 1170 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
| 1171 | |
| 1172 | /* skip dummy NOP TX descriptor */ |
| 1173 | if ((le16toh(txd->txd_txcb.cb_command) & FXP_CB_COMMAND_CMD) |
| 1174 | == FXP_CB_COMMAND_NOP) |
| 1175 | continue; |
| 1176 | |
| 1177 | txstat = le16toh(txd->txd_txcb.cb_status); |
| 1178 | |
| 1179 | if ((txstat & FXP_CB_STATUS_C) == 0) |
| 1180 | break; |
| 1181 | |
| 1182 | bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, |
| 1183 | 0, txs->txs_dmamap->dm_mapsize, |
| 1184 | BUS_DMASYNC_POSTWRITE); |
| 1185 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); |
| 1186 | m_freem(txs->txs_mbuf); |
| 1187 | txs->txs_mbuf = NULL; |
| 1188 | } |
| 1189 | |
| 1190 | /* Update the dirty transmit buffer pointer. */ |
| 1191 | sc->sc_txdirty = i; |
| 1192 | |
| 1193 | /* |
| 1194 | * Cancel the watchdog timer if there are no pending |
| 1195 | * transmissions. |
| 1196 | */ |
| 1197 | if (sc->sc_txpending == 0) |
| 1198 | ifp->if_timer = 0; |
| 1199 | } |
| 1200 | |
| 1201 | /* |
| 1202 | * fxp_rx_hwcksum: check status of H/W offloading for received packets. |
| 1203 | */ |
| 1204 | |
| 1205 | void |
| 1206 | fxp_rx_hwcksum(struct fxp_softc *sc, struct mbuf *m, const struct fxp_rfa *rfa, |
| 1207 | u_int len) |
| 1208 | { |
| 1209 | uint32_t csum_data; |
| 1210 | int csum_flags; |
| 1211 | |
| 1212 | /* |
| 1213 | * check H/W Checksumming. |
| 1214 | */ |
| 1215 | |
| 1216 | csum_flags = 0; |
| 1217 | csum_data = 0; |
| 1218 | |
| 1219 | if ((sc->sc_flags & FXPF_EXT_RFA) != 0) { |
| 1220 | uint8_t csum_stat; |
| 1221 | |
| 1222 | csum_stat = rfa->cksum_stat; |
| 1223 | if ((rfa->rfa_status & htole16(FXP_RFA_STATUS_PARSE)) == 0) |
| 1224 | goto out; |
| 1225 | |
| 1226 | if (csum_stat & FXP_RFDX_CS_IP_CSUM_BIT_VALID) { |
| 1227 | csum_flags = M_CSUM_IPv4; |
| 1228 | if ((csum_stat & FXP_RFDX_CS_IP_CSUM_VALID) == 0) |
| 1229 | csum_flags |= M_CSUM_IPv4_BAD; |
| 1230 | } |
| 1231 | |
| 1232 | if (csum_stat & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) { |
| 1233 | csum_flags |= (M_CSUM_TCPv4|M_CSUM_UDPv4); /* XXX */ |
| 1234 | if ((csum_stat & FXP_RFDX_CS_TCPUDP_CSUM_VALID) == 0) |
| 1235 | csum_flags |= M_CSUM_TCP_UDP_BAD; |
| 1236 | } |
| 1237 | |
| 1238 | } else if ((sc->sc_flags & FXPF_82559_RXCSUM) != 0) { |
| 1239 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
| 1240 | struct ether_header *eh; |
| 1241 | struct ip *ip; |
| 1242 | struct udphdr *uh; |
| 1243 | u_int hlen, pktlen; |
| 1244 | |
| 1245 | if (len < ETHER_HDR_LEN + sizeof(struct ip)) |
| 1246 | goto out; |
| 1247 | pktlen = len - ETHER_HDR_LEN; |
| 1248 | eh = mtod(m, struct ether_header *); |
| 1249 | if (ntohs(eh->ether_type) != ETHERTYPE_IP) |
| 1250 | goto out; |
| 1251 | ip = (struct ip *)((uint8_t *)eh + ETHER_HDR_LEN); |
| 1252 | if (ip->ip_v != IPVERSION) |
| 1253 | goto out; |
| 1254 | |
| 1255 | hlen = ip->ip_hl << 2; |
| 1256 | if (hlen < sizeof(struct ip)) |
| 1257 | goto out; |
| 1258 | |
| 1259 | /* |
| 1260 | * Bail if the packet is too short, has random trailing garbage, |
| 1261 | * is truncated, is a fragment, or has an Ethernet pad. |
| 1262 | */ |
| 1263 | if (ntohs(ip->ip_len) < hlen || |
| 1264 | ntohs(ip->ip_len) != pktlen || |
| 1265 | (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0) |
| 1266 | goto out; |
| 1267 | |
| 1268 | switch (ip->ip_p) { |
| 1269 | case IPPROTO_TCP: |
| 1270 | if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0 || |
| 1271 | pktlen < (hlen + sizeof(struct tcphdr))) |
| 1272 | goto out; |
| 1273 | csum_flags = |
| 1274 | M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR; |
| 1275 | break; |
| 1276 | case IPPROTO_UDP: |
| 1277 | if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0 || |
| 1278 | pktlen < (hlen + sizeof(struct udphdr))) |
| 1279 | goto out; |
| 1280 | uh = (struct udphdr *)((uint8_t *)ip + hlen); |
| 1281 | if (uh->uh_sum == 0) |
| 1282 | goto out; /* no checksum */ |
| 1283 | csum_flags = |
| 1284 | M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR; |
| 1285 | break; |
| 1286 | default: |
| 1287 | goto out; |
| 1288 | } |
| 1289 | |
| 1290 | /* Extract computed checksum. */ |
| 1291 | csum_data = be16dec(mtod(m, uint8_t *) + len); |
| 1292 | |
| 1293 | /* |
| 1294 | * The computed checksum includes IP headers, |
| 1295 | * so we have to deduct them. |
| 1296 | */ |
| 1297 | #if 0 |
| 1298 | /* |
| 1299 | * But in the TCP/UDP layer we can assume the IP header is valid, |
| 1300 | * i.e. the sum of the whole IP header should be 0xffff, |
| 1301 | * so we don't have to bother to deduct it. |
| 1302 | */ |
| 1303 | if (hlen > 0) { |
| 1304 | uint32_t hsum; |
| 1305 | const uint16_t *iphdr; |
| 1306 | hsum = 0; |
| 1307 | iphdr = (uint16_t *)ip; |
| 1308 | |
| 1309 | while (hlen > 1) { |
| 1310 | hsum += ntohs(*iphdr++); |
| 1311 | hlen -= sizeof(uint16_t); |
| 1312 | } |
| 1313 | while (hsum >> 16) |
| 1314 | hsum = (hsum >> 16) + (hsum & 0xffff); |
| 1315 | |
| 1316 | csum_data += (uint16_t)~hsum; |
| 1317 | |
| 1318 | while (csum_data >> 16) |
| 1319 | csum_data = |
| 1320 | (csum_data >> 16) + (csum_data & 0xffff); |
| 1321 | } |
| 1322 | #endif |
| 1323 | } |
| 1324 | out: |
| 1325 | m->m_pkthdr.csum_flags = csum_flags; |
| 1326 | m->m_pkthdr.csum_data = csum_data; |
| 1327 | } |
| 1328 | |
| 1329 | /* |
| 1330 | * Handle receive interrupts. |
| 1331 | */ |
| 1332 | int |
| 1333 | fxp_rxintr(struct fxp_softc *sc) |
| 1334 | { |
| 1335 | struct ethercom *ec = &sc->sc_ethercom; |
| 1336 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
| 1337 | struct mbuf *m, *m0; |
| 1338 | bus_dmamap_t rxmap; |
| 1339 | struct fxp_rfa *rfa; |
| 1340 | int rnr; |
| 1341 | uint16_t len, rxstat; |
| 1342 | |
| 1343 | rnr = 0; |
| 1344 | |
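| | /* |
| | * Process completed receive frames until we reach one the chip |
| | * has not finished with yet. |
| | */ |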
| 1345 | for (;;) { |
| 1346 | m = sc->sc_rxq.ifq_head; |
| 1347 | rfa = FXP_MTORFA(m); |
| 1348 | rxmap = M_GETCTX(m, bus_dmamap_t); |
| 1349 | |
| 1350 | FXP_RFASYNC(sc, m, |
| 1351 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
| 1352 | |
| 1353 | rxstat = le16toh(rfa->rfa_status); |
| 1354 | |
| 1355 | if ((rxstat & FXP_RFA_STATUS_RNR) != 0) |
| 1356 | rnr = 1; |
| 1357 | |
| 1358 | if ((rxstat & FXP_RFA_STATUS_C) == 0) { |
| 1359 | /* |
| 1360 | * We have processed all of the |
| 1361 | * receive buffers. |
| 1362 | */ |
| 1363 | FXP_RFASYNC(sc, m, BUS_DMASYNC_PREREAD); |
| 1364 | return rnr; |
| 1365 | } |
| 1366 | |
| 1367 | IF_DEQUEUE(&sc->sc_rxq, m); |
| 1368 | |
| 1369 | FXP_RXBUFSYNC(sc, m, BUS_DMASYNC_POSTREAD); |
| 1370 | |
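| | /* |
| | * actual_size also carries status bits above the byte count; |
| | * mask with the buffer size to recover the packet length. |
| | */ |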
| 1371 | len = le16toh(rfa->actual_size) & |
| 1372 | (m->m_ext.ext_size - 1); |
| 1373 | if ((sc->sc_flags & FXPF_82559_RXCSUM) != 0) { |
| 1374 | /* Adjust for appended checksum bytes. */ |
| 1375 | len -= sizeof(uint16_t); |
| 1376 | } |
| 1377 | |
| 1378 | if (len < sizeof(struct ether_header)) { |
| 1379 | /* |
| 1380 | * Runt packet; drop it now. |
| 1381 | */ |
| 1382 | FXP_INIT_RFABUF(sc, m); |
| 1383 | continue; |
| 1384 | } |
| 1385 | |
| 1386 | /* |
| 1387 | * If support for 802.1Q VLAN sized frames is |
| 1388 | * enabled, we need to do some additional error |
| 1389 | * checking (as we are saving bad frames, in |
| 1390 | * order to receive the larger ones). |
| 1391 | */ |
| 1392 | if ((ec->ec_capenable & ETHERCAP_VLAN_MTU) != 0 && |
| 1393 | (rxstat & (FXP_RFA_STATUS_OVERRUN| |
| 1394 | FXP_RFA_STATUS_RNR| |
| 1395 | FXP_RFA_STATUS_ALIGN| |
| 1396 | FXP_RFA_STATUS_CRC)) != 0) { |
| 1397 | FXP_INIT_RFABUF(sc, m); |
| 1398 | continue; |
| 1399 | } |
| 1400 | |
| 1401 | /* |
| 1402 | * If the chip stripped a VLAN tag, attach it to the mbuf. |
| 1403 | */ |
| 1404 | if ((sc->sc_flags & FXPF_EXT_RFA) != 0 && |
| 1405 | (rfa->rfa_status & htole16(FXP_RFA_STATUS_VLAN)) != 0) { |
| 1406 | struct m_tag *vtag; |
| 1407 | |
| 1408 | vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), |
| 1409 | M_NOWAIT); |
| 1410 | if (vtag == NULL) |
| 1411 | goto dropit; |
| 1412 | *(u_int *)(vtag + 1) = be16toh(rfa->vlan_id); |
| 1413 | m_tag_prepend(m, vtag); |
| 1414 | } |
| 1415 | |
| 1416 | /* Do checksum checking. */ |
| 1417 | if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0) |
| 1418 | fxp_rx_hwcksum(sc, m, rfa, len); |
| 1419 | |
| 1420 | /* |
| 1421 | * If the packet is small enough to fit in a |
| 1422 | * single header mbuf, allocate one and copy |
| 1423 | * the data into it. This greatly reduces |
| 1424 | * memory consumption when we receive lots |
| 1425 | * of small packets. |
| 1426 | * |
| 1427 | * Otherwise, we add a new buffer to the receive |
| 1428 | * chain. If this fails, we drop the packet and |
| 1429 | * recycle the old buffer. |
| 1430 | */ |
| 1431 | if (fxp_copy_small != 0 && len <= MHLEN) { |
| 1432 | MGETHDR(m0, M_DONTWAIT, MT_DATA); |
| 1433 | if (m0 == NULL) |
| 1434 | goto dropit; |
| 1435 | MCLAIM(m0, &sc->sc_ethercom.ec_rx_mowner); |
| 1436 | memcpy(mtod(m0, void *), |
| 1437 | mtod(m, void *), len); |
| 1438 | m0->m_pkthdr.csum_flags = m->m_pkthdr.csum_flags; |
| 1439 | m0->m_pkthdr.csum_data = m->m_pkthdr.csum_data; |
| 1440 | FXP_INIT_RFABUF(sc, m); |
| 1441 | m = m0; |
| 1442 | } else { |
| 1443 | if (fxp_add_rfabuf(sc, rxmap, 1) != 0) { |
| 1444 | dropit: |
| 1445 | ifp->if_ierrors++; |
| 1446 | FXP_INIT_RFABUF(sc, m); |
| 1447 | continue; |
| 1448 | } |
| 1449 | } |
| 1450 | |
| 1451 | m_set_rcvif(m, ifp); |
| 1452 | m->m_pkthdr.len = m->m_len = len; |
| 1453 | |
| 1454 | /* |
| 1455 | * Pass this up to any BPF listeners, but only |
| 1456 | * pass it up the stack if it's for us. |
| 1457 | */ |
| 1458 | bpf_mtap(ifp, m); |
| 1459 | |
| 1460 | /* Pass it on. */ |
| 1461 | if_percpuq_enqueue(ifp->if_percpuq, m); |
| 1462 | } |
| 1463 | } |
| 1464 | |
| 1465 | /* |
| 1466 | * Update packet in/out/collision statistics. The i82557 doesn't |
| 1467 | * allow you to access these counters without doing a fairly |
| 1468 | * expensive DMA to get _all_ of the statistics it maintains, so |
| 1469 | * we do this operation here only once per second. The statistics |
| 1470 | * counters in the kernel are updated from the previous dump-stats |
| 1471 | * DMA and then a new dump-stats DMA is started. The on-chip |
| 1472 | * counters are zeroed when the DMA completes. If we can't start |
| 1473 | * the DMA immediately, we don't wait - we just prepare to read |
| 1474 | * them again next time. |
| 1475 | */ |
| 1476 | void |
| 1477 | fxp_tick(void *arg) |
| 1478 | { |
| 1479 | struct fxp_softc *sc = arg; |
| 1480 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
| 1481 | struct fxp_stats *sp = &sc->sc_control_data->fcd_stats; |
| 1482 | int s; |
| 1483 | |
| 1484 | if (!device_is_active(sc->sc_dev)) |
| 1485 | return; |
| 1486 | |
| 1487 | s = splnet(); |
| 1488 | |
| 1489 | FXP_CDSTATSSYNC(sc, BUS_DMASYNC_POSTREAD); |
| 1490 | |
| 1491 | ifp->if_opackets += le32toh(sp->tx_good); |
| 1492 | ifp->if_collisions += le32toh(sp->tx_total_collisions); |
| 1493 | if (sp->rx_good) { |
| 1494 | ifp->if_ipackets += le32toh(sp->rx_good); |
| 1495 | sc->sc_rxidle = 0; |
| 1496 | } else if (sc->sc_flags & FXPF_RECV_WORKAROUND) { |
| 1497 | sc->sc_rxidle++; |
| 1498 | } |
| 1499 | ifp->if_ierrors += |
| 1500 | le32toh(sp->rx_crc_errors) + |
| 1501 | le32toh(sp->rx_alignment_errors) + |
| 1502 | le32toh(sp->rx_rnr_errors) + |
| 1503 | le32toh(sp->rx_overrun_errors); |
| 1504 | /* |
| 1505 | * If any transmit underruns occurred, bump up the transmit |
| 1506 | * threshold by another 512 bytes (64 * 8). |
| 1507 | */ |
| 1508 | if (sp->tx_underruns) { |
| 1509 | ifp->if_oerrors += le32toh(sp->tx_underruns); |
| 1510 | if (tx_threshold < 192) |
| 1511 | tx_threshold += 64; |
| 1512 | } |
| 1513 | #ifdef FXP_EVENT_COUNTERS |
| 1514 | if (sc->sc_flags & FXPF_FC) { |
| 1515 | sc->sc_ev_txpause.ev_count += sp->tx_pauseframes; |
| 1516 | sc->sc_ev_rxpause.ev_count += sp->rx_pauseframes; |
| 1517 | } |
| 1518 | #endif |
| 1519 | |
| 1520 | /* |
| 1521 | * If we haven't received any packets in FXP_MAX_RX_IDLE seconds, |
| 1522 | * then assume the receiver has locked up and attempt to clear |
| 1523 | * the condition by reprogramming the multicast filter (actually, |
| 1524 | * resetting the interface). This is a work-around for a bug in |
| 1525 | * the 82557 where the receiver locks up if it gets certain types |
| 1526 | * of garbage in the synchronization bits prior to the packet header. |
| 1527 | * This bug is supposed to only occur in 10Mbps mode, but has been |
| 1528 | * seen to occur in 100Mbps mode as well (perhaps due to a 10/100 |
| 1529 | * speed transition). |
| 1530 | */ |
| 1531 | if (sc->sc_rxidle > FXP_MAX_RX_IDLE) { |
| 1532 | (void) fxp_init(ifp); |
| 1533 | splx(s); |
| 1534 | return; |
| 1535 | } |
| 1536 | /* |
| 1537 | * If there is no pending command, start another stats |
| 1538 | * dump. Otherwise punt for now. |
| 1539 | */ |
| 1540 | if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) { |
| 1541 | /* |
| 1542 | * Start another stats dump. |
| 1543 | */ |
| 1544 | FXP_CDSTATSSYNC(sc, BUS_DMASYNC_PREREAD); |
| 1545 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET); |
| 1546 | } else { |
| 1547 | /* |
| 1548 | * A previous command is still waiting to be accepted. |
| 1549 | * Just zero our copy of the stats and wait for the |
| 1550 | * next timer event to update them. |
| 1551 | */ |
| 1552 | /* BIG_ENDIAN: no swap required to store 0 */ |
| 1553 | sp->tx_good = 0; |
| 1554 | sp->tx_underruns = 0; |
| 1555 | sp->tx_total_collisions = 0; |
| 1556 | |
| 1557 | sp->rx_good = 0; |
| 1558 | sp->rx_crc_errors = 0; |
| 1559 | sp->rx_alignment_errors = 0; |
| 1560 | sp->rx_rnr_errors = 0; |
| 1561 | sp->rx_overrun_errors = 0; |
| 1562 | if (sc->sc_flags & FXPF_FC) { |
| 1563 | sp->tx_pauseframes = 0; |
| 1564 | sp->rx_pauseframes = 0; |
| 1565 | } |
| 1566 | } |
| 1567 | |
| 1568 | if (sc->sc_flags & FXPF_MII) { |
| 1569 | /* Tick the MII clock. */ |
| 1570 | mii_tick(&sc->sc_mii); |
| 1571 | } |
| 1572 | |
| 1573 | splx(s); |
| 1574 | |
| 1575 | /* |
| 1576 | * Schedule another timeout one second from now. |
| 1577 | */ |
| 1578 | callout_reset(&sc->sc_callout, hz, fxp_tick, sc); |
| 1579 | } |
| 1580 | |
| 1581 | /* |
| 1582 | * Drain the receive queue. |
| 1583 | */ |
| 1584 | void |
| 1585 | fxp_rxdrain(struct fxp_softc *sc) |
| 1586 | { |
| 1587 | bus_dmamap_t rxmap; |
| 1588 | struct mbuf *m; |
| 1589 | |
| 1590 | for (;;) { |
| 1591 | IF_DEQUEUE(&sc->sc_rxq, m); |
| 1592 | if (m == NULL) |
| 1593 | break; |
| 1594 | rxmap = M_GETCTX(m, bus_dmamap_t); |
| 1595 | bus_dmamap_unload(sc->sc_dmat, rxmap); |
| 1596 | FXP_RXMAP_PUT(sc, rxmap); |
| 1597 | m_freem(m); |
| 1598 | } |
| 1599 | } |
| 1600 | |
| 1601 | /* |
| 1602 | * Stop the interface. Cancels the statistics updater and resets |
| 1603 | * the interface. |
| 1604 | */ |
| 1605 | void |
| 1606 | fxp_stop(struct ifnet *ifp, int disable) |
| 1607 | { |
| 1608 | struct fxp_softc *sc = ifp->if_softc; |
| 1609 | struct fxp_txsoft *txs; |
| 1610 | int i; |
| 1611 | |
| 1612 | /* |
| 1613 | * Turn down interface (done early to avoid bad interactions |
| 1614 | * between panics, shutdown hooks, and the watchdog timer) |
| 1615 | */ |
| 1616 | ifp->if_timer = 0; |
| 1617 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
| 1618 | |
| 1619 | /* |
| 1620 | * Cancel stats updater. |
| 1621 | */ |
| 1622 | callout_stop(&sc->sc_callout); |
| 1623 | if (sc->sc_flags & FXPF_MII) { |
| 1624 | /* Down the MII. */ |
| 1625 | mii_down(&sc->sc_mii); |
| 1626 | } |
| 1627 | |
| 1628 | /* |
| 1629 | * Issue software reset. This unloads any microcode that |
| 1630 | * might already be loaded. |
| 1631 | */ |
| 1632 | sc->sc_flags &= ~FXPF_UCODE_LOADED; |
| 1633 | CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET); |
| 1634 | DELAY(50); |
| 1635 | |
| 1636 | /* |
| 1637 | * Release any xmit buffers. |
| 1638 | */ |
| 1639 | for (i = 0; i < FXP_NTXCB; i++) { |
| 1640 | txs = FXP_DSTX(sc, i); |
| 1641 | if (txs->txs_mbuf != NULL) { |
| 1642 | bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); |
| 1643 | m_freem(txs->txs_mbuf); |
| 1644 | txs->txs_mbuf = NULL; |
| 1645 | } |
| 1646 | } |
| 1647 | sc->sc_txpending = 0; |
| 1648 | |
| 1649 | if (disable) { |
| 1650 | fxp_rxdrain(sc); |
| 1651 | fxp_disable(sc); |
| 1652 | } |
| 1653 | |
| 1654 | } |
| 1655 | |
| 1656 | /* |
| 1657 | * Watchdog (transmit timeout) handler. Called when a |
| 1658 | * transmission is started on the interface, but no interrupt is |
| 1659 | * received before the timeout. This usually indicates that the |
| 1660 | * card has wedged for some reason. |
| 1661 | */ |
| 1662 | void |
| 1663 | fxp_watchdog(struct ifnet *ifp) |
| 1664 | { |
| 1665 | struct fxp_softc *sc = ifp->if_softc; |
| 1666 | |
| 1667 | log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev)); |
| 1668 | ifp->if_oerrors++; |
| 1669 | |
| 1670 | (void) fxp_init(ifp); |
| 1671 | } |
| 1672 | |
| 1673 | /* |
| 1674 | * Initialize the interface. Must be called at splnet(). |
| 1675 | */ |
| 1676 | int |
| 1677 | fxp_init(struct ifnet *ifp) |
| 1678 | { |
| 1679 | struct fxp_softc *sc = ifp->if_softc; |
| 1680 | struct fxp_cb_config *cbp; |
| 1681 | struct fxp_cb_ias *cb_ias; |
| 1682 | struct fxp_txdesc *txd; |
| 1683 | bus_dmamap_t rxmap; |
| 1684 | int i, prm, save_bf, lrxen, vlan_drop, allm, error = 0; |
| 1685 | uint16_t status; |
| 1686 | |
| 1687 | if ((error = fxp_enable(sc)) != 0) |
| 1688 | goto out; |
| 1689 | |
| 1690 | /* |
| 1691 | * Cancel any pending I/O |
| 1692 | */ |
| 1693 | fxp_stop(ifp, 0); |
| 1694 | |
| 1695 | /* |
| 1696 | * XXX just setting sc_flags to 0 here clears any FXPF_MII |
| 1697 | * flag, and this prevents the MII from detaching resulting in |
| 1698 | * a panic. The flags field should perhaps be split into runtime |
| 1699 | * flags and more static information. For now, just clear the |
| 1700 | * only other flag set. |
| 1701 | */ |
| 1702 | |
| 1703 | sc->sc_flags &= ~FXPF_WANTINIT; |
| 1704 | |
| 1705 | /* |
| 1706 | * Initialize base of CBL and RFA memory. Loading with zero |
| 1707 | * sets it up for regular linear addressing. |
| 1708 | */ |
| 1709 | fxp_scb_wait(sc); |
| 1710 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); |
| 1711 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE); |
| 1712 | |
| 1713 | fxp_scb_wait(sc); |
| 1714 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE); |
| 1715 | |
| 1716 | /* |
| 1717 | * Initialize the multicast filter. Do this now, since we might |
| 1718 | * have to setup the config block differently. |
| 1719 | */ |
| 1720 | fxp_mc_setup(sc); |
| 1721 | |
| 1722 | prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; |
| 1723 | allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; |
| 1724 | |
| 1725 | /* |
| 1726 | * In order to support receiving 802.1Q VLAN frames, we have to |
| 1727 | * enable "save bad frames", since they are 4 bytes larger than |
| 1728 | * the normal Ethernet maximum frame length. On i82558 and later, |
| 1729 | * we have a better mechanism for this. |
| 1730 | */ |
| 1731 | save_bf = 0; |
| 1732 | lrxen = 0; |
| 1733 | vlan_drop = 0; |
| 1734 | if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) { |
| 1735 | if (sc->sc_rev < FXP_REV_82558_A4) |
| 1736 | save_bf = 1; |
| 1737 | else |
| 1738 | lrxen = 1; |
| 1739 | if (sc->sc_rev >= FXP_REV_82550) |
| 1740 | vlan_drop = 1; |
| 1741 | } |
| 1742 | |
| 1743 | /* |
| 1744 | * Initialize base of dump-stats buffer. |
| 1745 | */ |
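|  | /* |
|  | * fxp_tick() later issues CU_DUMPRESET against this address to |
|  | * fetch and clear the on-chip statistics counters. |
|  | */ |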
| 1746 | fxp_scb_wait(sc); |
| 1747 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, |
| 1748 | sc->sc_cddma + FXP_CDSTATSOFF); |
| 1749 | FXP_CDSTATSSYNC(sc, BUS_DMASYNC_PREREAD); |
| 1750 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR); |
| 1751 | |
| 1752 | cbp = &sc->sc_control_data->fcd_configcb; |
| 1753 | memset(cbp, 0, sizeof(struct fxp_cb_config)); |
| 1754 | |
| 1755 | /* |
| 1756 | * Load microcode for this controller. |
| 1757 | */ |
| 1758 | fxp_load_ucode(sc); |
| 1759 | |
| 1760 | if ((sc->sc_ethercom.ec_if.if_flags & IFF_LINK1)) |
| 1761 | sc->sc_flags |= FXPF_RECV_WORKAROUND; |
| 1762 | else |
| 1763 | sc->sc_flags &= ~FXPF_RECV_WORKAROUND; |
| 1764 | |
| 1765 | /* |
| 1766 | * This copy is kind of disgusting, but there are a bunch of |
| 1767 | * must-be-zero and must-be-one bits in this structure, and this is |
| 1768 | * the easiest way to initialize them all to the proper values. |
| 1769 | */ |
| 1770 | memcpy(cbp, fxp_cb_config_template, sizeof(fxp_cb_config_template)); |
| 1771 | |
| 1772 | /* BIG_ENDIAN: no need to swap to store 0 */ |
| 1773 | cbp->cb_status = 0; |
| 1774 | cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | |
| 1775 | FXP_CB_COMMAND_EL); |
| 1776 | /* BIG_ENDIAN: no need to swap to store 0xffffffff */ |
| 1777 | cbp->link_addr = 0xffffffff; /* (no) next command */ |
| 1778 | /* bytes in config block */ |
| 1779 | cbp->byte_count = (sc->sc_flags & FXPF_EXT_RFA) ? |
| 1780 | FXP_EXT_CONFIG_LEN : FXP_CONFIG_LEN; |
| 1781 | cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ |
| 1782 | cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ |
| 1783 | cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ |
| 1784 | cbp->mwi_enable = (sc->sc_flags & FXPF_MWI) ? 1 : 0; |
| 1785 | cbp->type_enable = 0; /* actually reserved */ |
| 1786 | cbp->read_align_en = (sc->sc_flags & FXPF_READ_ALIGN) ? 1 : 0; |
| 1787 | cbp->end_wr_on_cl = (sc->sc_flags & FXPF_WRITE_ALIGN) ? 1 : 0; |
| 1788 | cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ |
| 1789 | cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ |
| 1790 | cbp->dma_mbce = 0; /* (disable) dma max counters */ |
| 1791 | cbp->late_scb = 0; /* (don't) defer SCB update */ |
| 1792 | cbp->tno_int_or_tco_en = 0; /* (disable) tx not okay interrupt */ |
| 1793 | cbp->ci_int = 1; /* interrupt on CU idle */ |
| 1794 | cbp->ext_txcb_dis = (sc->sc_flags & FXPF_EXT_TXCB) ? 0 : 1; |
| 1795 | cbp->ext_stats_dis = 1; /* disable extended counters */ |
| 1796 | cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */ |
| 1797 | cbp->save_bf = save_bf; /* save bad frames */ |
| 1798 | cbp->disc_short_rx = !prm; /* discard short packets */ |
| 1799 | cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */ |
| 1800 | cbp->ext_rfa = (sc->sc_flags & FXPF_EXT_RFA) ? 1 : 0; |
| 1801 | cbp->two_frames = 0; /* do not limit FIFO to 2 frames */ |
| 1802 | cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */ |
| 1803 | /* interface mode */ |
| 1804 | cbp->mediatype = (sc->sc_flags & FXPF_MII) ? 1 : 0; |
| 1805 | cbp->csma_dis = 0; /* (don't) disable link */ |
| 1806 | cbp->tcp_udp_cksum = (sc->sc_flags & FXPF_82559_RXCSUM) ? 1 : 0; |
| 1807 | /* enable RX checksum on 82559-class chips */ |
| 1808 | cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */ |
| 1809 | cbp->link_wake_en = 0; /* (don't) assert PME# on link change */ |
| 1810 | cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */ |
| 1811 | cbp->mc_wake_en = 0; /* (don't) assert PME# on mcmatch */ |
| 1812 | cbp->nsai = 1; /* (don't) disable source addr insert */ |
| 1813 | cbp->preamble_length = 2; /* (7 byte) preamble */ |
| 1814 | cbp->loopback = 0; /* (don't) loopback */ |
| 1815 | cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ |
| 1816 | cbp->linear_pri_mode = 0; /* (wait after xmit only) */ |
| 1817 | cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ |
| 1818 | cbp->promiscuous = prm; /* promiscuous mode */ |
| 1819 | cbp->bcast_disable = 0; /* (don't) disable broadcasts */ |
| 1820 | cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/ |
| 1821 | cbp->ignore_ul = 0; /* consider U/L bit in IA matching */ |
| 1822 | cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */ |
| 1823 | cbp->crscdt = (sc->sc_flags & FXPF_MII) ? 0 : 1; |
| 1824 | cbp->stripping = !prm; /* truncate rx packet to byte count */ |
| 1825 | cbp->padding = 1; /* (do) pad short tx packets */ |
| 1826 | cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ |
| 1827 | cbp->long_rx_en = lrxen; /* long packet receive enable */ |
| 1828 | cbp->ia_wake_en = 0; /* (don't) wake up on address match */ |
| 1829 | cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */ |
| 1830 | /* must set wake_en in PMCSR also */ |
| 1831 | cbp->force_fdx = 0; /* (don't) force full duplex */ |
| 1832 | cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ |
| 1833 | cbp->multi_ia = 0; /* (don't) accept multiple IAs */ |
| 1834 | cbp->mc_all = allm; /* accept all multicasts */ |
| 1835 | cbp->ext_rx_mode = (sc->sc_flags & FXPF_EXT_RFA) ? 1 : 0; |
| 1836 | cbp->vlan_drop_en = vlan_drop; |
| 1837 | |
| 1838 | if (!(sc->sc_flags & FXPF_FC)) { |
| 1839 | /* |
| 1840 | * The i82557 has no hardware flow control, the values |
| 1841 | * here are the defaults for the chip. |
| 1842 | */ |
| 1843 | cbp->fc_delay_lsb = 0; |
| 1844 | cbp->fc_delay_msb = 0x40; |
| 1845 | cbp->pri_fc_thresh = 3; |
| 1846 | cbp->tx_fc_dis = 0; |
| 1847 | cbp->rx_fc_restop = 0; |
| 1848 | cbp->rx_fc_restart = 0; |
| 1849 | cbp->fc_filter = 0; |
| 1850 | cbp->pri_fc_loc = 1; |
| 1851 | } else { |
| 1852 | cbp->fc_delay_lsb = 0x1f; |
| 1853 | cbp->fc_delay_msb = 0x01; |
| 1854 | cbp->pri_fc_thresh = 3; |
| 1855 | cbp->tx_fc_dis = 0; /* enable transmit FC */ |
| 1856 | cbp->rx_fc_restop = 1; /* enable FC restop frames */ |
| 1857 | cbp->rx_fc_restart = 1; /* enable FC restart frames */ |
| 1858 | cbp->fc_filter = !prm; /* drop FC frames to host */ |
| 1859 | cbp->pri_fc_loc = 1; /* FC pri location (byte31) */ |
| 1860 | cbp->ext_stats_dis = 0; /* enable extended stats */ |
| 1861 | } |
| 1862 | |
| 1863 | FXP_CDCONFIGSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
| 1864 | |
| 1865 | /* |
| 1866 | * Start the config command/DMA. |
| 1867 | */ |
| 1868 | fxp_scb_wait(sc); |
| 1869 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDCONFIGOFF); |
| 1870 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); |
| 1871 | /* ...and wait for it to complete. */ |
| 1872 | for (i = 1000; i > 0; i--) { |
| 1873 | FXP_CDCONFIGSYNC(sc, |
| 1874 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
| 1875 | status = le16toh(cbp->cb_status); |
| 1876 | FXP_CDCONFIGSYNC(sc, BUS_DMASYNC_PREREAD); |
| 1877 | if ((status & FXP_CB_STATUS_C) != 0) |
| 1878 | break; |
| 1879 | DELAY(1); |
| 1880 | } |
| 1881 | if (i == 0) { |
| 1882 | log(LOG_WARNING, "%s: line %d: dmasync timeout\n", |
| 1883 | device_xname(sc->sc_dev), __LINE__); |
| 1884 | return (ETIMEDOUT); |
| 1885 | } |
| 1886 | |
| 1887 | /* |
| 1888 | * Initialize the station address. |
| 1889 | */ |
| 1890 | cb_ias = &sc->sc_control_data->fcd_iascb; |
| 1891 | /* BIG_ENDIAN: no need to swap to store 0 */ |
| 1892 | cb_ias->cb_status = 0; |
| 1893 | cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL); |
| 1894 | /* BIG_ENDIAN: no need to swap to store 0xffffffff */ |
| 1895 | cb_ias->link_addr = 0xffffffff; |
| 1896 | memcpy(cb_ias->macaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); |
| 1897 | |
| 1898 | FXP_CDIASSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
| 1899 | |
| 1900 | /* |
| 1901 | * Start the IAS (Individual Address Setup) command/DMA. |
| 1902 | */ |
| 1903 | fxp_scb_wait(sc); |
| 1904 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDIASOFF); |
| 1905 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); |
| 1906 | /* ...and wait for it to complete. */ |
| 1907 | for (i = 1000; i > 0; i--) { |
| 1908 | FXP_CDIASSYNC(sc, |
| 1909 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
| 1910 | status = le16toh(cb_ias->cb_status); |
| 1911 | FXP_CDIASSYNC(sc, BUS_DMASYNC_PREREAD); |
| 1912 | if ((status & FXP_CB_STATUS_C) != 0) |
| 1913 | break; |
| 1914 | DELAY(1); |
| 1915 | } |
| 1916 | if (i == 0) { |
| 1917 | log(LOG_WARNING, "%s: line %d: dmasync timeout\n", |
| 1918 | device_xname(sc->sc_dev), __LINE__); |
| 1919 | return (ETIMEDOUT); |
| 1920 | } |
| 1921 | |
| 1922 | /* |
| 1923 | * Initialize the transmit descriptor ring. txlast is initialized |
| 1924 | * to the end of the list so that it will wrap around to the first |
| 1925 | * descriptor when the first packet is transmitted. |
| 1926 | */ |
| 1927 | for (i = 0; i < FXP_NTXCB; i++) { |
| 1928 | txd = FXP_CDTX(sc, i); |
| 1929 | memset(txd, 0, sizeof(*txd)); |
| 1930 | txd->txd_txcb.cb_command = |
| 1931 | htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S); |
| 1932 | txd->txd_txcb.link_addr = |
| 1933 | htole32(FXP_CDTXADDR(sc, FXP_NEXTTX(i))); |
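|  | /* |
|  | * With extended TxCBs the TBD array proper is offset by two |
|  | * entries, since the extended control-block fields take up the |
|  | * space of the first two TBDs. |
|  | */ |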
| 1934 | if (sc->sc_flags & FXPF_EXT_TXCB) |
| 1935 | txd->txd_txcb.tbd_array_addr = |
| 1936 | htole32(FXP_CDTBDADDR(sc, i) + |
| 1937 | (2 * sizeof(struct fxp_tbd))); |
| 1938 | else |
| 1939 | txd->txd_txcb.tbd_array_addr = |
| 1940 | htole32(FXP_CDTBDADDR(sc, i)); |
| 1941 | FXP_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
| 1942 | } |
| 1943 | sc->sc_txpending = 0; |
| 1944 | sc->sc_txdirty = 0; |
| 1945 | sc->sc_txlast = FXP_NTXCB - 1; |
| 1946 | |
| 1947 | /* |
| 1948 | * Initialize the receive buffer list. |
| 1949 | */ |
| 1950 | sc->sc_rxq.ifq_maxlen = FXP_NRFABUFS; |
| 1951 | while (sc->sc_rxq.ifq_len < FXP_NRFABUFS) { |
| 1952 | rxmap = FXP_RXMAP_GET(sc); |
| 1953 | if ((error = fxp_add_rfabuf(sc, rxmap, 0)) != 0) { |
| 1954 | log(LOG_ERR, "%s: unable to allocate or map rx " |
| 1955 | "buffer %d, error = %d\n", |
| 1956 | device_xname(sc->sc_dev), |
| 1957 | sc->sc_rxq.ifq_len, error); |
| 1958 | /* |
| 1959 | * XXX Should attempt to run with fewer receive |
| 1960 | * XXX buffers instead of just failing. |
| 1961 | */ |
| 1962 | FXP_RXMAP_PUT(sc, rxmap); |
| 1963 | fxp_rxdrain(sc); |
| 1964 | goto out; |
| 1965 | } |
| 1966 | } |
| 1967 | sc->sc_rxidle = 0; |
| 1968 | |
| 1969 | /* |
| 1970 | * Give the transmit ring to the chip. We do this by pointing |
| 1971 | * the chip at the last descriptor (which is a NOP|SUSPEND), and |
| 1972 | * issuing a start command. It will execute the NOP and then |
| 1973 | * suspend, pointing at the first descriptor. |
| 1974 | */ |
| 1975 | fxp_scb_wait(sc); |
| 1976 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, FXP_CDTXADDR(sc, sc->sc_txlast)); |
| 1977 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); |
| 1978 | |
| 1979 | /* |
| 1980 | * Initialize receiver buffer area - RFA. |
| 1981 | */ |
| 1982 | #if 0 /* initialization will be done by FXP_SCB_INTRCNTL_REQUEST_SWI later */ |
| 1983 | rxmap = M_GETCTX(sc->sc_rxq.ifq_head, bus_dmamap_t); |
| 1984 | fxp_scb_wait(sc); |
| 1985 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, |
| 1986 | rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE); |
| 1987 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); |
| 1988 | #endif |
| 1989 | |
| 1990 | if (sc->sc_flags & FXPF_MII) { |
| 1991 | /* |
| 1992 | * Set current media. |
| 1993 | */ |
| 1994 | if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) |
| 1995 | goto out; |
| 1996 | } |
| 1997 | |
| 1998 | /* |
| 1999 | * ...all done! |
| 2000 | */ |
| 2001 | ifp->if_flags |= IFF_RUNNING; |
| 2002 | ifp->if_flags &= ~IFF_OACTIVE; |
| 2003 | |
| 2004 | /* |
| 2005 | * Request a software generated interrupt that will be used to |
| 2006 | * (re)start the RU processing. If we direct the chip to start |
| 2007 | * receiving from the start of queue now, instead of letting the |
| 2008 | * interrupt handler first process all received packets, we run |
| 2009 | * the risk of having it overwrite mbuf clusters while they are |
| 2010 | * being processed or after they have been returned to the pool. |
| 2011 | */ |
| 2012 | CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTRCNTL_REQUEST_SWI); |
| 2013 | |
| 2014 | /* |
| 2015 | * Start the one second timer. |
| 2016 | */ |
| 2017 | callout_reset(&sc->sc_callout, hz, fxp_tick, sc); |
| 2018 | |
| 2019 | /* |
| 2020 | * Attempt to start output on the interface. |
| 2021 | */ |
| 2022 | fxp_start(ifp); |
| 2023 | |
| 2024 | out: |
| 2025 | if (error) { |
| 2026 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
| 2027 | ifp->if_timer = 0; |
| 2028 | log(LOG_ERR, "%s: interface not running\n", |
| 2029 | device_xname(sc->sc_dev)); |
| 2030 | } |
| 2031 | return (error); |
| 2032 | } |
| 2033 | |
| 2034 | /* |
| 2035 | * Notify the world which media we're using. |
| 2036 | */ |
| 2037 | void |
| 2038 | fxp_mii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
| 2039 | { |
| 2040 | struct fxp_softc *sc = ifp->if_softc; |
| 2041 | |
| 2042 | if (sc->sc_enabled == 0) { |
| 2043 | ifmr->ifm_active = IFM_ETHER | IFM_NONE; |
| 2044 | ifmr->ifm_status = 0; |
| 2045 | return; |
| 2046 | } |
| 2047 | |
| 2048 | ether_mediastatus(ifp, ifmr); |
| 2049 | } |
| 2050 | |
| 2051 | int |
| 2052 | fxp_80c24_mediachange(struct ifnet *ifp) |
| 2053 | { |
| 2054 | |
| 2055 | /* Nothing to do here. */ |
| 2056 | return (0); |
| 2057 | } |
| 2058 | |
| 2059 | void |
| 2060 | fxp_80c24_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
| 2061 | { |
| 2062 | struct fxp_softc *sc = ifp->if_softc; |
| 2063 | |
| 2064 | /* |
| 2065 | * Report the currently-selected media; we cannot determine |
| 2066 | * the link status. |
| 2067 | */ |
| 2068 | ifmr->ifm_status = 0; |
| 2069 | ifmr->ifm_active = sc->sc_mii.mii_media.ifm_cur->ifm_media; |
| 2070 | } |
| 2071 | |
| 2072 | /* |
| 2073 | * Add a buffer to the end of the RFA buffer list. |
| 2074 | * Return 0 if successful, error code on failure. |
| 2075 | * |
| 2076 | * The RFA struct is stuck at the beginning of the mbuf cluster and the |
| 2077 | * data pointer is fixed up to point just past it. |
| 2078 | */ |
| 2079 | int |
| 2080 | fxp_add_rfabuf(struct fxp_softc *sc, bus_dmamap_t rxmap, int unload) |
| 2081 | { |
| 2082 | struct mbuf *m; |
| 2083 | int error; |
| 2084 | |
| 2085 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
| 2086 | if (m == NULL) |
| 2087 | return (ENOBUFS); |
| 2088 | |
| 2089 | MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); |
| 2090 | MCLGET(m, M_DONTWAIT); |
| 2091 | if ((m->m_flags & M_EXT) == 0) { |
| 2092 | m_freem(m); |
| 2093 | return (ENOBUFS); |
| 2094 | } |
| 2095 | |
| 2096 | if (unload) |
| 2097 | bus_dmamap_unload(sc->sc_dmat, rxmap); |
| 2098 | |
| 2099 | M_SETCTX(m, rxmap); |
| 2100 | |
| 2101 | m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; |
| 2102 | error = bus_dmamap_load_mbuf(sc->sc_dmat, rxmap, m, |
| 2103 | BUS_DMA_READ|BUS_DMA_NOWAIT); |
| 2104 | if (error) { |
| 2105 | /* XXX XXX XXX */ |
| 2106 | aprint_error_dev(sc->sc_dev, |
| 2107 | "can't load rx DMA map %d, error = %d\n", |
| 2108 | sc->sc_rxq.ifq_len, error); |
| 2109 | panic("fxp_add_rfabuf"); |
| 2110 | } |
| 2111 | |
| 2112 | FXP_INIT_RFABUF(sc, m); |
| 2113 | |
| 2114 | return (0); |
| 2115 | } |
| 2116 | |
| 2117 | int |
| 2118 | fxp_mdi_read(device_t self, int phy, int reg) |
| 2119 | { |
| 2120 | struct fxp_softc *sc = device_private(self); |
| 2121 | int count = 10000; |
| 2122 | int value; |
| 2123 | |
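|  | /* |
|  | * MDI control word, as assembled below: opcode in bits 27:26, |
|  | * PHY address in bits 25:21, register number in bits 20:16, and |
|  | * data in the low 16 bits.  Bit 28 (0x10000000) is set by the |
|  | * chip when the MDI cycle completes. |
|  | */ |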
| 2124 | CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, |
| 2125 | (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); |
| 2126 | |
| 2127 | while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & |
| 2128 | 0x10000000) == 0 && count--) |
| 2129 | DELAY(10); |
| 2130 | |
| 2131 | if (count <= 0) |
| 2132 | log(LOG_WARNING, |
| 2133 | "%s: fxp_mdi_read: timed out\n", device_xname(self)); |
| 2134 | |
| 2135 | return (value & 0xffff); |
| 2136 | } |
| 2137 | |
| 2138 | void |
| 2139 | fxp_statchg(struct ifnet *ifp) |
| 2140 | { |
| 2141 | |
| 2142 | /* Nothing to do. */ |
| 2143 | } |
| 2144 | |
| 2145 | void |
| 2146 | fxp_mdi_write(device_t self, int phy, int reg, int value) |
| 2147 | { |
| 2148 | struct fxp_softc *sc = device_private(self); |
| 2149 | int count = 10000; |
| 2150 | |
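|  | /* |
|  | * Same MDI control-word layout as fxp_mdi_read(), with the |
|  | * write opcode and the data to write in the low 16 bits. |
|  | */ |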
| 2151 | CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, |
| 2152 | (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | |
| 2153 | (value & 0xffff)); |
| 2154 | |
| 2155 | while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && |
| 2156 | count--) |
| 2157 | DELAY(10); |
| 2158 | |
| 2159 | if (count <= 0) |
| 2160 | log(LOG_WARNING, |
| 2161 | "%s: fxp_mdi_write: timed out\n", device_xname(self)); |
| 2162 | } |
| 2163 | |
| 2164 | int |
| 2165 | fxp_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
| 2166 | { |
| 2167 | struct fxp_softc *sc = ifp->if_softc; |
| 2168 | struct ifreq *ifr = (struct ifreq *)data; |
| 2169 | int s, error; |
| 2170 | |
| 2171 | s = splnet(); |
| 2172 | |
| 2173 | switch (cmd) { |
| 2174 | case SIOCSIFMEDIA: |
| 2175 | case SIOCGIFMEDIA: |
| 2176 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); |
| 2177 | break; |
| 2178 | |
| 2179 | default: |
| 2180 | if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) |
| 2181 | break; |
| 2182 | |
| 2183 | error = 0; |
| 2184 | |
| 2185 | if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) |
| 2186 | ; |
| 2187 | else if (ifp->if_flags & IFF_RUNNING) { |
| 2188 | /* |
| 2189 | * Multicast list has changed; set the |
| 2190 | * hardware filter accordingly. |
| 2191 | */ |
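|  | /* |
|  | * Wait for any pending transmissions to drain before |
|  | * reinitializing; the transmit-completion path is expected to |
|  | * wake us once FXPF_WANTINIT is set and the ring empties. |
|  | */ |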
| 2192 | while (sc->sc_txpending) { |
| 2193 | sc->sc_flags |= FXPF_WANTINIT; |
| 2194 | tsleep(sc, PSOCK, "fxp_init", 0); |
| 2195 | } |
| 2196 | error = fxp_init(ifp); |
| 2197 | } |
| 2198 | break; |
| 2199 | } |
| 2200 | |
| 2201 | /* Try to get more packets going. */ |
| 2202 | if (sc->sc_enabled) |
| 2203 | fxp_start(ifp); |
| 2204 | |
| 2205 | splx(s); |
| 2206 | return (error); |
| 2207 | } |
| 2208 | |
| 2209 | /* |
| 2210 | * Program the multicast filter. |
| 2211 | * |
| 2212 | * This function must be called at splnet(). |
| 2213 | */ |
| 2214 | void |
| 2215 | fxp_mc_setup(struct fxp_softc *sc) |
| 2216 | { |
| 2217 | struct fxp_cb_mcs *mcsp = &sc->sc_control_data->fcd_mcscb; |
| 2218 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
| 2219 | struct ethercom *ec = &sc->sc_ethercom; |
| 2220 | struct ether_multi *enm; |
| 2221 | struct ether_multistep step; |
| 2222 | int count, nmcasts; |
| 2223 | uint16_t status; |
| 2224 | |
| 2225 | #ifdef DIAGNOSTIC |
| 2226 | if (sc->sc_txpending) |
| 2227 | panic("fxp_mc_setup: pending transmissions"); |
| 2228 | #endif |
| 2229 | |
| 2231 | if (ifp->if_flags & IFF_PROMISC) { |
| 2232 | ifp->if_flags |= IFF_ALLMULTI; |
| 2233 | return; |
| 2234 | } else { |
| 2235 | ifp->if_flags &= ~IFF_ALLMULTI; |
| 2236 | } |
| 2237 | |
| 2238 | /* |
| 2239 | * Initialize multicast setup descriptor. |
| 2240 | */ |
| 2241 | nmcasts = 0; |
| 2242 | ETHER_FIRST_MULTI(step, ec, enm); |
| 2243 | while (enm != NULL) { |
| 2244 | /* |
| 2245 | * Check for too many multicast addresses or if we're |
| 2246 | * listening to a range. Either way, we simply have |
| 2247 | * to accept all multicasts. |
| 2248 | */ |
| 2249 | if (nmcasts >= MAXMCADDR || |
| 2250 | memcmp(enm->enm_addrlo, enm->enm_addrhi, |
| 2251 | ETHER_ADDR_LEN) != 0) { |
| 2252 | /* |
| 2253 | * Callers of this function must do the |
| 2254 | * right thing with this. If we're called |
| 2255 | * from outside fxp_init(), the caller must |
| 2256 | * detect if the state of IFF_ALLMULTI changes. |
| 2257 | * If it does, the caller must then call |
| 2258 | * fxp_init(), since allmulti is handled by |
| 2259 | * the config block. |
| 2260 | */ |
| 2261 | ifp->if_flags |= IFF_ALLMULTI; |
| 2262 | return; |
| 2263 | } |
| 2264 | memcpy(&mcsp->mc_addr[nmcasts][0], enm->enm_addrlo, |
| 2265 | ETHER_ADDR_LEN); |
| 2266 | nmcasts++; |
| 2267 | ETHER_NEXT_MULTI(step, enm); |
| 2268 | } |
| 2269 | |
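|  | /* |
|  | * Fill in the multicast setup command block.  Note that mc_cnt |
|  | * is a byte count (addresses * ETHER_ADDR_LEN), not an address |
|  | * count. |
|  | */ |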
| 2270 | /* BIG_ENDIAN: no need to swap to store 0 */ |
| 2271 | mcsp->cb_status = 0; |
| 2272 | mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL); |
| 2273 | mcsp->link_addr = htole32(FXP_CDTXADDR(sc, FXP_NEXTTX(sc->sc_txlast))); |
| 2274 | mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN); |
| 2275 | |
| 2276 | FXP_CDMCSSYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
| 2277 | |
| 2278 | /* |
| 2279 | * Wait until the command unit is not active. This should never |
| 2280 | * happen since nothing is queued, but make sure anyway. |
| 2281 | */ |
| 2282 | count = 100; |
| 2283 | while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) == |
| 2284 | FXP_SCB_CUS_ACTIVE && --count) |
| 2285 | DELAY(1); |
| 2286 | if (count == 0) { |
| 2287 | log(LOG_WARNING, "%s: line %d: command queue timeout\n", |
| 2288 | device_xname(sc->sc_dev), __LINE__); |
| 2289 | return; |
| 2290 | } |
| 2291 | |
| 2292 | /* |
| 2293 | * Start the multicast setup command/DMA. |
| 2294 | */ |
| 2295 | fxp_scb_wait(sc); |
| 2296 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDMCSOFF); |
| 2297 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); |
| 2298 | |
| 2299 | /* ...and wait for it to complete. */ |
| 2300 | for (count = 1000; count > 0; count--) { |
| 2301 | FXP_CDMCSSYNC(sc, |
| 2302 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
| 2303 | status = le16toh(mcsp->cb_status); |
| 2304 | FXP_CDMCSSYNC(sc, BUS_DMASYNC_PREREAD); |
| 2305 | if ((status & FXP_CB_STATUS_C) != 0) |
| 2306 | break; |
| 2307 | DELAY(1); |
| 2308 | } |
| 2309 | if (count == 0) { |
| 2310 | log(LOG_WARNING, "%s: line %d: dmasync timeout\n", |
| 2311 | device_xname(sc->sc_dev), __LINE__); |
| 2312 | return; |
| 2313 | } |
| 2314 | } |
| 2315 | |
| 2316 | static const uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE; |
| 2317 | static const uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE; |
| 2318 | static const uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE; |
| 2319 | static const uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE; |
| 2320 | static const uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE; |
| 2321 | static const uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE; |
| 2322 | static const uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE; |
| 2323 | |
| 2324 | #define UCODE(x) x, sizeof(x)/sizeof(uint32_t) |
| 2325 | |
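|  | /* |
|  | * Table mapping chip revisions to their receive-bundling microcode |
|  | * images.  int_delay_offset and bundle_max_offset are dword indices |
|  | * into the image where the corresponding tunables are patched in; |
|  | * an offset of 0 means the image has no such parameter. |
|  | */ |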
| 2326 | static const struct ucode { |
| 2327 | int32_t revision; |
| 2328 | const uint32_t *ucode; |
| 2329 | size_t length; |
| 2330 | uint16_t int_delay_offset; |
| 2331 | uint16_t bundle_max_offset; |
| 2332 | } ucode_table[] = { |
| 2333 | { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), |
| 2334 | D101_CPUSAVER_DWORD, 0 }, |
| 2335 | |
| 2336 | { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), |
| 2337 | D101_CPUSAVER_DWORD, 0 }, |
| 2338 | |
| 2339 | { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma), |
| 2340 | D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD }, |
| 2341 | |
| 2342 | { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s), |
| 2343 | D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD }, |
| 2344 | |
| 2345 | { FXP_REV_82550, UCODE(fxp_ucode_d102), |
| 2346 | D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD }, |
| 2347 | |
| 2348 | { FXP_REV_82550_C, UCODE(fxp_ucode_d102c), |
| 2349 | D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD }, |
| 2350 | |
| 2351 | { FXP_REV_82551_F, UCODE(fxp_ucode_d102e), |
| 2352 | D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD }, |
| 2353 | |
| 2354 | { FXP_REV_82551_10, UCODE(fxp_ucode_d102e), |
| 2355 | D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD }, |
| 2356 | |
| 2357 | { 0, NULL, 0, 0, 0 } |
| 2358 | }; |
| 2359 | |
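|  | /* |
|  | * fxp_load_ucode: |
|  | * |
|  | * Download the receive-bundling ("CPU saver") microcode matching |
|  | * this chip revision, if the user has enabled it with LINK0. |
|  | */ |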
| 2360 | void |
| 2361 | fxp_load_ucode(struct fxp_softc *sc) |
| 2362 | { |
| 2363 | const struct ucode *uc; |
| 2364 | struct fxp_cb_ucode *cbp = &sc->sc_control_data->fcd_ucode; |
| 2365 | int count, i; |
| 2366 | uint16_t status; |
| 2367 | |
| 2368 | if (sc->sc_flags & FXPF_UCODE_LOADED) |
| 2369 | return; |
| 2370 | |
| 2371 | /* |
| 2372 | * Only load the uCode if the user has requested that we do so |
| 2373 | * (by setting LINK0 on the interface). |
| 2374 | */ |
| 2375 | if ((sc->sc_ethercom.ec_if.if_flags & IFF_LINK0) == 0) { |
| 2376 | sc->sc_int_delay = 0; |
| 2377 | sc->sc_bundle_max = 0; |
| 2378 | return; |
| 2379 | } |
| 2380 | |
| 2381 | for (uc = ucode_table; uc->ucode != NULL; uc++) { |
| 2382 | if (sc->sc_rev == uc->revision) |
| 2383 | break; |
| 2384 | } |
| 2385 | if (uc->ucode == NULL) |
| 2386 | return; |
| 2387 | |
| 2388 | /* BIG ENDIAN: no need to swap to store 0 */ |
| 2389 | cbp->cb_status = 0; |
| 2390 | cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL); |
| 2391 | cbp->link_addr = 0xffffffff; /* (no) next command */ |
| 2392 | for (i = 0; i < uc->length; i++) |
| 2393 | cbp->ucode[i] = htole32(uc->ucode[i]); |
| 2394 | |
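|  | /* |
|  | * Patch the tunable parameters into the image.  Note that the |
|  | * value stored for the interrupt delay is 1.5 times fxp_int_delay; |
|  | * this appears to be what the microcode expects. |
|  | */ |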
| 2395 | if (uc->int_delay_offset) |
| 2396 | *(volatile uint16_t *) &cbp->ucode[uc->int_delay_offset] = |
| 2397 | htole16(fxp_int_delay + (fxp_int_delay / 2)); |
| 2398 | |
| 2399 | if (uc->bundle_max_offset) |
| 2400 | *(volatile uint16_t *) &cbp->ucode[uc->bundle_max_offset] = |
| 2401 | htole16(fxp_bundle_max); |
| 2402 | |
| 2403 | FXP_CDUCODESYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
| 2404 | |
| 2405 | /* |
| 2406 | * Download the uCode to the chip. |
| 2407 | */ |
| 2408 | fxp_scb_wait(sc); |
| 2409 | CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->sc_cddma + FXP_CDUCODEOFF); |
| 2410 | fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); |
| 2411 | |
| 2412 | /* ...and wait for it to complete. */ |
| 2413 | for (count = 10000; count > 0; count--) { |
| 2414 | FXP_CDUCODESYNC(sc, |
| 2415 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
| 2416 | status = le16toh(cbp->cb_status); |
| 2417 | FXP_CDUCODESYNC(sc, BUS_DMASYNC_PREREAD); |
| 2418 | if ((status & FXP_CB_STATUS_C) != 0) |
| 2419 | break; |
| 2420 | DELAY(2); |
| 2421 | } |
| 2422 | if (count == 0) { |
| 2423 | sc->sc_int_delay = 0; |
| 2424 | sc->sc_bundle_max = 0; |
| 2425 | log(LOG_WARNING, "%s: timeout loading microcode\n", |
| 2426 | device_xname(sc->sc_dev)); |
| 2427 | return; |
| 2428 | } |
| 2429 | |
| 2430 | if (sc->sc_int_delay != fxp_int_delay || |
| 2431 | sc->sc_bundle_max != fxp_bundle_max) { |
| 2432 | sc->sc_int_delay = fxp_int_delay; |
| 2433 | sc->sc_bundle_max = fxp_bundle_max; |
| 2434 | log(LOG_INFO, "%s: Microcode loaded: int delay: %d usec, " |
| 2435 | "max bundle: %d\n", device_xname(sc->sc_dev), |
| 2436 | sc->sc_int_delay, |
| 2437 | uc->bundle_max_offset == 0 ? 0 : sc->sc_bundle_max); |
| 2438 | } |
| 2439 | |
| 2440 | sc->sc_flags |= FXPF_UCODE_LOADED; |
| 2441 | } |
| 2442 | |
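|  | /* |
|  | * fxp_enable: |
|  | * |
|  | * Bring the device up, calling the optional sc_enable hook |
|  | * (typically supplied by the bus front-end) if one is set. |
|  | */ |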
| 2443 | int |
| 2444 | fxp_enable(struct fxp_softc *sc) |
| 2445 | { |
| 2446 | |
| 2447 | if (sc->sc_enabled == 0 && sc->sc_enable != NULL) { |
| 2448 | if ((*sc->sc_enable)(sc) != 0) { |
| 2449 | log(LOG_ERR, "%s: device enable failed\n", |
| 2450 | device_xname(sc->sc_dev)); |
| 2451 | return (EIO); |
| 2452 | } |
| 2453 | } |
| 2454 | |
| 2455 | sc->sc_enabled = 1; |
| 2456 | return (0); |
| 2457 | } |
| 2458 | |
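|  | /* |
|  | * fxp_disable: |
|  | * |
|  | * Power the device down via the optional sc_disable hook, if |
|  | * the device is currently enabled. |
|  | */ |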
| 2459 | void |
| 2460 | fxp_disable(struct fxp_softc *sc) |
| 2461 | { |
| 2462 | |
| 2463 | if (sc->sc_enabled != 0 && sc->sc_disable != NULL) { |
| 2464 | (*sc->sc_disable)(sc); |
| 2465 | sc->sc_enabled = 0; |
| 2466 | } |
| 2467 | } |
| 2468 | |
| 2469 | /* |
| 2470 | * fxp_activate: |
| 2471 | * |
| 2472 | * Handle device activation/deactivation requests. |
| 2473 | */ |
| 2474 | int |
| 2475 | fxp_activate(device_t self, enum devact act) |
| 2476 | { |
| 2477 | struct fxp_softc *sc = device_private(self); |
| 2478 | |
| 2479 | switch (act) { |
| 2480 | case DVACT_DEACTIVATE: |
| 2481 | if_deactivate(&sc->sc_ethercom.ec_if); |
| 2482 | return 0; |
| 2483 | default: |
| 2484 | return EOPNOTSUPP; |
| 2485 | } |
| 2486 | } |
| 2487 | |
| 2488 | /* |
| 2489 | * fxp_detach: |
| 2490 | * |
| 2491 | * Detach an i82557 interface. |
| 2492 | */ |
| 2493 | int |
| 2494 | fxp_detach(struct fxp_softc *sc, int flags) |
| 2495 | { |
| 2496 | struct ifnet *ifp = &sc->sc_ethercom.ec_if; |
| 2497 | int i, s; |
| 2498 | |
| 2499 | /* Succeed now if there's no work to do. */ |
| 2500 | if ((sc->sc_flags & FXPF_ATTACHED) == 0) |
| 2501 | return (0); |
| 2502 | |
| 2503 | s = splnet(); |
| 2504 | /* Stop the interface; this also stops the stats callout. */ |
| 2505 | fxp_stop(ifp, 1); |
| 2506 | splx(s); |
| 2507 | |
| 2508 | /* Destroy our callout. */ |
| 2509 | callout_destroy(&sc->sc_callout); |
| 2510 | |
| 2511 | if (sc->sc_flags & FXPF_MII) { |
| 2512 | /* Detach all PHYs */ |
| 2513 | mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); |
| 2514 | } |
| 2515 | |
| 2516 | /* Delete all remaining media. */ |
| 2517 | ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); |
| 2518 | |
| 2519 | rnd_detach_source(&sc->rnd_source); |
| 2520 | ether_ifdetach(ifp); |
| 2521 | if_detach(ifp); |
| 2522 | |
| 2523 | for (i = 0; i < FXP_NRFABUFS; i++) { |
| 2524 | bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmaps[i]); |
| 2525 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmaps[i]); |
| 2526 | } |
| 2527 | |
| 2528 | for (i = 0; i < FXP_NTXCB; i++) { |
| 2529 | bus_dmamap_unload(sc->sc_dmat, FXP_DSTX(sc, i)->txs_dmamap); |
| 2530 | bus_dmamap_destroy(sc->sc_dmat, FXP_DSTX(sc, i)->txs_dmamap); |
| 2531 | } |
| 2532 | |
| 2533 | bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); |
| 2534 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap); |
| 2535 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, |
| 2536 | sizeof(struct fxp_control_data)); |
| 2537 | bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg); |
| 2538 | |
| 2539 | return (0); |
| 2540 | } |
| 2541 | |