/*	$NetBSD: mtd803.c,v 1.32 2016/10/02 14:16:02 christos Exp $	*/

/*-
 *
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Peter Bex <Peter.Bex@student.kun.nl>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 * - Most importantly, add bus_dmamap_sync calls in the correct places.
 *   The only PCI machine available for testing was an i386, on which
 *   the syncs are no-ops, so their placement could not be verified.
 * - Add a powerhook so the card is reinitialised when resuming from standby.
 * - The watchdog does not work yet; enabling it crashes the system.
 * - There appears to be a CardBus version of the card (see the datasheet).
 *   That would call for a detach function (free buffers, stop rx/tx, etc.).
 * - When the TXBUN (Tx buffer unavailable) interrupt is enabled, it is
 *   raised every time a packet is sent, although transmission still works.
 */
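
/*
 * For reference, an untested sketch of where the missing bus_dmamap_sync
 * calls from the first TODO item would probably go, following bus_dma(9);
 * this is an assumption, not something verified on real hardware:
 *
 *	before handing a descriptor to the chip:
 *		bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map, 0,
 *		    sc->desc_dma_map->dm_mapsize,
 *		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *	after the chip hands it back, before inspecting it:
 *		bus_dmamap_sync(sc->dma_tag, sc->desc_dma_map, 0,
 *		    sc->desc_dma_map->dm_mapsize,
 *		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 */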

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.32 2016/10/02 14:16:02 christos Exp $");

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>

#include <dev/ic/mtd803reg.h>
#include <dev/ic/mtd803var.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/*
 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller
 * Written by Peter Bex (peter.bex@student.kun.nl)
 *
 * Datasheet at: http://www.myson.com.tw or http://www.century-semi.com
 */

#define MTD_READ_1(sc, reg) \
	bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_1(sc, reg, data) \
	bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_2(sc, reg) \
	bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_2(sc, reg, data) \
	bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

#define MTD_READ_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg))
#define MTD_WRITE_4(sc, reg, data) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data))

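/* Read-modify-write helpers to set or clear individual register bits */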
#define MTD_SETBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x))
#define MTD_CLRBIT(sc, reg, x) \
	MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x))

#define ETHER_CRC32(buf, len)	(ether_crc32_be((buf), (len)))

int mtd_mii_readreg(device_t, int, int);
void mtd_mii_writereg(device_t, int, int, int);
void mtd_mii_statchg(struct ifnet *);

void mtd_start(struct ifnet *);
void mtd_stop(struct ifnet *, int);
int mtd_ioctl(struct ifnet *, u_long, void *);
void mtd_setmulti(struct mtd_softc *);
void mtd_watchdog(struct ifnet *);

int mtd_init(struct ifnet *);
void mtd_reset(struct mtd_softc *);
void mtd_shutdown(void *);
int mtd_init_desc(struct mtd_softc *);
int mtd_put(struct mtd_softc *, int, struct mbuf *);
struct mbuf *mtd_get(struct mtd_softc *, int, int);

int mtd_rxirq(struct mtd_softc *);
int mtd_txirq(struct mtd_softc *);
int mtd_bufirq(struct mtd_softc *);


int
mtd_config(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int i;

	/* Read station address */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i);

	/* Initialize ifnet structure */
	memcpy(ifp->if_xname, device_xname(sc->dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_init = mtd_init;
	ifp->if_start = mtd_start;
	ifp->if_stop = mtd_stop;
	ifp->if_ioctl = mtd_ioctl;
	ifp->if_watchdog = mtd_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Setup MII interface */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = mtd_mii_readreg;
	sc->mii.mii_writereg = mtd_mii_writereg;
	sc->mii.mii_statchg = mtd_mii_statchg;

	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0);

	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		aprint_error_dev(sc->dev, "Unable to configure MII\n");
		return 1;
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	if (mtd_init_desc(sc))
		return 1;

	/* Attach interface */
	if_attach(ifp);
	ether_ifattach(ifp, sc->eaddr);

	/* Initialise random source */
	rnd_attach_source(&sc->rnd_src, device_xname(sc->dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/* Add shutdown hook to reset card when we reboot */
	sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc);

	return 0;
}


/*
 * mtd_init
 * Must be called at splnet()
 */
int
mtd_init(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;

	mtd_reset(sc);

	/*
	 * Set cache alignment and burst length.  The exact meaning of
	 * these fields is unclear, so the values chosen here are probably
	 * suboptimal.
	 */
	MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16);

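	/* Judging by the flag names: store-and-forward tx, full duplex */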
	MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX);

	/* Promiscuous mode? */
	if (ifp->if_flags & IFF_PROMISC)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM);

	/* Broadcast mode? */
	if (ifp->if_flags & IFF_BROADCAST)
		MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);
	else
		MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD);

	mtd_setmulti(sc);

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);
	MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE);

	/* Set descriptor base addresses */
	MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr
	    + sizeof(struct mtd_desc) * MTD_NUM_RXD));
	MTD_WRITE_4(sc, MTD_RXLBA,
	    htole32(sc->desc_dma_map->dm_segs[0].ds_addr));

	/* Enable receiver and transmitter */
	MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);
	MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);

	/* Interface is running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}


int
mtd_init_desc(struct mtd_softc *sc)
{
	int rseg, desc_rseg, err, i;
	bus_dma_segment_t seg, desc_seg;
	bus_size_t size, desc_size;

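	/*
	 * Layout: one DMA region holds MTD_NUM_RXD rx descriptors followed
	 * by MTD_NUM_TXD tx descriptors; a second region holds the rx
	 * buffers followed by the tx buffers.  Each descriptor points at
	 * its buffer, and the rx and tx descriptors each form a circular
	 * list.  The descriptor segment, size and count are kept in the
	 * desc_* variables so the buffer error paths below can unwind the
	 * descriptor allocation with the right arguments.
	 */
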
	/* Allocate memory for descriptors */
	desc_size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc);

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, desc_size, MTD_DMA_ALIGN,
	    0, &desc_seg, 1, &desc_rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &desc_seg, 1, desc_size,
	    (void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, desc_size, 1,
	    desc_size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc,
	    desc_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Allocate memory for the buffers */
	size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE;

	/* Allocate DMA-safe memory */
	if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN,
	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to allocate DMA buffer, error = %d\n", err);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Map memory to kernel addressable space */
	if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size,
	    &sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to map DMA buffer, error = %d\n", err);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Create a DMA map */
	if ((err = bus_dmamap_create(sc->dma_tag, size, 1,
	    size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to create DMA map, error = %d\n", err);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

	/* Load the DMA map */
	if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->dev,
		    "unable to load DMA map, error = %d\n", err);
		bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map);
		bus_dmamem_unmap(sc->dma_tag, sc->buf, size);
		bus_dmamem_free(sc->dma_tag, &seg, rseg);

		/* Undo DMA map for descriptors */
		bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map);
		bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map);
		bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, desc_size);
		bus_dmamem_free(sc->dma_tag, &desc_seg, desc_rseg);
		return 1;
	}

| 353 | |
| 354 | /* Descriptors are stored as a circular linked list */ |
| 355 | /* Fill in rx descriptors */ |
| 356 | for (i = 0; i < MTD_NUM_RXD; ++i) { |
| 357 | sc->desc[i].stat = MTD_RXD_OWNER; |
| 358 | if (i == MTD_NUM_RXD - 1) { /* Last descriptor */ |
| 359 | /* Link back to first rx descriptor */ |
| 360 | sc->desc[i].next = |
| 361 | htole32(sc->desc_dma_map->dm_segs[0].ds_addr); |
| 362 | } else { |
| 363 | /* Link forward to next rx descriptor */ |
| 364 | sc->desc[i].next = |
| 365 | htole32(sc->desc_dma_map->dm_segs[0].ds_addr |
| 366 | + (i + 1) * sizeof(struct mtd_desc)); |
| 367 | } |
| 368 | sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS; |
| 369 | /* Set buffer's address */ |
| 370 | sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr |
| 371 | + i * MTD_RXBUF_SIZE); |
| 372 | } |
| 373 | |
| 374 | /* Fill in tx descriptors */ |
| 375 | for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) { |
| 376 | sc->desc[i].stat = 0; /* At least, NOT MTD_TXD_OWNER! */ |
| 377 | if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) { /* Last descr */ |
| 378 | /* Link back to first tx descriptor */ |
| 379 | sc->desc[i].next = |
| 380 | htole32(sc->desc_dma_map->dm_segs[0].ds_addr |
| 381 | +MTD_NUM_RXD * sizeof(struct mtd_desc)); |
| 382 | } else { |
| 383 | /* Link forward to next tx descriptor */ |
| 384 | sc->desc[i].next = |
| 385 | htole32(sc->desc_dma_map->dm_segs[0].ds_addr |
| 386 | + (i + 1) * sizeof(struct mtd_desc)); |
| 387 | } |
| 388 | /* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */ |
| 389 | /* Set buffer's address */ |
| 390 | sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr |
| 391 | + MTD_NUM_RXD * MTD_RXBUF_SIZE |
| 392 | + (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE); |
| 393 | } |
| 394 | |
| 395 | return 0; |
| 396 | } |


void
mtd_mii_statchg(struct ifnet *ifp)
{
	/* Should we do something here? :) */
}


int
mtd_mii_readreg(device_t self, int phy, int reg)
{
	struct mtd_softc *sc = device_private(self);

	return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2));
}


void
mtd_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct mtd_softc *sc = device_private(self);

	MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val);
}


int
mtd_put(struct mtd_softc *sc, int index, struct mbuf *m)
{
	int len, tlen;
	char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE
	    + index * MTD_TXBUF_SIZE;
	struct mbuf *n;

	for (tlen = 0; m != NULL; m = n) {
		len = m->m_len;
		if (len == 0) {
			n = m_free(m);
			continue;
		} else if (tlen + len > MTD_TXBUF_SIZE) {
			/*
			 * XXX FIXME: Copying this mbuf would overflow the
			 * tx buffer, so drop it, truncating the frame.
			 */
			aprint_error_dev(sc->dev,
			    "packet too large! Size = %i\n", tlen + len);
			n = m_free(m);
			continue;
		}
		memcpy(buf, mtod(m, void *), len);
		buf += len;
		tlen += len;
		n = m_free(m);
	}
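	/*
	 * Fill in the config word: pad short frames, append the CRC,
	 * interrupt on completion, and record the packet and buffer sizes.
	 */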
	sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD
	    | MTD_TXD_CONF_CRC | MTD_TXD_CONF_IRQC
	    | ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS)
	    | (tlen & MTD_TXD_CONF_BUFS);

	return tlen;
}


void
mtd_start(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int first_tx = sc->cur_tx;
	int sent = 0;

	/* Don't transmit when the interface is busy or inactive */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

		bpf_mtap(ifp, m);

		/* Copy mbuf chain into tx buffer */
		(void)mtd_put(sc, sc->cur_tx, m);
		++sent;

		if (sc->cur_tx != first_tx)
			sc->desc[MTD_NUM_RXD + sc->cur_tx].stat =
			    MTD_TXD_OWNER;

		if (++sc->cur_tx >= MTD_NUM_TXD)
			sc->cur_tx = 0;
	}

	/* Nothing was dequeued; don't hand a stale descriptor to the chip */
	if (sent == 0)
		return;

	/* Mark first & last descriptor */
	sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD;

	if (sc->cur_tx == 0) {
		sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |=
		    MTD_TXD_CONF_LSD;
	} else {
		sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |=
		    MTD_TXD_CONF_LSD;
	}

	/* Give first descriptor to chip to complete transaction */
	sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER;

	/* Transmit polling demand */
	MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND);

	/* XXX FIXME: Set up a watchdog timer */
	/* ifp->if_timer = 5; */
}


void
mtd_stop(struct ifnet *ifp, int disable)
{
	struct mtd_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver */
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE);
	MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE);

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	/* Is more needed here when disabling? */
	if (disable) {
		/* Delete tx and rx descriptor base addresses */
		MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000);
		MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}


void
mtd_watchdog(struct ifnet *ifp)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->dev));
	++sc->ethercom.ec_if.if_oerrors;

	mtd_stop(ifp, 0);

	s = splnet();
	mtd_init(ifp);
	splx(s);
}


int
mtd_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mtd_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			mtd_setmulti(sc);
		error = 0;
	}

	splx(s);
	return error;
}


struct mbuf *
mtd_get(struct mtd_softc *sc, int index, int totlen)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	int len;
	char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return NULL;

	m_set_rcvif(m0, ifp);
	m0->m_pkthdr.len = totlen;
	m = m0;
	len = MHLEN;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (!(m->m_flags & M_EXT)) {
				m_freem(m0);
				return NULL;
			}
			len = MCLBYTES;
		}

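		/*
		 * For the first mbuf, shift m_data so that the payload
		 * following the Ethernet header ends up aligned.
		 */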
		if (m == m0) {
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), buf, len);
		buf += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == NULL) {
				m_freem(m0);
				return NULL;
			}
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return m0;
}


int
mtd_rxirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	int len;
	struct mbuf *m;

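	/* Process frames until we reach a descriptor the chip still owns */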
	while (!(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER)) {
		/* Error summary set? */
		if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) {
			aprint_error_dev(sc->dev,
			    "received packet with errors\n");
			/* Give up packet, since an error occurred */
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}
		/* Get buffer length */
		len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN)
		    >> MTD_RXD_FLEN_SHIFT;
		len -= ETHER_CRC_LEN;

		/* Check packet size */
		if (len <= sizeof(struct ether_header)) {
			aprint_error_dev(sc->dev,
			    "invalid packet size %d; dropping\n", len);
			sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;
			sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE &
			    MTD_RXD_CONF_BUFS;
			++ifp->if_ierrors;
			if (++sc->cur_rx >= MTD_NUM_RXD)
				sc->cur_rx = 0;
			continue;
		}

		m = mtd_get(sc, sc->cur_rx, len);

		/* Give descriptor back to card */
		sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS;
		sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER;

		if (++sc->cur_rx >= MTD_NUM_RXD)
			sc->cur_rx = 0;

		if (m == NULL) {
			aprint_error_dev(sc->dev,
			    "error pulling packet off interface\n");
			++ifp->if_ierrors;
			continue;
		}

		++ifp->if_ipackets;

		bpf_mtap(ifp, m);
		/* Pass the packet up */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	return 1;
}


int
mtd_txirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	ifp->if_flags &= ~IFF_OACTIVE;
	++ifp->if_opackets;

	/* XXX FIXME: If there is something queued, do an mtd_start? */

	return 1;
}


int
mtd_bufirq(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	/* Clear timeout */
	ifp->if_timer = 0;

	/* XXX FIXME: Do something here to make sure we get some buffers! */

	return 1;
}


int
mtd_irq_h(void *args)
{
	struct mtd_softc *sc = args;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t status;
	int r = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(sc->dev))
		return 0;

	/* Disable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, 0x00000000);

	for (;;) {
		status = MTD_READ_4(sc, MTD_ISR);

		/* Add random seed before masking out bits */
		if (status)
			rnd_add_uint32(&sc->rnd_src, status);

		status &= MTD_ISR_MASK;
		if (!status)	/* We didn't ask for this */
			break;

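		/* Acknowledge the status bits we are about to handle */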
		MTD_WRITE_4(sc, MTD_ISR, status);

		/* NOTE: Perhaps we should reset with some of these errors? */

		if (status & MTD_ISR_RXBUN) {
			aprint_error_dev(sc->dev,
			    "receive buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXERR) {
			aprint_error_dev(sc->dev, "receive error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TXBUN) {
			aprint_error_dev(sc->dev,
			    "transmit buffer unavailable\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PDF) {
			aprint_error_dev(sc->dev,
			    "parallel detection fault\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_FBUSERR) {
			aprint_error_dev(sc->dev, "fatal bus error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_TARERR) {
			aprint_error_dev(sc->dev, "target error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_MASTERR) {
			aprint_error_dev(sc->dev, "master error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_PARERR) {
			aprint_error_dev(sc->dev, "parity error\n");
			++ifp->if_ierrors;
		}

		if (status & MTD_ISR_RXIRQ)	/* Receive interrupt */
			r |= mtd_rxirq(sc);

		if (status & MTD_ISR_TXIRQ)	/* Transmit interrupt */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXEARLY)	/* Transmit early */
			r |= mtd_txirq(sc);

		if (status & MTD_ISR_TXBUN)	/* Transmit buffer n/a */
			r |= mtd_bufirq(sc);
	}

	/* Enable interrupts */
	MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK);

	return r;
}


void
mtd_setmulti(struct mtd_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	u_int32_t rxtx_stat;
	u_int32_t hash[2] = {0, 0};
	u_int32_t crc;
	struct ether_multi *enm;
	struct ether_multistep step;
	int mcnt = 0;

	/* Get old status */
	rxtx_stat = MTD_READ_4(sc, MTD_RXTXR);

	if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
		rxtx_stat |= MTD_RX_AMULTI;
		MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
		MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR);
		MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR);
		return;
	}

	ETHER_FIRST_MULTI(step, &sc->ethercom, enm);
	while (enm != NULL) {
		/*
		 * We need the 6 most significant bits of the CRC:
		 * the top bit selects MAR0 or MAR1, the low 5 bits
		 * select the bit within that 32-bit register.
		 */
		crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		hash[crc >> 5] |= 1 << (crc & 0x1f);

		++mcnt;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Accept multicast bit needs to be on? */
	if (mcnt)
		rxtx_stat |= MTD_RX_AMULTI;
	else
		rxtx_stat &= ~MTD_RX_AMULTI;

	/* Write out the hash */
	MTD_WRITE_4(sc, MTD_MAR0, hash[0]);
	MTD_WRITE_4(sc, MTD_MAR1, hash[1]);
	MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat);
}


void
mtd_reset(struct mtd_softc *sc)
{
	int i;

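	/* Request a software reset of the chip */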
	MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET);

	/* Reset descriptor status */
	sc->cur_tx = 0;
	sc->cur_rx = 0;

	/* Wait until done with reset */
	for (i = 0; i < MTD_TIMEOUT; ++i) {
		DELAY(10);
		if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET))
			break;
	}

	if (i == MTD_TIMEOUT)
		aprint_error_dev(sc->dev, "reset timed out\n");

	/* Wait a little so chip can stabilize */
	DELAY(1000);
}


void
mtd_shutdown(void *arg)
{
	struct mtd_softc *sc = arg;
	struct ifnet *ifp = &sc->ethercom.ec_if;

	rnd_detach_source(&sc->rnd_src);
	mtd_stop(ifp, 1);
}
| 894 | |