/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Some Rhine chips have a serious flaw in their transmit DMA
 * mechanism: transmit buffers must be longword aligned.  Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vr/if_vrreg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* Define to show Rx/Tx error status. */
#undef	VR_SHOW_ERRORS
#define	VR_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types, their names & quirks.
 */
#define	VR_Q_NEEDALIGN		(1<<0)
#define	VR_Q_CSUM		(1<<1)
#define	VR_Q_CAM		(1<<2)

static const struct vr_type {
	u_int16_t		vr_vid;
	u_int16_t		vr_did;
	int			vr_quirks;
	const char		*vr_name;
} vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
	    VR_Q_NEEDALIGN,
	    "VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
	    0,
	    "VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
	    0,
	    "VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
	    VR_Q_CSUM,
	    "VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, 0, NULL }
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);
static int vr_shutdown(device_t);
static int vr_suspend(device_t);
static int vr_resume(device_t);

static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int vr_dma_alloc(struct vr_softc *);
static void vr_dma_free(struct vr_softc *);
static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
static int vr_newbuf(struct vr_softc *, int);

#ifndef __NO_STRICT_ALIGNMENT
static __inline void vr_fixup_rx(struct mbuf *);
#endif
static int vr_rxeof(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static int vr_error(struct vr_softc *, uint16_t);
static void vr_tx_underrun(struct vr_softc *);
static int vr_intr(void *);
static void vr_int_task(void *, int);
static void vr_start(if_t);
static void vr_start_locked(if_t);
static int vr_encap(struct vr_softc *, struct mbuf **);
static int vr_ioctl(if_t, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_tx_start(struct vr_softc *);
static void vr_rx_start(struct vr_softc *);
static int vr_tx_stop(struct vr_softc *);
static int vr_rx_stop(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct vr_softc *);
static int vr_ifmedia_upd(if_t);
static void vr_ifmedia_sts(if_t, struct ifmediareq *);

static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);

static void vr_cam_mask(struct vr_softc *, uint32_t, int);
static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
static void vr_set_filter(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_tx_ring_init(struct vr_softc *);
static int vr_rx_ring_init(struct vr_softc *);
static void vr_setwol(struct vr_softc *);
static void vr_clrwol(struct vr_softc *);
static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);

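/*
 * Tx FIFO threshold steps, ordered from the smallest threshold up to
 * store-and-forward; vr_tx_underrun() walks one step up this table each
 * time the chip reports a transmit underrun.
 */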
static const struct vr_tx_threshold_table {
	int tx_cfg;
	int bcr_cfg;
	int value;
} vr_tx_threshold_tables[] = {
	{ VR_TXTHRESH_64BYTES,	VR_BCR1_TXTHRESH64BYTES,	64 },
	{ VR_TXTHRESH_128BYTES,	VR_BCR1_TXTHRESH128BYTES,	128 },
	{ VR_TXTHRESH_256BYTES,	VR_BCR1_TXTHRESH256BYTES,	256 },
	{ VR_TXTHRESH_512BYTES,	VR_BCR1_TXTHRESH512BYTES,	512 },
	{ VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES,	1024 },
	{ VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD,	2048 }
};

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),
	DEVMETHOD(device_suspend,	vr_suspend),
	DEVMETHOD(device_resume,	vr_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

DRIVER_MODULE(vr, pci, vr_driver, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, 0, 0);

static int
vr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vr_softc		*sc;
	int			i;

	sc = device_get_softc(dev);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);

	return (CSR_READ_2(sc, VR_MIIDATA));
}

static int
vr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vr_softc		*sc;
	int			i;

	sc = device_get_softc(dev);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	CSR_WRITE_2(sc, VR_MIIDATA, data);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
		    reg);

	return (0);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
vr_miibus_statchg(device_t dev)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;
	if_t			ifp;
	int			lfdx, mfdx;
	uint8_t			cr0, cr1, fc;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vr_miibus);
	ifp = sc->vr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vr_flags |= VR_F_LINK;
			break;
		default:
			break;
		}
	}

	if ((sc->vr_flags & VR_F_LINK) != 0) {
		cr0 = CSR_READ_1(sc, VR_CR0);
		cr1 = CSR_READ_1(sc, VR_CR1);
		mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
		lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
		if (mfdx != lfdx) {
			if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
				if (vr_tx_stop(sc) != 0 ||
				    vr_rx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx/Rx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					VR_UNLOCK(sc);
					return;
				}
			}
			if (lfdx)
				cr1 |= VR_CR1_FULLDUPLEX;
			else
				cr1 &= ~VR_CR1_FULLDUPLEX;
			CSR_WRITE_1(sc, VR_CR1, cr1);
		}
		fc = 0;
		/* Configure flow-control. */
		if (sc->vr_revid >= REV_ID_VT6105_A0) {
			fc = CSR_READ_1(sc, VR_FLOWCR1);
			fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_FLOWCR1_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				fc |= VR_FLOWCR1_TXPAUSE;
				sc->vr_flags |= VR_F_TXPAUSE;
			}
			CSR_WRITE_1(sc, VR_FLOWCR1, fc);
		} else if (sc->vr_revid >= REV_ID_VT6102_A) {
			/* No Tx pause capability available for Rhine II. */
			fc = CSR_READ_1(sc, VR_MISC_CR0);
			fc &= ~VR_MISCCR0_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_MISCCR0_RXPAUSE;
			CSR_WRITE_1(sc, VR_MISC_CR0, fc);
		}
		vr_rx_start(sc);
		vr_tx_start(sc);
	} else {
		if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
			device_printf(sc->vr_dev,
			    "%s: Tx/Rx shutdown error -- resetting\n",
			    __func__);
			sc->vr_flags |= VR_F_RESTART;
		}
	}
}

static void
vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
{

	if (type == VR_MCAST_CAM)
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
	CSR_WRITE_4(sc, VR_CAMMASK, mask);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);
}

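/*
 * Write one perfect-filter CAM entry.  Multicast CAM entries hold a
 * full 6-byte station address while VLAN CAM entries are 2 bytes wide.
 */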
static int
vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
{
	int	i;

	if (type == VR_MCAST_CAM) {
		if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
			return (EINVAL);
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	} else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);

	/* Set CAM entry address. */
	CSR_WRITE_1(sc, VR_CAMADDR, idx);
	/* Set CAM entry data. */
	if (type == VR_MCAST_CAM) {
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
	} else {
		CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
		CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
	}
	DELAY(10);
	/* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
	CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VR_TIMEOUT)
		device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
		    __func__);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);

	return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
}

struct vr_hash_maddr_cam_ctx {
	struct vr_softc *sc;
	uint32_t mask;
	int error;
};

static u_int
vr_hash_maddr_cam(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	struct vr_hash_maddr_cam_ctx *ctx = arg;

	if (ctx->error != 0)
		return (0);
	ctx->error = vr_cam_data(ctx->sc, VR_MCAST_CAM, mcnt, LLADDR(sdl));
	if (ctx->error != 0) {
		ctx->mask = 0;
		return (0);
	}
	ctx->mask |= 1 << mcnt;

	return (1);
}

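/*
 * Hash an address into the 64-bit multicast hash filter: the upper 6
 * bits of the big-endian CRC-32 of the station address select one of
 * the 64 filter bits spread across the two 32-bit VR_MAR registers.
 */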
static u_int
vr_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int h;

	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);
	else
		hashes[1] |= (1 << (h - 32));

	return (1);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_set_filter(struct vr_softc *sc)
{
	if_t			ifp;
	uint32_t		hashes[2] = { 0, 0 };
	uint8_t			rxfilt;
	int			error, mcnt;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
	    VR_RXCFG_RX_MULTI);
	if (if_getflags(ifp) & IFF_BROADCAST)
		rxfilt |= VR_RXCFG_RX_BROAD;
	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (if_getflags(ifp) & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones. */
	error = 0;
	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		struct vr_hash_maddr_cam_ctx ctx;

		/*
		 * For hardware that has CAM capability, use the
		 * 32-entry multicast perfect filter.
		 */
		ctx.sc = sc;
		ctx.mask = 0;
		ctx.error = 0;
		mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr_cam, &ctx);
		vr_cam_mask(sc, ctx.mask, VR_MCAST_CAM);
		error = ctx.error;
	}

	if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
		/*
		 * If there are too many multicast addresses or
		 * setting multicast CAM filter failed, use hash
		 * table based filtering.
		 */
		mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr, hashes);
	}

	if (mcnt > 0)
		rxfilt |= VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(const struct vr_softc *sc)
{
	int		i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

	CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
	if (sc->vr_revid < REV_ID_VT6102_A) {
		/* VT86C100A needs more delay after reset. */
		DELAY(100);
	}
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT6102_A)
			device_printf(sc->vr_dev, "reset never completed!\n");
		else {
			/* Use newer force reset command. */
			device_printf(sc->vr_dev,
			    "Using force reset command.\n");
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
			/*
			 * Wait a little while for the chip to get its brains
			 * in order.
			 */
			DELAY(2000);
		}
	}
}

/*
 * Probe for a VIA Rhine chip.  Check the PCI vendor and device
 * IDs against our list and return a match or NULL.
 */
static const struct vr_type *
vr_match(device_t dev)
{
	const struct vr_type	*t = vr_devs;

	for (t = vr_devs; t->vr_name != NULL; t++)
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did))
			return (t);
	return (NULL);
}

/*
 * Probe for a VIA Rhine chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
	const struct vr_type	*t;

	t = vr_match(dev);
	if (t != NULL) {
		device_set_desc(dev, t->vr_name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
	struct vr_softc		*sc;
	if_t			ifp;
	const struct vr_type	*t;
	uint8_t			eaddr[ETHER_ADDR_LEN];
	int			error, rid;
	int			i, phy, pmc;

	sc = device_get_softc(dev);
	sc->vr_dev = dev;
	t = vr_match(dev);
	KASSERT(t != NULL, ("Lost if_vr device match"));
	sc->vr_quirks = t->vr_quirks;
	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, vr_sysctl_stats, "I", "Statistics");

	error = 0;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_get_revid(dev);
	device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);

	sc->vr_res_id = PCIR_BAR(0);
	sc->vr_res_type = SYS_RES_IOPORT;
	sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
	    &sc->vr_res_id, RF_ACTIVE);
	if (sc->vr_res == NULL) {
		device_printf(dev, "couldn't map ports\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt. */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, vr_ioctl);
	if_setstartfn(ifp, vr_start);
	if_setinitfn(ifp, vr_init);
	if_setsendqlen(ifp, VR_TX_RING_CNT - 1);
	if_setsendqready(ifp);

	NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);

	/* Configure Tx FIFO threshold. */
	sc->vr_txthresh = VR_TXTHRESH_MIN;
	if (sc->vr_revid < REV_ID_VT6105_A0) {
		/*
		 * Use store and forward mode for Rhine I/II.
		 * Otherwise they produce a lot of Tx underruns and
		 * it would take a while to find a working FIFO
		 * threshold value.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}
	if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
		if_sethwassist(ifp, VR_CSUM_FEATURES);
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
		/*
		 * To update the checksum field the hardware may need to
		 * store the entire frame in its FIFO before transmitting.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}

	if (sc->vr_revid >= REV_ID_VT6102_A &&
	    pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC, 0);

	/* Rhine supports oversized VLAN frames. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down.  Be sure to kick it in the head to wake it
	 * up again.
	 */
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/*
	 * Get station address.  The way the Rhine chips work, you're
	 * not allowed to directly access the EEPROM once they've been
	 * programmed a special way.  Consequently, we need to read the
	 * node address from the PAR0 and PAR1 registers.
	 * Reloading the EEPROM also overwrites VR_CFGA, VR_CFGB, VR_CFGC
	 * and VR_CFGD, so any memory-mapped I/O configured by the driver
	 * is reset to its default state.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	for (i = VR_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
			break;
	}
	if (i == 0)
		device_printf(dev, "Reloading EEPROM timeout!\n");
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/* Reset the adapter. */
	vr_reset(sc);
	/* Ack intr & disable further interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0);
	if (sc->vr_revid >= REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	if (sc->vr_revid < REV_ID_VT6102_A) {
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_MODE10T, 1);
	} else {
		/* Report error instead of retrying forever. */
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_PCEROPT, 1);
		/* Detect MII coding error. */
		pci_write_config(dev, VR_PCI_MODE3,
		    pci_read_config(dev, VR_PCI_MODE3, 1) |
		    VR_MODE3_MIION, 1);
		if (sc->vr_revid >= REV_ID_VT6105_LOM &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MODE10T, 1);
		/* Enable Memory-Read-Multiple. */
		if (sc->vr_revid >= REV_ID_VT6107_A1 &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MRDPL, 1);
	}
	/* Disable MII AUTOPOLL. */
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	if (vr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Do MII setup. */
	if (sc->vr_revid >= REV_ID_VT6105_A0)
		phy = 1;
	else
		phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
	error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd,
	    vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
	    sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    vr_intr, NULL, sc, &sc->vr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc		*sc = device_get_softc(dev);
	if_t			ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		VR_LOCK(sc);
		sc->vr_flags |= VR_F_DETACHED;
		vr_stop(sc);
		VR_UNLOCK(sc);
		callout_drain(&sc->vr_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->vr_inttask);
		ether_ifdetach(ifp);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
		    sc->vr_res);

	if (ifp)
		if_free(ifp);

	vr_dma_free(sc);

	mtx_destroy(&sc->vr_mtx);

	return (0);
}

struct vr_dmamap_arg {
	bus_addr_t	vr_busaddr;
};

static void
vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct vr_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->vr_busaddr = segs[0].ds_addr;
}

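/*
 * Allocate DMA tags, descriptor rings and buffer maps.  Everything
 * descends from a 32-bit parent tag since the chip only carries 32-bit
 * pointers in its descriptors; Tx buffers additionally need longword
 * alignment on chips with the VR_Q_NEEDALIGN quirk.
 */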
static int
vr_dma_alloc(struct vr_softc *sc)
{
	struct vr_dmamap_arg	ctx;
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	bus_size_t		tx_alignment;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vr_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_parent_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
		tx_alignment = sizeof(uint32_t);
	else
		tx_alignment = 1;
	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    tx_alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * VR_MAXFRAGS,	/* maxsize */
	    VR_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
	    (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
	    VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
	    (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
	    VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
	    &sc->vr_cdata.vr_rx_sparemap)) != 0) {
		device_printf(sc->vr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vr_dma_free(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->vr_cdata.vr_tx_ring_tag) {
		if (sc->vr_rdata.vr_tx_ring_paddr)
			bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_cdata.vr_tx_ring_map);
		if (sc->vr_rdata.vr_tx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_rdata.vr_tx_ring,
			    sc->vr_cdata.vr_tx_ring_map);
		sc->vr_rdata.vr_tx_ring = NULL;
		sc->vr_rdata.vr_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
		sc->vr_cdata.vr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->vr_cdata.vr_rx_ring_tag) {
		if (sc->vr_rdata.vr_rx_ring_paddr)
			bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_cdata.vr_rx_ring_map);
		if (sc->vr_rdata.vr_rx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_rdata.vr_rx_ring,
			    sc->vr_cdata.vr_rx_ring_map);
		sc->vr_rdata.vr_rx_ring = NULL;
		sc->vr_rdata.vr_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
		sc->vr_cdata.vr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->vr_cdata.vr_tx_tag) {
		for (i = 0; i < VR_TX_RING_CNT; i++) {
			txd = &sc->vr_cdata.vr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
		sc->vr_cdata.vr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->vr_cdata.vr_rx_tag) {
		for (i = 0; i < VR_RX_RING_CNT; i++) {
			rxd = &sc->vr_cdata.vr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vr_cdata.vr_rx_sparemap) {
			bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
			    sc->vr_cdata.vr_rx_sparemap);
			sc->vr_cdata.vr_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
		sc->vr_cdata.vr_rx_tag = NULL;
	}

	if (sc->vr_cdata.vr_parent_tag) {
		bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
		sc->vr_cdata.vr_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_tx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data	*rd;
	struct vr_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->vr_cdata.vr_tx_prod = 0;
	sc->vr_cdata.vr_tx_cons = 0;
	sc->vr_cdata.vr_tx_cnt = 0;
	sc->vr_cdata.vr_tx_pkts = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		if (i == VR_TX_RING_CNT - 1)
			addr = VR_TX_RING_ADDR(sc, 0);
		else
			addr = VR_TX_RING_ADDR(sc, i + 1);
		rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_rx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data	*rd;
	struct vr_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->vr_cdata.vr_rx_cons = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->vr_rx_ring[i];
		if (i == VR_RX_RING_CNT - 1)
			addr = VR_RX_RING_ADDR(sc, 0);
		else
			addr = VR_RX_RING_ADDR(sc, i + 1);
		rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		if (vr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

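/*
 * Hand a received buffer straight back to the chip: restore the usable
 * buffer length in the control word and return descriptor ownership to
 * the hardware, leaving the mbuf cluster in place.
 */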
static __inline void
vr_discard_rxbuf(struct vr_rxdesc *rxd)
{
	struct vr_desc	*desc;

	desc = rxd->desc;
	desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
	desc->vr_status = htole32(VR_RXSTAT_OWN);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_newbuf(struct vr_softc *sc, int idx)
{
	struct vr_desc		*desc;
	struct vr_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
	    sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vr_cdata.vr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
	sc->vr_cdata.vr_rx_sparemap = map;
	bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
	desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
	desc->vr_status = htole32(VR_RXSTAT_OWN);

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
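/*
 * On strict-alignment machines the 14-byte Ethernet header leaves the
 * IP header misaligned, so slide the whole frame back by ETHER_ALIGN
 * (2) bytes, one 16-bit word at a time.  The regions overlap, but the
 * destination trails the source by one word, so a forward copy is safe.
 */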
static __inline void
vr_fixup_rx(struct mbuf *m)
{
	uint16_t	*src, *dst;
	unsigned int	i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
vr_rxeof(struct vr_softc *sc)
{
	struct vr_rxdesc	*rxd;
	struct mbuf		*m;
	if_t			ifp;
	struct vr_desc		*cur_rx;
	int			cons, prog, total_len, rx_npkts;
	uint32_t		rxstat, rxctl;

	VR_LOCK_ASSERT(sc);
	ifp = sc->vr_ifp;
	cons = sc->vr_cdata.vr_rx_cons;
	rx_npkts = 0;

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
		rxstat = le32toh(cur_rx->vr_status);
		rxctl = le32toh(cur_rx->vr_ctl);
		if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
			break;

		prog++;
		rxd = &sc->vr_cdata.vr_rxdesc[cons];
		m = rxd->rx_m;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 * We don't support scatter/gather in the Rx path yet, so
		 * discard partial frames.
		 */
		if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
		    (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
		    (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sc->vr_stat.rx_errors++;
			if (rxstat & VR_RXSTAT_CRCERR)
				sc->vr_stat.rx_crc_errors++;
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				sc->vr_stat.rx_alignment++;
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				sc->vr_stat.rx_fifo_overflows++;
			if (rxstat & VR_RXSTAT_GIANT)
				sc->vr_stat.rx_giants++;
			if (rxstat & VR_RXSTAT_RUNT)
				sc->vr_stat.rx_runts++;
			if (rxstat & VR_RXSTAT_BUFFERR)
				sc->vr_stat.rx_no_buffers++;
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
#endif
			vr_discard_rxbuf(rxd);
			continue;
		}

		if (vr_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			sc->vr_stat.rx_errors++;
			sc->vr_stat.rx_no_mbufs++;
			vr_discard_rxbuf(rxd);
			continue;
		}

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len = VR_RXBYTES(rxstat);
		total_len -= ETHER_CRC_LEN;
		m->m_pkthdr.len = m->m_len = total_len;
#ifndef	__NO_STRICT_ALIGNMENT
		/*
		 * RX buffers must be 32-bit aligned.
		 * Ignore the alignment problems on the non-strict alignment
		 * platform.  The performance hit incurred due to unaligned
		 * accesses is much smaller than the hit produced by forcing
		 * buffer copies all the time.
		 */
		vr_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		sc->vr_stat.rx_ok++;
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
		    (rxctl & VR_RXCTL_IP) != 0) {
			/* Checksum is valid for non-fragmented IP packets. */
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
						m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}
		VR_UNLOCK(sc);
		if_input(ifp, m);
		VR_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		/*
		 * Let the controller know how many Rx buffers are
		 * posted, but avoid expensive register accesses if the
		 * Tx pause capability was not negotiated with the link
		 * partner.
		 */
		if ((sc->vr_flags & VR_F_TXPAUSE) != 0) {
			if (prog >= VR_RX_RING_CNT)
				prog = VR_RX_RING_CNT - 1;
			CSR_WRITE_1(sc, VR_FLOWCR0, prog);
		}
		sc->vr_cdata.vr_rx_cons = cons;
		bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
		    sc->vr_cdata.vr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_desc		*cur_tx;
	if_t			ifp;
	uint32_t		txctl, txstat;
	int			cons, prod;

	VR_LOCK_ASSERT(sc);

	cons = sc->vr_cdata.vr_tx_cons;
	prod = sc->vr_cdata.vr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->vr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
		cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
		txctl = le32toh(cur_tx->vr_ctl);
		txstat = le32toh(cur_tx->vr_status);
		if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
			break;

		sc->vr_cdata.vr_tx_cnt--;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		/* Only the first descriptor in the chain is valid. */
		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
			continue;

		txd = &sc->vr_cdata.vr_txdesc[cons];
		KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
		    __func__));

		if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			sc->vr_stat.tx_errors++;
			if ((txstat & VR_TXSTAT_ABRT) != 0) {
				/* Give up and restart Tx. */
				sc->vr_stat.tx_abort++;
				bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				VR_INC(cons, VR_TX_RING_CNT);
				sc->vr_cdata.vr_tx_cons = cons;
				if (vr_tx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					return;
				}
				vr_tx_start(sc);
				break;
			}
			if ((sc->vr_revid < REV_ID_VT3071_A &&
			    (txstat & VR_TXSTAT_UNDERRUN)) ||
			    (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
				sc->vr_stat.tx_underrun++;
				/* Retry and restart Tx. */
				sc->vr_cdata.vr_tx_cnt++;
				sc->vr_cdata.vr_tx_cons = cons;
				cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
				bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
				    sc->vr_cdata.vr_tx_ring_map,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				vr_tx_underrun(sc);
				return;
			}
			if ((txstat & VR_TXSTAT_DEFER) != 0) {
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				sc->vr_stat.tx_collisions++;
			}
			if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				sc->vr_stat.tx_late_collisions++;
			}
		} else {
			sc->vr_stat.tx_ok++;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		if (sc->vr_revid < REV_ID_VT3071_A) {
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & VR_TXSTAT_COLLCNT) >> 3);
			sc->vr_stat.tx_collisions +=
			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
		} else {
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & 0x0f));
			sc->vr_stat.tx_collisions += (txstat & 0x0f);
		}
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->vr_cdata.vr_tx_cons = cons;
	if (sc->vr_cdata.vr_tx_cnt == 0)
		sc->vr_watchdog_timer = 0;
}

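/*
 * Once-a-second timer: restarts the chip after a fatal error, drives
 * the MII state machine and runs the transmit watchdog.
 */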
static void
vr_tick(void *xsc)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;

	sc = (struct vr_softc *)xsc;

	VR_LOCK_ASSERT(sc);

	if ((sc->vr_flags & VR_F_RESTART) != 0) {
		device_printf(sc->vr_dev, "restarting\n");
		sc->vr_stat.num_restart++;
		if_setdrvflagbits(sc->vr_ifp, 0, IFF_DRV_RUNNING);
		vr_init_locked(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);
	if ((sc->vr_flags & VR_F_LINK) == 0)
		vr_miibus_statchg(sc->vr_dev);
	vr_watchdog(sc);
	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;

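/*
 * Polling entry points: with interrupts disabled, run the Rx/Tx
 * completion paths under the softc lock; POLL_AND_CHECK_STATUS also
 * services any error bits pending in the ISR.
 */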
static int
vr_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = if_getsoftc(ifp);
	rx_npkts = 0;

	VR_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		rx_npkts = vr_poll_locked(ifp, cmd, count);
	VR_UNLOCK(sc);
	return (rx_npkts);
}

static int
vr_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = if_getsoftc(ifp);

	VR_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	rx_npkts = vr_rxeof(sc);
	vr_txeof(sc);
	if (!if_sendq_empty(ifp))
		vr_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Also check status register. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			return (rx_npkts);

		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0)
				return (rx_npkts);
		}
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			vr_rx_start(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/* Back off the transmit threshold. */
static void
vr_tx_underrun(struct vr_softc *sc)
{
	int	thresh;

	device_printf(sc->vr_dev, "Tx underrun -- ");
	if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
		thresh = sc->vr_txthresh;
		sc->vr_txthresh++;
		if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
			sc->vr_txthresh = VR_TXTHRESH_MAX;
			printf("using store and forward mode\n");
		} else
			printf("increasing Tx threshold(%d -> %d)\n",
			    vr_tx_threshold_tables[thresh].value,
			    vr_tx_threshold_tables[thresh + 1].value);
	} else
		printf("\n");
	sc->vr_stat.tx_underrun++;
	if (vr_tx_stop(sc) != 0) {
		device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
		    "resetting\n", __func__);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}
	vr_tx_start(sc);
}

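/*
 * Interrupt filter: runs in primary interrupt context, so just mask
 * further interrupts and defer the real work to vr_int_task() on the
 * fast taskqueue.
 */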
static int
vr_intr(void *arg)
{
	struct vr_softc		*sc;
	uint16_t		status;

	sc = (struct vr_softc *)arg;

	status = CSR_READ_2(sc, VR_ISR);
	if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask);

	return (FILTER_HANDLED);
}

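/*
 * Deferred interrupt handler: loop while interrupt causes remain,
 * servicing Rx/Tx completions and restarting output, then unmask
 * interrupts on the way out.
 */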
static void
vr_int_task(void *arg, int npending)
{
	struct vr_softc		*sc;
	if_t			ifp;
	uint16_t		status;

	sc = (struct vr_softc *)arg;

	VR_LOCK(sc);

	if ((sc->vr_flags & VR_F_SUSPENDED) != 0)
		goto done_locked;

	status = CSR_READ_2(sc, VR_ISR);
	ifp = sc->vr_ifp;
#ifdef DEVICE_POLLING
	if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
		goto done_locked;
#endif

	/* Suppress unwanted interrupts. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
	    (sc->vr_flags & VR_F_RESTART) != 0) {
		CSR_WRITE_2(sc, VR_IMR, 0);
		CSR_WRITE_2(sc, VR_ISR, status);
		goto done_locked;
	}

	for (; (status & VR_INTRS) != 0;) {
		CSR_WRITE_2(sc, VR_ISR, status);
		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0) {
				VR_UNLOCK(sc);
				return;
			}
		}
		vr_rxeof(sc);
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			/* Restart Rx if RxDMA SM was stopped. */
			vr_rx_start(sc);
		}
		vr_txeof(sc);

		if (!if_sendq_empty(ifp))
			vr_start_locked(ifp);

		status = CSR_READ_2(sc, VR_ISR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

done_locked:
	VR_UNLOCK(sc);
}

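/*
 * Handle fatal error interrupts.  A PCI bus error schedules a full
 * chip restart; a non-zero return tells the caller to abandon further
 * interrupt processing.
 */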
static int
vr_error(struct vr_softc *sc, uint16_t status)
{
	uint16_t pcis;

	status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
	if ((status & VR_ISR_BUSERR) != 0) {
		status &= ~VR_ISR_BUSERR;
		sc->vr_stat.bus_errors++;
		/* Disable further interrupts. */
		CSR_WRITE_2(sc, VR_IMR, 0);
		pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
		device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
		    "resetting\n", pcis);
		pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
		sc->vr_flags |= VR_F_RESTART;
		return (EAGAIN);
	}
	if ((status & VR_ISR_LINKSTAT2) != 0) {
		/* Link state change, duplex changes etc. */
		status &= ~VR_ISR_LINKSTAT2;
	}
	if ((status & VR_ISR_STATSOFLOW) != 0) {
		status &= ~VR_ISR_STATSOFLOW;
		if (sc->vr_revid >= REV_ID_VT6105M_A0) {
			/* Update MIB counters. */
		}
	}

	if (status != 0)
		device_printf(sc->vr_dev,
		    "unhandled interrupt, status = 0x%04x\n", status);
	return (0);
}

1785
1786 /*
1787 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1788 * pointers to the fragment pointers.
1789 */
1790 static int
vr_encap(struct vr_softc * sc,struct mbuf ** m_head)1791 vr_encap(struct vr_softc *sc, struct mbuf **m_head)
1792 {
1793 struct vr_txdesc *txd;
1794 struct vr_desc *desc;
1795 struct mbuf *m;
1796 bus_dma_segment_t txsegs[VR_MAXFRAGS];
1797 uint32_t csum_flags, txctl;
1798 int error, i, nsegs, prod, si;
1799 int padlen;
1800
1801 VR_LOCK_ASSERT(sc);
1802
1803 M_ASSERTPKTHDR((*m_head));
1804
	/*
	 * Some VIA Rhine chips want packet buffers to be longword
	 * aligned, but very often our mbufs aren't.  Rather than
	 * waste time trying to decide when to copy and when not
	 * to copy, just do it all the time.
	 */
	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	/*
	 * The Rhine chip doesn't auto-pad, so we have to make
	 * sure to pad short frames out to the minimum frame length
	 * ourselves.
	 */
	if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
		m = *m_head;
		padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}
1856
1857 prod = sc->vr_cdata.vr_tx_prod;
1858 txd = &sc->vr_cdata.vr_txdesc[prod];
1859 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
1860 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
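	/*
	 * The ring is treated as full one entry early; keeping one
	 * descriptor slot unused is a common safety margin in this
	 * style of ring management.
	 */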
	if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Set checksum offload. */
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= VR_TXCTL_IPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= VR_TXCTL_TCPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= VR_TXCTL_UDPCSUM;
	}

	/*
	 * Quite contrary to the VIA Rhine datasheet, the VR_TXCTL_TLINK
	 * bit is required in all descriptors, whether a frame uses a
	 * single buffer or multiple ones. Also, the VR_TXSTAT_OWN bit
	 * is valid only in the first descriptor of a multi-fragment
	 * frame. Without honoring these rules, the VIA Rhine chip
	 * generates Tx underrun interrupts and can't send any frames.
	 */
	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->vr_rdata.vr_tx_ring[prod];
		desc->vr_status = 0;
		txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
		if (i == 0)
			txctl |= VR_TXCTL_FIRSTFRAG;
		desc->vr_ctl = htole32(txctl);
		desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
		sc->vr_cdata.vr_tx_cnt++;
		VR_INC(prod, VR_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->vr_cdata.vr_tx_prod = prod;

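	/* Step back to the last descriptor used for this frame. */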
	prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
	desc = &sc->vr_rdata.vr_tx_ring[prod];

	/*
	 * Set EOP on the last descriptor and request a Tx completion
	 * interrupt for every VR_TX_INTR_THRESH-th frame; the packet
	 * counter below wraps modulo VR_TX_INTR_THRESH, so it reads
	 * zero exactly once per threshold's worth of frames.
	 */
	VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
	if (sc->vr_cdata.vr_tx_pkts == 0)
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
	else
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);

	/* Finally, hand ownership of the first descriptor to the hardware. */
	desc = &sc->vr_rdata.vr_tx_ring[si];
	desc->vr_status |= htole32(VR_TXSTAT_OWN);

	/* Sync descriptors. */
	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vr_start(if_t ifp)
{
	struct vr_softc *sc;

	sc = if_getsoftc(ifp);
	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}

static void
vr_start_locked(if_t ifp)
{
	struct vr_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = if_getsoftc(ifp);

	VR_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp) &&
	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Tell the chip to start transmitting. */
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->vr_watchdog_timer = 5;
	}
}

static void
vr_init(void *xsc)
{
	struct vr_softc *sc;

	sc = (struct vr_softc *)xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	if_t ifp;
	struct mii_data *mii;
	bus_addr_t addr;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	mii = device_get_softc(sc->vr_miibus);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, if_getlladdr(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

	/* Init circular RX list. */
	if (vr_rx_ring_init(sc) != 0) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_tx_ring_init(sc);

	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		uint8_t vcam[2] = { 0, 0 };

		/* Disable VLAN hardware tag insertion/stripping. */
		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
		/* Disable VLAN hardware filtering. */
		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
		/* Disable all CAM entries. */
		vr_cam_mask(sc, VR_MCAST_CAM, 0);
		vr_cam_mask(sc, VR_VLAN_CAM, 0);
		/* Enable the first VLAN CAM. */
		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
		vr_cam_mask(sc, VR_VLAN_CAM, 1);
	}

	/*
	 * Set up receive filter.
	 */
	vr_set_filter(sc);

	/*
	 * Load the address of the RX ring.
	 */
	addr = VR_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
	/*
	 * Load the address of the TX ring.
	 */
	addr = VR_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
	/* Default: full-duplex, no Tx poll. */
	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);

	/* Set flow-control parameters for Rhine III. */
	if (sc->vr_revid >= REV_ID_VT6105_A0) {
		/*
		 * Configure the number of Rx buffers available for
		 * incoming packets.
		 * Even though the datasheet says almost nothing about
		 * this register, it must be updated whenever the
		 * driver posts new RX buffers to the controller.
		 * Otherwise no XON frame is sent to the link partner
		 * even when the controller has enough RX buffers,
		 * and the host would be isolated from the network.
		 * The controller is not smart enough to track the
		 * number of available RX buffers itself, so the
		 * driver has to tell it how many are posted. In
		 * other words, this register works like a residue
		 * counter for RX buffers and should be initialized
		 * to the total number of RX buffers - 1 before
		 * enabling the RX MAC. Note that the register is
		 * 8 bits wide, which effectively limits the number
		 * of RX buffers the controller can use to 255.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1);
		/*
		 * Tx pause low threshold: 8 free receive buffers.
		 * Tx pause XON high threshold: 24 free receive buffers.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR1,
		    VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF);
		/* Set Tx pause timer. */
		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
	}

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, VR_CR0,
	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);

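	/* Writing all ones clears any pending interrupt status. */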
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts and disable MII intrs.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
	if (sc->vr_revid > REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	mii_mediachg(mii);

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(if_t ifp)
{
	struct vr_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	error = mii_mediachg(mii);
	VR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->vr_miibus);
	VR_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		VR_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	VR_UNLOCK(sc);
}

static int
vr_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ sc->vr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					vr_set_filter(sc);
			} else {
				if ((sc->vr_flags & VR_F_DETACHED) == 0)
					vr_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		sc->vr_if_flags = if_getflags(ifp);
		VR_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_set_filter(sc);
		VR_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vr_poll, ifp);
				if (error != 0)
					break;
				VR_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_2(sc, VR_IMR, 0x0000);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				VR_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VR_LOCK(sc);
				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				VR_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
				if_sethwassistbits(ifp, VR_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, VR_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_UCAST) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_UCAST);
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vr_watchdog(struct vr_softc *sc)
{
	if_t ifp;

	VR_LOCK_ASSERT(sc);

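	/*
	 * The timer is disarmed when 0; otherwise count it down and
	 * only act once it reaches zero.
	 */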
	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
		return;

	ifp = sc->vr_ifp;
	/*
	 * Reclaim first, as we don't request an interrupt for every
	 * packet.
	 */
	vr_txeof(sc);
	if (sc->vr_cdata.vr_tx_cnt == 0)
		return;

	if ((sc->vr_flags & VR_F_LINK) == 0) {
		if (bootverbose)
			if_printf(sc->vr_ifp, "watchdog timeout "
			    "(missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		vr_init_locked(sc);
		return;
	}

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	vr_init_locked(sc);

	if (!if_sendq_empty(ifp))
		vr_start_locked(ifp);
}

static void
vr_tx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

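	/*
	 * If the transmitter was stopped, reload the descriptor base
	 * before turning it back on, then issue a poll demand when
	 * there is still work queued.
	 */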
	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) == 0) {
		addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	if (sc->vr_cdata.vr_tx_cnt != 0) {
		sc->vr_watchdog_timer = 5;
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
	}
}

static void
vr_rx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) == 0) {
		addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
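	/* Kick the receiver with a poll demand. */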
	CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
}

static int
vr_tx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

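	/*
	 * Clear TX_ON and busy-wait (DELAY(5) per spin, up to
	 * VR_TIMEOUT iterations) for the transmitter to go idle.
	 */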
	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) != 0) {
		cmd &= ~VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_TX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

static int
vr_rx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) != 0) {
		cmd &= ~VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_RX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	if_t ifp;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	sc->vr_watchdog_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
	if (vr_rx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
	if (vr_tx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
	/* Clear pending interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vr_shutdown(device_t dev)
{

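	/* Suspend already stops the chip and arms WOL as needed. */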
	return (vr_suspend(dev));
}

static int
vr_suspend(device_t dev)
{
	struct vr_softc *sc;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	vr_stop(sc);
	vr_setwol(sc);
	sc->vr_flags |= VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static int
vr_resume(device_t dev)
{
	struct vr_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	ifp = sc->vr_ifp;
	vr_clrwol(sc);
	vr_reset(sc);
	if (if_getflags(ifp) & IFF_UP)
		vr_init_locked(sc);

	sc->vr_flags &= ~VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static void
vr_setwol(struct vr_softc *sc)
{
	if_t ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A ||
	    pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->vr_ifp;

	/* Clear WOL configuration. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
	/*
	 * It seems that multicast wakeup frames require programming of
	 * the pattern registers, plus a valid CRC and a pattern mask
	 * for each pattern. While it would be possible to set such a
	 * pattern up, doing so would complicate the WOL configuration,
	 * so multicast wakeup frames are ignored here.
	 */
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
		v = CSR_READ_1(sc, VR_STICKHW);
		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
	}

	/* Put the hardware to sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
vr_clrwol(struct vr_softc *sc)
{
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A)
		return;

	/* Take hardware out of sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Clear WOL configuration; it may interfere with normal operation. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
}

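/*
 * Dump the accumulated statistics to the console when 1 is written
 * to the corresponding sysctl node, e.g. (assuming the node is
 * attached as dev.vr.<unit>.stats):
 *	sysctl dev.vr.0.stats=1
 */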
static int
vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct vr_softc *sc;
	struct vr_statistics *stat;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct vr_softc *)arg1;
		stat = &sc->vr_stat;

		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
		printf("Outbound good frames : %ju\n",
		    (uintmax_t)stat->tx_ok);
		printf("Inbound good frames : %ju\n",
		    (uintmax_t)stat->rx_ok);
		printf("Outbound errors : %u\n", stat->tx_errors);
		printf("Inbound errors : %u\n", stat->rx_errors);
		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
		printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs);
		printf("Inbound FIFO overflows : %d\n",
		    stat->rx_fifo_overflows);
		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
		printf("Inbound frame alignment errors : %u\n",
		    stat->rx_alignment);
		printf("Inbound giant frames : %u\n", stat->rx_giants);
		printf("Inbound runt frames : %u\n", stat->rx_runts);
		printf("Outbound aborted with excessive collisions : %u\n",
		    stat->tx_abort);
		printf("Outbound collisions : %u\n", stat->tx_collisions);
		printf("Outbound late collisions : %u\n",
		    stat->tx_late_collisions);
		printf("Outbound underrun : %u\n", stat->tx_underrun);
		printf("PCI bus errors : %u\n", stat->bus_errors);
		printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
		    stat->num_restart);
	}

	return (error);
}
