--- //depot/vendor/freebsd/src/sys/pci/if_dc.c 2003/06/08 03:15:18 +++ //depot/user/mux/busdma/pci/if_dc.c 2003/07/03 11:38:26 @@ -110,8 +110,6 @@ #include -#include /* for vtophys */ -#include /* for vtophys */ #include #include #include @@ -213,8 +211,8 @@ static int dc_resume (device_t); static void dc_acpi (device_t); static struct dc_type *dc_devtype (device_t); -static int dc_newbuf (struct dc_softc *, int, struct mbuf *); -static int dc_encap (struct dc_softc *, struct mbuf *, u_int32_t *); +static int dc_newbuf (struct dc_softc *, int); +static int dc_encap (struct dc_softc *, struct mbuf *); static void dc_pnic_rx_bug_war (struct dc_softc *, int); static int dc_rx_resync (struct dc_softc *); static void dc_rxeof (struct dc_softc *); @@ -274,6 +272,11 @@ static void dc_decode_leaf_sym (struct dc_softc *, struct dc_eblock_sym *); static void dc_apply_fixup (struct dc_softc *, int); +static void dc_dma_map_txbuf (void *, bus_dma_segment_t *, int, bus_size_t, + int); +static void dc_dma_map_rxbuf (void *, bus_dma_segment_t *, int, bus_size_t, + int); + #ifdef DC_USEIOSPACE #define DC_RES SYS_RES_IOPORT #define DC_RID DC_PCI_CFBIO @@ -1096,14 +1099,14 @@ DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); sc->dc_cdata.dc_tx_cnt++; sframe = &sc->dc_ldata->dc_tx_list[i]; - sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf; + sp = sc->dc_cdata.dc_sbuf; bzero(sp, DC_SFRAME_LEN); - sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf); + sframe->dc_data = sc->dc_saddr; sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT; - sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf; + sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) @@ -1286,14 +1289,14 @@ DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); sc->dc_cdata.dc_tx_cnt++; sframe = &sc->dc_ldata->dc_tx_list[i]; - sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf; + sp = sc->dc_cdata.dc_sbuf; bzero(sp, DC_SFRAME_LEN); - sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf); + sframe->dc_data = sc->dc_saddr; sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT; - sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf; + sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) @@ -1808,6 +1811,16 @@ } } +static void +dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + u_int32_t *paddr; + + KASSERT(nseg == 1, ("wrong number of segments, should be 1")); + paddr = arg; + *paddr = segs->ds_addr; +} + /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. @@ -1822,6 +1835,7 @@ struct ifnet *ifp; u_int32_t revision; int unit, error = 0, rid, mac_offset; + int i; u_int8_t *mac; sc = device_get_softc(dev); @@ -2078,16 +2092,93 @@ sc->dc_unit = unit; bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); - sc->dc_ldata = contigmalloc(sizeof(struct dc_list_data), M_DEVBUF, - M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); + /* + * Allocate a busdma tag and DMA safe memory for TX/RX descriptors. 
+ */ + error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, + BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct dc_list_data), 1, + sizeof(struct dc_list_data), 0, NULL, NULL, &sc->dc_ltag); + if (error) { + printf("dc%d: failed to allocate busdma tag\n", unit); + error = ENXIO; + goto fail; + } + + error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata, + BUS_DMA_NOWAIT, &sc->dc_lmap); + if (error) { + printf("dc%d: failed to allocate DMA safe memory\n", unit); + error = ENXIO; + goto fail; + } + + error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata, + sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr, + BUS_DMA_NOWAIT); + if (error) { + printf("dc%d: cannot get address of the descriptors\n", unit); + error = ENXIO; + goto fail; + } + bzero(sc->dc_ldata, sizeof(struct dc_list_data)); + + error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, + BUS_SPACE_MAXADDR, NULL, NULL, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, + DC_SFRAME_LEN + DC_MIN_FRAMELEN, 0, NULL, NULL, &sc->dc_stag); + if (error) { + printf("dc%d: failed to allocate busdma tag\n", unit); + error = ENXIO; + goto fail; + } + error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf, + BUS_DMA_NOWAIT, &sc->dc_smap); + if (error) { + printf("dc%d: failed to allocate DMA safe memory\n", unit); + error = ENXIO; + goto fail; + } + error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf, + DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT); + if (error) { + printf("dc%d: cannot get address of the descriptors\n", unit); + error = ENXIO; + goto fail; + } - if (sc->dc_ldata == NULL) { - printf("dc%d: no memory for list buffers!\n", unit); + error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, + BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * DC_TX_LIST_CNT, + DC_TX_LIST_CNT, MCLBYTES, 0, NULL, NULL, &sc->dc_mtag); + if (error) { + printf("dc%d: failed to allocate busdma tag\n", unit); error = ENXIO; goto fail; } - bzero(sc->dc_ldata, sizeof(struct dc_list_data)); + /* Create the TX/RX busdma maps. 
*/ + for (i = 0; i < DC_TX_LIST_CNT; i++) { + error = bus_dmamap_create(sc->dc_mtag, 0, + &sc->dc_cdata.dc_tx_map[i]); + if (error) { + printf("dc%d: failed to init TX ring\n", unit); + error = ENXIO; + goto fail; + } + } + for (i = 0; i < DC_RX_LIST_CNT; i++) { + error = bus_dmamap_create(sc->dc_mtag, 0, + &sc->dc_cdata.dc_rx_map[i]); + if (error) { + printf("dc%d: failed to init RX ring\n", unit); + error = ENXIO; + goto fail; + } + } + error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap); + if (error) { + printf("dc%d: failed to init RX ring\n", unit); + error = ENXIO; + goto fail; + } ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; @@ -2232,6 +2323,7 @@ struct dc_softc *sc; struct ifnet *ifp; struct dc_mediainfo *m; + int i; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized")); @@ -2255,8 +2347,22 @@ if (sc->dc_res) bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); - if (sc->dc_ldata) - contigfree(sc->dc_ldata, sizeof(struct dc_list_data), M_DEVBUF); + if (sc->dc_cdata.dc_sbuf != NULL) + bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap); + if (sc->dc_ldata != NULL) + bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap); + for (i = 0; i < DC_TX_LIST_CNT; i++) + bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_tx_map[i]); + for (i = 0; i < DC_RX_LIST_CNT; i++) + bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); + bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap); + if (sc->dc_stag) + bus_dma_tag_destroy(sc->dc_stag); + if (sc->dc_mtag) + bus_dma_tag_destroy(sc->dc_mtag); + if (sc->dc_ltag) + bus_dma_tag_destroy(sc->dc_ltag); + free(sc->dc_pnic_rx_buf, M_DEVBUF); while (sc->dc_mi != NULL) { @@ -2285,15 +2391,19 @@ cd = &sc->dc_cdata; ld = sc->dc_ldata; for (i = 0; i < DC_TX_LIST_CNT; i++) { - nexti = (i == (DC_TX_LIST_CNT - 1)) ? 0 : i+1; - ld->dc_tx_list[i].dc_next = vtophys(&ld->dc_tx_list[nexti]); + if (i == DC_TX_LIST_CNT - 1) + nexti = 0; + else + nexti = i + 1; + ld->dc_tx_list[i].dc_next = DC_TXDESC(sc, nexti); cd->dc_tx_chain[i] = NULL; ld->dc_tx_list[i].dc_data = 0; ld->dc_tx_list[i].dc_ctl = 0; } cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; - + bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); return (0); } @@ -2314,45 +2424,60 @@ ld = sc->dc_ldata; for (i = 0; i < DC_RX_LIST_CNT; i++) { - if (dc_newbuf(sc, i, NULL) == ENOBUFS) + if (dc_newbuf(sc, i) != 0) return (ENOBUFS); - nexti = (i == (DC_RX_LIST_CNT - 1)) ? 0 : i+1; - ld->dc_rx_list[i].dc_next = vtophys(&ld->dc_rx_list[nexti]); + if (i == DC_RX_LIST_CNT - 1) + nexti = 0; + else + nexti = i + 1; + ld->dc_rx_list[i].dc_next = DC_RXDESC(sc, nexti); } cd->dc_rx_prod = 0; + bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); + return (0); +} + +static void +dc_dma_map_rxbuf(arg, segs, nseg, mapsize, error) + void *arg; + bus_dma_segment_t *segs; + int nseg; + bus_size_t mapsize; + int error; +{ + struct dc_softc *sc; + struct dc_desc *c; - return (0); + sc = arg; + c = &sc->dc_ldata->dc_rx_list[sc->dc_cdata.dc_rx_cur]; + if (error) { + sc->dc_cdata.dc_rx_err = error; + return; + } + + KASSERT(nseg == 1, ("wrong number of segments, should be 1")); + sc->dc_cdata.dc_rx_err = 0; + c->dc_data = segs->ds_addr; + c->dc_ctl = DC_RXCTL_RLINK | DC_RXLEN; + c->dc_status = DC_RXSTAT_OWN; } /* * Initialize an RX descriptor and attach an MBUF cluster. 
*/ static int -dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m) +dc_newbuf(struct dc_softc *sc, int i) { struct mbuf *m_new = NULL; - struct dc_desc *c; + bus_dmamap_t tmp; + int error; - c = &sc->dc_ldata->dc_rx_list[i]; - - if (m == NULL) { - MGETHDR(m_new, M_DONTWAIT, MT_DATA); - if (m_new == NULL) - return (ENOBUFS); - - MCLGET(m_new, M_DONTWAIT); - if (!(m_new->m_flags & M_EXT)) { - m_freem(m_new); - return (ENOBUFS); - } - m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; - } else { - m_new = m; - m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; - m_new->m_data = m_new->m_ext.ext_buf; - } - + m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); + if (m_new == NULL) + return (ENOBUFS); + m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_adj(m_new, sizeof(u_int64_t)); /* @@ -2363,11 +2488,27 @@ if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) bzero(mtod(m_new, char *), m_new->m_len); + sc->dc_cdata.dc_rx_cur = i; + error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_sparemap, m_new, + dc_dma_map_rxbuf, sc, 0); + if (error) { + m_freem(m_new); + return (error); + } + if (sc->dc_cdata.dc_rx_err != 0) { + m_freem(m_new); + return (sc->dc_cdata.dc_rx_err); + } + + bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); + tmp = sc->dc_cdata.dc_rx_map[i]; + sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap; + sc->dc_sparemap = tmp; sc->dc_cdata.dc_rx_chain[i] = m_new; - c->dc_data = vtophys(mtod(m_new, caddr_t)); - c->dc_ctl = DC_RXCTL_RLINK | DC_RXLEN; - c->dc_status = DC_RXSTAT_OWN; - + bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], + BUS_DMASYNC_PREREAD); + bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); return (0); } @@ -2449,7 +2590,7 @@ /* If this is the last buffer, break out. */ if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) break; - dc_newbuf(sc, i, m); + dc_newbuf(sc, i); DC_INC(i, DC_RX_LIST_CNT); } @@ -2474,7 +2615,7 @@ * the status word to make it look like a successful * frame reception. */ - dc_newbuf(sc, i, m); + dc_newbuf(sc, i); bcopy(ptr, mtod(m, char *), total_len); cur_rx->dc_status = rxstat | DC_RXSTAT_FIRSTFRAG; } @@ -2531,6 +2672,7 @@ ifp = &sc->arpcom.ac_if; i = sc->dc_cdata.dc_rx_prod; + bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); while (!(sc->dc_ldata->dc_rx_list[i].dc_status & DC_RXSTAT_OWN)) { #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) { @@ -2542,6 +2684,8 @@ cur_rx = &sc->dc_ldata->dc_rx_list[i]; rxstat = cur_rx->dc_status; m = sc->dc_cdata.dc_rx_chain[i]; + bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], + BUS_DMASYNC_POSTREAD); total_len = DC_RXBYTES(rxstat); if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { @@ -2558,8 +2702,6 @@ } } - sc->dc_cdata.dc_rx_chain[i] = NULL; - /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: @@ -2575,7 +2717,7 @@ ifp->if_ierrors++; if (rxstat & DC_RXSTAT_COLLSEEN) ifp->if_collisions++; - dc_newbuf(sc, i, m); + dc_newbuf(sc, i); if (rxstat & DC_RXSTAT_CRCERR) { DC_INC(i, DC_RX_LIST_CNT); continue; @@ -2598,7 +2740,7 @@ * if the allocation fails, then use m_devget and leave the * existing buffer in the receive ring. 
*/ - if (dc_quick && dc_newbuf(sc, i, NULL) == 0) { + if (dc_quick && dc_newbuf(sc, i) == 0) { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; DC_INC(i, DC_RX_LIST_CNT); @@ -2609,7 +2751,7 @@ m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); - dc_newbuf(sc, i, m); + dc_newbuf(sc, i); DC_INC(i, DC_RX_LIST_CNT); if (m0 == NULL) { ifp->if_ierrors++; @@ -2644,6 +2786,7 @@ * Go through our tx list and free mbufs for those * frames that have been transmitted. */ + bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); idx = sc->dc_cdata.dc_tx_cons; while (idx != sc->dc_cdata.dc_tx_prod) { @@ -2653,7 +2796,7 @@ if (txstat & DC_TXSTAT_OWN) break; - if (!(cur_tx->dc_ctl & DC_TXCTL_LASTFRAG) || + if (!(cur_tx->dc_ctl & DC_TXCTL_FIRSTFRAG) || cur_tx->dc_ctl & DC_TXCTL_SETUP) { if (cur_tx->dc_ctl & DC_TXCTL_SETUP) { /* @@ -2714,6 +2857,11 @@ ifp->if_opackets++; if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { + bus_dmamap_sync(sc->dc_mtag, + sc->dc_cdata.dc_tx_map[idx], + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->dc_mtag, + sc->dc_cdata.dc_tx_map[idx]); m_freem(sc->dc_cdata.dc_tx_chain[idx]); sc->dc_cdata.dc_tx_chain[idx] = NULL; } @@ -3003,16 +3151,69 @@ DC_UNLOCK(sc); } +static void +dc_dma_map_txbuf(arg, segs, nseg, mapsize, error) + void *arg; + bus_dma_segment_t *segs; + int nseg; + bus_size_t mapsize; + int error; +{ + struct dc_softc *sc; + struct dc_desc *f; + int cur, first, frag, i; + + sc = arg; + if (error) { + sc->dc_cdata.dc_tx_err = error; + return; + } + + first = cur = frag = sc->dc_cdata.dc_tx_prod; + for (i = 0; i < nseg; i++) { + if ((sc->dc_flags & DC_TX_ADMTEK_WAR) && + (frag == (DC_TX_LIST_CNT - 1)) && + (first != sc->dc_cdata.dc_tx_first)) { + bus_dmamap_unload(sc->dc_mtag, + sc->dc_cdata.dc_tx_map[first]); + sc->dc_cdata.dc_tx_err = ENOBUFS; + return; + } + + f = &sc->dc_ldata->dc_tx_list[frag]; + f->dc_ctl = DC_TXCTL_TLINK | segs[i].ds_len; + if (i == 0) { + f->dc_status = 0; + f->dc_ctl |= DC_TXCTL_FIRSTFRAG; + } else + f->dc_status = DC_TXSTAT_OWN; + f->dc_data = segs[i].ds_addr; + cur = frag; + DC_INC(frag, DC_TX_LIST_CNT); + } + + sc->dc_cdata.dc_tx_err = 0; + sc->dc_cdata.dc_tx_prod = frag; + sc->dc_cdata.dc_tx_cnt += nseg; + sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_LASTFRAG; + if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) + sc->dc_ldata->dc_tx_list[first].dc_ctl |= DC_TXCTL_FINT; + if (sc->dc_flags & DC_TX_INTR_ALWAYS) + sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; + if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) + sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; + sc->dc_ldata->dc_tx_list[first].dc_status = DC_TXSTAT_OWN; +} + /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int -dc_encap(struct dc_softc *sc, struct mbuf *m_head, u_int32_t *txidx) +dc_encap(struct dc_softc *sc, struct mbuf *m_head) { - struct dc_desc *f = NULL; struct mbuf *m; - int frag, cur, cnt = 0, chainlen = 0; + int error, idx, chainlen = 0; /* * If there's no way we can send any packets, return now. @@ -3026,7 +3227,6 @@ * by all packets, we'll m_defrag long chains so that they * do not use up the entire list, even if they would fit. */ - for (m = m_head; m != NULL; m = m->m_next) chainlen++; @@ -3043,49 +3243,18 @@ * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
*/ - m = m_head; - cur = frag = *txidx; - - for (m = m_head; m != NULL; m = m->m_next) { - if (m->m_len != 0) { - if (sc->dc_flags & DC_TX_ADMTEK_WAR) { - if (*txidx != sc->dc_cdata.dc_tx_prod && - frag == (DC_TX_LIST_CNT - 1)) - return (ENOBUFS); - } - if ((DC_TX_LIST_CNT - - (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) - return (ENOBUFS); - - f = &sc->dc_ldata->dc_tx_list[frag]; - f->dc_ctl = DC_TXCTL_TLINK | m->m_len; - if (cnt == 0) { - f->dc_status = 0; - f->dc_ctl |= DC_TXCTL_FIRSTFRAG; - } else - f->dc_status = DC_TXSTAT_OWN; - f->dc_data = vtophys(mtod(m, vm_offset_t)); - cur = frag; - DC_INC(frag, DC_TX_LIST_CNT); - cnt++; - } - } - - if (m != NULL) - return (ENOBUFS); - - sc->dc_cdata.dc_tx_cnt += cnt; - sc->dc_cdata.dc_tx_chain[cur] = m_head; - sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_LASTFRAG; - if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) - sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= DC_TXCTL_FINT; - if (sc->dc_flags & DC_TX_INTR_ALWAYS) - sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; - if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) - sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; - sc->dc_ldata->dc_tx_list[*txidx].dc_status = DC_TXSTAT_OWN; - *txidx = frag; - + idx = sc->dc_cdata.dc_tx_prod; + error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], + m_head, dc_dma_map_txbuf, sc, 0); + if (error) + return (error); + if (sc->dc_cdata.dc_tx_err != 0) + return (sc->dc_cdata.dc_tx_err); + sc->dc_cdata.dc_tx_chain[idx] = m_head; + bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], + BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); return (0); } @@ -3117,7 +3286,7 @@ return; } - idx = sc->dc_cdata.dc_tx_prod; + idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod; while (sc->dc_cdata.dc_tx_chain[idx] == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); @@ -3137,11 +3306,12 @@ } } - if (dc_encap(sc, m_head, &idx)) { + if (dc_encap(sc, m_head)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } + idx = sc->dc_cdata.dc_tx_prod; /* * If there's a BPF listener, bounce a copy of this frame @@ -3156,7 +3326,6 @@ } /* Transmit */ - sc->dc_cdata.dc_tx_prod = idx; if (!(sc->dc_flags & DC_TX_POLL)) CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); @@ -3276,15 +3445,15 @@ } /* - * Init tx descriptors. + * Init TX descriptors. */ dc_list_tx_init(sc); /* * Load the address of the RX list. */ - CSR_WRITE_4(sc, DC_RXADDR, vtophys(&sc->dc_ldata->dc_rx_list[0])); - CSR_WRITE_4(sc, DC_TXADDR, vtophys(&sc->dc_ldata->dc_tx_list[0])); + CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0)); + CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0)); /* * Enable interrupts. @@ -3488,8 +3657,10 @@ static void dc_stop(struct dc_softc *sc) { + struct ifnet *ifp; + struct dc_list_data *ld; + struct dc_chain_data *cd; int i; - struct ifnet *ifp; DC_LOCK(sc); @@ -3509,33 +3680,36 @@ CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); sc->dc_link = 0; + ld = sc->dc_ldata; + cd = &sc->dc_cdata; + /* * Free data in the RX lists. */ for (i = 0; i < DC_RX_LIST_CNT; i++) { - if (sc->dc_cdata.dc_rx_chain[i] != NULL) { - m_freem(sc->dc_cdata.dc_rx_chain[i]); - sc->dc_cdata.dc_rx_chain[i] = NULL; + if (cd->dc_rx_chain[i] != NULL) { + m_freem(cd->dc_rx_chain[i]); + cd->dc_rx_chain[i] = NULL; } } - bzero(&sc->dc_ldata->dc_rx_list, sizeof(sc->dc_ldata->dc_rx_list)); + bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list)); /* * Free the TX list buffers. 
*/ for (i = 0; i < DC_TX_LIST_CNT; i++) { - if (sc->dc_cdata.dc_tx_chain[i] != NULL) { - if (sc->dc_ldata->dc_tx_list[i].dc_ctl & - DC_TXCTL_SETUP) { - sc->dc_cdata.dc_tx_chain[i] = NULL; + if (cd->dc_tx_chain[i] != NULL) { + if ((ld->dc_tx_list[i].dc_ctl & DC_TXCTL_SETUP) || + !(ld->dc_tx_list[i].dc_ctl & DC_TXCTL_FIRSTFRAG)) { + cd->dc_tx_chain[i] = NULL; continue; } - m_freem(sc->dc_cdata.dc_tx_chain[i]); - sc->dc_cdata.dc_tx_chain[i] = NULL; + bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]); + m_freem(cd->dc_tx_chain[i]); + cd->dc_tx_chain[i] = NULL; } } - - bzero(&sc->dc_ldata->dc_tx_list, sizeof(sc->dc_ldata->dc_tx_list)); + bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list)); DC_UNLOCK(sc); } --- //depot/vendor/freebsd/src/sys/pci/if_dcreg.h 2003/06/08 03:15:18 +++ //depot/user/mux/busdma/pci/if_dcreg.h 2003/07/03 11:38:26 @@ -464,7 +464,13 @@ #define DC_MIN_FRAMELEN 60 #define DC_RXLEN 1536 -#define DC_INC(x, y) (x) = (x + 1) % y +#define DC_INC(x, y) (x) = (x + 1) % y + +/* Macros to easily get the DMA address of a descriptor. */ +#define DC_RXDESC(sc, i) (sc->dc_laddr + \ + (uintptr_t)(sc->dc_ldata->dc_rx_list + i) - (uintptr_t)sc->dc_ldata) +#define DC_TXDESC(sc, i) (sc->dc_laddr + \ + (uintptr_t)(sc->dc_ldata->dc_tx_list + i) - (uintptr_t)sc->dc_ldata) struct dc_list_data { struct dc_desc dc_rx_list[DC_RX_LIST_CNT]; @@ -474,11 +480,17 @@ struct dc_chain_data { struct mbuf *dc_rx_chain[DC_RX_LIST_CNT]; struct mbuf *dc_tx_chain[DC_TX_LIST_CNT]; - u_int32_t dc_sbuf[DC_SFRAME_LEN/sizeof(u_int32_t)]; + bus_dmamap_t dc_rx_map[DC_RX_LIST_CNT]; + bus_dmamap_t dc_tx_map[DC_TX_LIST_CNT]; + u_int32_t *dc_sbuf; u_int8_t dc_pad[DC_MIN_FRAMELEN]; + int dc_tx_err; + int dc_tx_first; int dc_tx_prod; int dc_tx_cons; int dc_tx_cnt; + int dc_rx_err; + int dc_rx_cur; int dc_rx_prod; }; @@ -700,6 +712,14 @@ struct arpcom arpcom; /* interface info */ bus_space_handle_t dc_bhandle; /* bus space handle */ bus_space_tag_t dc_btag; /* bus space tag */ + bus_dma_tag_t dc_ltag; /* tag for descriptor ring */ + bus_dmamap_t dc_lmap; /* map for descriptor ring */ + u_int32_t dc_laddr; /* DMA address of dc_ldata */ + bus_dma_tag_t dc_mtag; /* tag for mbufs */ + bus_dmamap_t dc_sparemap; + bus_dma_tag_t dc_stag; /* tag for the setup frame */ + bus_dmamap_t dc_smap; /* map for the setup frame */ + u_int32_t dc_saddr; /* DMA address of setup frame */ void *dc_intrhand; struct resource *dc_irq; struct resource *dc_res; @@ -730,7 +750,6 @@ int rxcycles; /* ... when polling */ #endif int suspended; /* 0 = normal 1 = suspended */ - u_int32_t saved_maps[5]; /* pci data */ u_int32_t saved_biosaddr; u_int8_t saved_intline; @@ -1178,8 +1197,3 @@ u_int8_t dc_reset_len; /* u_int16_t dc_reset_dat[n]; */ }; - -#ifdef __alpha__ -#undef vtophys -#define vtophys(va) alpha_XXX_dmamap((vm_offset_t)va) -#endif
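
The attach path in this patch repeats the same busdma recipe three times (for the descriptor ring, the setup frame, and the mbuf tag): bus_dma_tag_create(), then bus_dmamem_alloc(), then bus_dmamap_load() with a callback that records the single segment's bus address, in the style of dc_dma_map_addr(). The sketch below is not part of the patch; it is a minimal, self-contained illustration of that recipe, and the names example_dma_alloc() and example_dma_map_addr() are invented for the example. Error unwinding (releasing the tag and memory when a later step fails) is omitted for brevity.

/*
 * Illustrative sketch only, not part of the patch: the tag-create /
 * dmamem-alloc / dmamap-load sequence used above for the descriptor
 * ring and the setup frame, with the bus address captured through a
 * bus_dmamap_load() callback as dc_dma_map_addr() does.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>

/* Record the single segment's bus address in *arg. */
static void
example_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("example_dma_map_addr: %d segments", nseg));
	paddr = arg;
	*paddr = segs[0].ds_addr;
}

/*
 * Allocate 'size' bytes of DMA-safe memory below 4GB and hand back the
 * tag, the map, the kernel virtual address and the bus address.
 */
static int
example_dma_alloc(bus_size_t size, bus_dma_tag_t *tag, bus_dmamap_t *map,
    void **vaddr, bus_addr_t *paddr)
{
	int error;

	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    size, 1, size,			/* maxsize, nsegments, maxsegsz */
	    0, NULL, NULL, tag);		/* flags, lockfunc, lockarg */
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(*tag, vaddr, BUS_DMA_NOWAIT, map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(*tag, *map, *vaddr, size,
	    example_dma_map_addr, paddr, BUS_DMA_NOWAIT);
	return (error);
}

In the patch itself this sequence is written out inline for each allocation because the size and maximum-segment arguments differ; the per-packet TX/RX buffers instead go through bus_dmamap_load_mbuf() with the multi-segment callbacks dc_dma_map_txbuf() and dc_dma_map_rxbuf().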