netmap-release/sys/net/000755 000765 000024 00000000000 12227602301 015633 5ustar00luigistaff000000 000000 netmap-release/sys/net/netmap.h000644 000765 000024 00000035077 12225031332 017302 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the * distribution. * * 3. Neither the name of the authors nor the names of their contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY MATTEO LANDI AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MATTEO LANDI OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * $FreeBSD: head/sys/net/netmap.h 251139 2013-05-30 14:07:14Z luigi $ * * Definitions of constants and the structures used by the netmap * framework, for the part visible to both kernel and userspace. * Detailed info on netmap is available with "man netmap" or at * * http://info.iet.unipi.it/~luigi/netmap/ */ #ifndef _NET_NETMAP_H_ #define _NET_NETMAP_H_ /* * --- Netmap data structures --- * * The data structures used by netmap are shown below. Those in * capital letters are in an mmapp()ed area shared with userspace, * while others are private to the kernel. * Shared structures do not contain pointers but only memory * offsets, so that addressing is portable between kernel and userspace. softc +----------------+ | standard fields| | if_pspare[0] ----------+ +----------------+ | | +----------------+<------+ |(netmap_adapter)| | | netmap_kring | tx_rings *--------------------------------->+---------------+ | | netmap_kring | ring *---------. | rx_rings *--------->+---------------+ | nr_hwcur | | +----------------+ | ring *--------. | nr_hwavail | V | nr_hwcur | | | selinfo | | | nr_hwavail | | +---------------+ . | selinfo | | | ... | . +---------------+ | |(ntx+1 entries)| | .... | | | | |(nrx+1 entries)| | +---------------+ | | | KERNEL +---------------+ | | ==================================================================== | USERSPACE | NETMAP_RING +---->+-------------+ / | cur | NETMAP_IF (nifp, one per file desc.) 
/ | avail | +---------------+ / | buf_ofs | | ni_tx_rings | / +=============+ | ni_rx_rings | / | buf_idx | slot[0] | | / | len, flags | | | / +-------------+ +===============+ / | buf_idx | slot[1] | txring_ofs[0] | (rel.to nifp)--' | len, flags | | txring_ofs[1] | +-------------+ (num_rings+1 entries) (nr_num_slots entries) | txring_ofs[n] | | buf_idx | slot[n-1] +---------------+ | len, flags | | rxring_ofs[0] | +-------------+ | rxring_ofs[1] | (num_rings+1 entries) | txring_ofs[n] | +---------------+ * The private descriptor ('softc' or 'adapter') of each interface * is extended with a "struct netmap_adapter" containing netmap-related * info (see description in dev/netmap/netmap_kernel.h. * Among other things, tx_rings and rx_rings point to the arrays of * "struct netmap_kring" which in turn reach the various * "struct netmap_ring", shared with userspace. * The NETMAP_RING is the userspace-visible replica of the NIC ring. * Each slot has the index of a buffer, its length and some flags. * In user space, the buffer address is computed as * (char *)ring + buf_ofs + index*NETMAP_BUF_SIZE * In the kernel, buffers do not necessarily need to be contiguous, * and the virtual and physical addresses are derived through * a lookup table. * * struct netmap_slot: * * buf_idx is the index of the buffer associated to the slot. * len is the length of the payload * NS_BUF_CHANGED must be set whenever userspace wants * to change buf_idx (it might be necessary to * reprogram the NIC slot) * NS_REPORT must be set if we want the NIC to generate an interrupt * when this slot is used. Leaving it to 0 improves * performance. * NS_FORWARD if set on a receive ring, and the device is in * transparent mode, buffers released with the flag set * will be forwarded to the 'other' side (host stack * or NIC, respectively) on the next select() or ioctl() * * The following will be supported from NETMAP_API = 5 * NS_NO_LEARN on a VALE switch, do not 'learn' the source port for * this packet. * NS_INDIRECT the netmap buffer contains a 64-bit pointer to * the actual userspace buffer. This may be useful * to reduce copies in a VM environment. * NS_MOREFRAG Part of a multi-segment frame. The last (or only) * segment must not have this flag. * NS_PORT_MASK the high 8 bits of the flag, if not zero, indicate the * destination port for the VALE switch, overriding * the lookup table. */ struct netmap_slot { uint32_t buf_idx; /* buffer index */ uint16_t len; /* packet length, to be copied to/from the hw ring */ uint16_t flags; /* buf changed, etc. */ #define NS_BUF_CHANGED 0x0001 /* must resync the map, buffer changed */ #define NS_REPORT 0x0002 /* ask the hardware to report results * e.g. by generating an interrupt */ #define NS_FORWARD 0x0004 /* pass packet to the other endpoint * (host stack or device) */ #define NS_NO_LEARN 0x0008 #define NS_INDIRECT 0x0010 #define NS_MOREFRAG 0x0020 #define NS_PORT_SHIFT 8 #define NS_PORT_MASK (0xff << NS_PORT_SHIFT) /* * on receive rings, the high 8 bits are the number of fragments. */ #define NS_RFRAGS(_slot) ( ((_slot)->flags >> 8) & 0xff) uint64_t ptr; /* pointer for indirect buffers */ }; /* * Netmap representation of a TX or RX ring (also known as "queue"). * This is a queue implemented as a fixed-size circular array. * At the software level, two fields are important: avail and cur. * * In TX rings: * avail indicates the number of slots available for transmission. * It is updated by the kernel after every netmap system call. 
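 *
 *	A minimal userspace sketch of these TX conventions (the cur/avail
 *	handling detailed just below), assuming the NETMAP_BUF() and
 *	NETMAP_RING_NEXT() helpers from netmap_user.h; 'fd', 'frame' and
 *	'len' are placeholders supplied by the application:
 *
 *		if (ring->avail > 0) {
 *			struct netmap_slot *slot = &ring->slot[ring->cur];
 *			char *dst = NETMAP_BUF(ring, slot->buf_idx);
 *			memcpy(dst, frame, len);	// fill the netmap buffer
 *			slot->len = len;
 *			ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *			ring->avail--;			// one more slot queued
 *		}
 *		ioctl(fd, NIOCTXSYNC, NULL);		// hand the slots to the kernel
 *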
* It MUST BE decremented by the application when it appends a * packet. * cur indicates the slot to use for the next packet * to send (i.e. the "tail" of the queue). * It MUST BE incremented by the application before * netmap system calls to reflect the number of newly * sent packets. * It is checked by the kernel on netmap system calls * (normally unmodified by the kernel unless invalid). * * The kernel side of netmap uses two additional fields in its own * private ring structure, netmap_kring: * nr_hwcur is a copy of nr_cur on an NIOCTXSYNC. * nr_hwavail is the number of slots known as available by the * hardware. It is updated on an INTR (inc by the * number of packets sent) and on a NIOCTXSYNC * (decrease by nr_cur - nr_hwcur) * A special case, nr_hwavail is -1 if the transmit * side is idle (no pending transmits). * * In RX rings: * avail is the number of packets available (possibly 0). * It MUST BE decremented by the application when it consumes * a packet, and it is updated to nr_hwavail on a NIOCRXSYNC * cur indicates the first slot that contains a packet not * processed yet (the "head" of the queue). * It MUST BE incremented by the software when it consumes * a packet. * reserved indicates the number of buffers before 'cur' * that the application has still in use. Normally 0, * it MUST BE incremented by the application when it * does not return the buffer immediately, and decremented * when the buffer is finally freed. * * The kernel side of netmap uses two additional fields in the kring: * nr_hwcur is a copy of nr_cur on an NIOCRXSYNC * nr_hwavail is the number of packets available. It is updated * on INTR (inc by the number of new packets arrived) * and on NIOCRXSYNC (decreased by nr_cur - nr_hwcur). * * DATA OWNERSHIP/LOCKING: * The netmap_ring is owned by the user program and it is only * accessed or modified in the upper half of the kernel during * a system call. * * The netmap_kring is only modified by the upper half of the kernel. * * FLAGS * NR_TIMESTAMP updates the 'ts' field on each syscall. This is * a global timestamp for all packets. * NR_RX_TSTMP if set, the last 64 byte in each buffer will * contain a timestamp for the frame supplied by * the hardware (if supported) * NR_FORWARD if set, the NS_FORWARD flag in each slot of the * RX ring is checked, and if set the packet is * passed to the other side (host stack or device, * respectively). This permits bpf-like behaviour * or transparency for selected packets. */ struct netmap_ring { /* * nr_buf_base_ofs is meant to be used through macros. * It contains the offset of the buffer region from this * descriptor. */ const ssize_t buf_ofs; const uint32_t num_slots; /* number of slots in the ring. */ uint32_t avail; /* number of usable slots */ uint32_t cur; /* 'current' r/w position */ uint32_t reserved; /* not refilled before current */ const uint16_t nr_buf_size; uint16_t flags; #define NR_TIMESTAMP 0x0002 /* set timestamp on *sync() */ #define NR_FORWARD 0x0004 /* enable NS_FORWARD for ring */ #define NR_RX_TSTMP 0x0008 /* set rx timestamp in slots */ struct timeval ts; /* time of last *sync() */ /* the slots follow. This struct has variable size */ struct netmap_slot slot[0]; /* array of slots. */ }; /* * Netmap representation of an interface and its queue(s). * There is one netmap_if for each file descriptor on which we want * to select/poll. We assume that on each interface has the same number * of receive and transmit queues. 
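 *
 *	For instance (a sketch; nr_ringid and the NETMAP_*_RING constants
 *	are part of struct nmreq defined later in this file), a process can
 *	restrict the descriptor to a single hardware ring pair before
 *	issuing NIOCREGIF:
 *
 *		req.nr_ringid = NETMAP_HW_RING | 2;	// hw ring pair 2 only
 *		// req.nr_ringid = NETMAP_SW_RING;	// or: host stack ring only
 *		// req.nr_ringid = 0;			// or: all hardware rings
 *		ioctl(fd, NIOCREGIF, &req);
 *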
* select/poll operates on one or all pairs depending on the value of * nmr_queueid passed on the ioctl. */ struct netmap_if { char ni_name[IFNAMSIZ]; /* name of the interface. */ const u_int ni_version; /* API version, currently unused */ const u_int ni_rx_rings; /* number of rx rings */ const u_int ni_tx_rings; /* if zero, same as ni_rx_rings */ /* * The following array contains the offset of each netmap ring * from this structure. The first ni_tx_queues+1 entries refer * to the tx rings, the next ni_rx_queues+1 refer to the rx rings * (the last entry in each block refers to the host stack rings). * The area is filled up by the kernel on NIOCREG, * and then only read by userspace code. */ const ssize_t ring_ofs[0]; }; #ifndef NIOCREGIF /* * ioctl names and related fields * * NIOCGINFO takes a struct ifreq, the interface name is the input, * the outputs are number of queues and number of descriptor * for each queue (useful to set number of threads etc.). * * NIOCREGIF takes an interface name within a struct ifreq, * and activates netmap mode on the interface (if possible). * * For vale ports, starting with NETMAP_API = 5, * nr_tx_rings and nr_rx_rings specify how many software rings * are created (0 means 1). * * NIOCREGIF is also used to attach a NIC to a VALE switch. * In this case the name is vale*:ifname, and "nr_cmd" * is set to 'NETMAP_BDG_ATTACH' or 'NETMAP_BDG_DETACH'. * nr_ringid specifies which rings should be attached, 0 means all, * NETMAP_HW_RING + n means only the n-th ring. * The process can terminate after the interface has been attached. * * NIOCUNREGIF unregisters the interface associated to the fd. * this is deprecated and will go away. * * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues, * whose identity is set in NIOCREGIF through nr_ringid * * NETMAP_API is the API version. */ /* * struct nmreq overlays a struct ifreq */ struct nmreq { char nr_name[IFNAMSIZ]; uint32_t nr_version; /* API version */ #define NETMAP_API 4 /* current version */ uint32_t nr_offset; /* nifp offset in the shared region */ uint32_t nr_memsize; /* size of the shared region */ uint32_t nr_tx_slots; /* slots in tx rings */ uint32_t nr_rx_slots; /* slots in rx rings */ uint16_t nr_tx_rings; /* number of tx rings */ uint16_t nr_rx_rings; /* number of rx rings */ uint16_t nr_ringid; /* ring(s) we care about */ #define NETMAP_HW_RING 0x4000 /* low bits indicate one hw ring */ #define NETMAP_SW_RING 0x2000 /* process the sw ring */ #define NETMAP_NO_TX_POLL 0x1000 /* no automatic txsync on poll */ #define NETMAP_RING_MASK 0xfff /* the ring number */ uint16_t nr_cmd; #define NETMAP_BDG_ATTACH 1 /* attach the NIC */ #define NETMAP_BDG_DETACH 2 /* detach the NIC */ #define NETMAP_BDG_LOOKUP_REG 3 /* register lookup function */ #define NETMAP_BDG_LIST 4 /* get bridge's info */ uint16_t nr_arg1; #define NETMAP_BDG_HOST 1 /* attach the host stack on ATTACH */ uint16_t nr_arg2; uint32_t spare2[3]; }; /* * FreeBSD uses the size value embedded in the _IOWR to determine * how much to copy in/out. So we need it to match the actual * data structure we pass. 
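 *
 * An end-to-end usage sketch tying the pieces together (struct nmreq above,
 * the NIOC ioctls defined just below, and the NETMAP_ helpers from
 * netmap_user.h). "em0" and process_frame() are placeholders, and error
 * handling is omitted:
 *
 *	struct nmreq req;
 *	struct netmap_if *nifp;
 *	struct netmap_ring *rxring;
 *	void *mem;
 *	int fd = open("/dev/netmap", O_RDWR);
 *
 *	bzero(&req, sizeof(req));
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	req.nr_version = NETMAP_API;
 *	ioctl(fd, NIOCREGIF, &req);			// put em0 in netmap mode
 *	mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);				// map the shared region
 *	nifp = NETMAP_IF(mem, req.nr_offset);
 *	rxring = NETMAP_RXRING(nifp, 0);		// first hardware rx ring
 *
 *	for (;;) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		poll(&pfd, 1, -1);			// wait for packets
 *		while (rxring->avail > 0) {
 *			struct netmap_slot *slot = &rxring->slot[rxring->cur];
 *
 *			process_frame(NETMAP_BUF(rxring, slot->buf_idx),
 *			    slot->len);
 *			rxring->cur = NETMAP_RING_NEXT(rxring, rxring->cur);
 *			rxring->avail--;		// return the buffer
 *		}
 *	}
 *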
We put some spares in the structure * to ease compatibility with other versions */ #define NIOCGINFO _IOWR('i', 145, struct nmreq) /* return IF info */ #define NIOCREGIF _IOWR('i', 146, struct nmreq) /* interface register */ #define NIOCUNREGIF _IO('i', 147) /* interface unregister */ #define NIOCTXSYNC _IO('i', 148) /* sync tx queues */ #define NIOCRXSYNC _IO('i', 149) /* sync rx queues */ #endif /* !NIOCREGIF */ #endif /* _NET_NETMAP_H_ */ netmap-release/sys/net/netmap_user.h000644 000765 000024 00000007076 12220335545 020346 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the * distribution. * * 3. Neither the name of the authors nor the names of their contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY MATTEO LANDI AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MATTEO LANDI OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * $FreeBSD: head/sys/net/netmap_user.h 241166 2012-10-03 21:41:20Z emaste $ * * This header contains the macros used to manipulate netmap structures * and packets in userspace. See netmap(4) for more information. * * The address of the struct netmap_if, say nifp, is computed from the * value returned from ioctl(.., NIOCREG, ...) and the mmap region: * ioctl(fd, NIOCREG, &req); * mem = mmap(0, ... 
); * nifp = NETMAP_IF(mem, req.nr_nifp); * (so simple, we could just do it manually) * * From there: * struct netmap_ring *NETMAP_TXRING(nifp, index) * struct netmap_ring *NETMAP_RXRING(nifp, index) * we can access ring->nr_cur, ring->nr_avail, ring->nr_flags * * ring->slot[i] gives us the i-th slot (we can access * directly plen, flags, bufindex) * * char *buf = NETMAP_BUF(ring, x) returns a pointer to * the buffer numbered x * * Since rings are circular, we have macros to compute the next index * i = NETMAP_RING_NEXT(ring, i); */ #ifndef _NET_NETMAP_USER_H_ #define _NET_NETMAP_USER_H_ #define _NETMAP_OFFSET(type, ptr, offset) \ ((type)(void *)((char *)(ptr) + (offset))) #define NETMAP_IF(b, o) _NETMAP_OFFSET(struct netmap_if *, b, o) #define NETMAP_TXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \ nifp, (nifp)->ring_ofs[index] ) #define NETMAP_RXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \ nifp, (nifp)->ring_ofs[index + (nifp)->ni_tx_rings + 1] ) #define NETMAP_BUF(ring, index) \ ((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size)) #define NETMAP_BUF_IDX(ring, buf) \ ( ((char *)(buf) - ((char *)(ring) + (ring)->buf_ofs) ) / \ (ring)->nr_buf_size ) #define NETMAP_RING_NEXT(r, i) \ ((i)+1 == (r)->num_slots ? 0 : (i) + 1 ) #define NETMAP_RING_FIRST_RESERVED(r) \ ( (r)->cur < (r)->reserved ? \ (r)->cur + (r)->num_slots - (r)->reserved : \ (r)->cur - (r)->reserved ) /* * Return 1 if the given tx ring is empty. */ #define NETMAP_TX_RING_EMPTY(r) ((r)->avail >= (r)->num_slots - 1) #endif /* _NET_NETMAP_USER_H_ */ netmap-release/sys/dev/000755 000765 000024 00000000000 12220335545 015631 5ustar00luigistaff000000 000000 netmap-release/sys/dev/netmap/000755 000765 000024 00000000000 12230554267 017122 5ustar00luigistaff000000 000000 netmap-release/sys/dev/netmap/cxgbe_netmap.h000644 000765 000024 00000026431 12220335545 021730 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * * netmap modifications for cxgbe 20120120 t4_sge seems to be the main file for processing. the device has several queues iq ingress queue (messages posted ?) 
fl freelist queue buffers are in sd->cl interrupts are serviced by t4_intr*() which does a atomic_cmpset_int() to run only one instance of the driver (service_iq()) and then clears the flag at the end. The dispatches in there makes a list (iql) of postponed work. Handlers are cpl_handler[] per packet type. received packets are t4_eth_rx() the main transmit routine is t4_main.c :: cxgbe_transmit() which ends into t4_sge.c :: t4_eth_tx() and eventually write_txpkt_wr() refill_fl() is called under lock X_RSPD_TYPE_FLBUF is a data packet, perhaps */ #include #include // #include // #include /* vtophys ? */ #include static int cxgbe_netmap_reg(struct ifnet *, int onoff); static int cxgbe_netmap_txsync(void *, u_int, int); static int cxgbe_netmap_rxsync(void *, u_int, int); static void cxgbe_netmap_lock_wrapper(void *, int, u_int); SYSCTL_NODE(_dev, OID_AUTO, cxgbe, CTLFLAG_RW, 0, "cxgbe card"); static void cxgbe_netmap_attach(struct port_info *pi) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = pi->ifp; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = 0; // qsize pi->num_tx_desc; na.num_rx_desc = 0; // XXX qsize pi->num_rx_desc; na.nm_txsync = cxgbe_netmap_txsync; na.nm_rxsync = cxgbe_netmap_rxsync; na.nm_register = cxgbe_netmap_reg; /* * adapter->rx_mbuf_sz is set by SIOCSETMTU, but in netmap mode * we allocate the buffers on the first register. So we must * disallow a SIOCSETMTU when if_capenable & IFCAP_NETMAP is set. */ na.buff_size = NETMAP_BUF_SIZE; netmap_attach(&na, pi->ntxq); } /* * support for netmap register/unregisted. We are already under core lock. * only called on the first init or the last unregister. */ static int cxgbe_netmap_reg(struct ifnet *ifp, int onoff) { struct adapter *adapter = ifp->if_softc; struct netmap_adapter *na = NA(ifp); int error = 0; if (!na) return EINVAL; #if 0 ixgbe_disable_intr(adapter); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (onoff) { ifp->if_capenable |= IFCAP_NETMAP; /* save if_transmit to restore it later */ na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; ixgbe_init_locked(adapter); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) { error = ENOMEM; goto fail; } } else { fail: /* restore if_transmit */ ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; ixgbe_init_locked(adapter); /* also enables intr */ } #endif return (error); } /* * Reconcile kernel and user view of the transmit ring. * * Userspace has filled tx slots up to cur (excluded). * The last unused slot previously known to the kernel was nr_hwcur, * and the last interrupt reported nr_hwavail slots available * (using the special value -1 to indicate idle transmit ring). * The function must first update avail to what the kernel * knows, subtract the newly used slots (cur - nr_hwcur) * from both avail and nr_hwavail, and set nr_hwcur = cur * issuing a dmamap_sync on all slots. * * Check parameters in the struct netmap_ring. * We don't use avail, only check for bogus values. * Make sure cur is valid, and same goes for buffer indexes and lengths. * To avoid races, read the values once, and never use those from * the ring afterwards. 
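 *
 * Condensed, the bookkeeping described above is roughly (a sketch only,
 * using the ring/kring fields defined in the netmap headers):
 *
 *	u_int n = ring->cur >= kring->nr_hwcur ?
 *	    ring->cur - kring->nr_hwcur :			// newly queued slots
 *	    ring->cur + kring->nkr_num_slots - kring->nr_hwcur;	// wrapped
 *
 *	ring->avail = kring->nr_hwavail;	// export what the kernel knows
 *	ring->avail -= n;
 *	kring->nr_hwavail -= n;
 *	kring->nr_hwcur = ring->cur;		// catch up with userspace
 *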
*/ static int cxgbe_netmap_txsync(void *a, u_int ring_nr, int flags) { #if 0 struct adapter *adapter = a; struct tx_ring *txr = &adapter->tx_rings[ring_nr]; struct netmap_adapter *na = NA(adapter->ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; int j, k, n = 0, lim = kring->nkr_num_slots - 1; /* generate an interrupt approximately every half ring */ int report_frequency = kring->nkr_num_slots >> 1; k = ring->cur; /* ring is not protected by any lock */ if ( (kring->nr_kflags & NR_REINIT) || k > lim) return netmap_ring_reinit(kring); if (do_lock) IXGBE_TX_LOCK(txr); bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD); /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ while (j != k) { struct netmap_slot *slot = &ring->slot[j]; struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[j]; union ixgbe_adv_tx_desc *curr = &txr->tx_base[j]; void *addr = NMB(slot); int flags = ((slot->flags & NS_REPORT) || j == 0 || j == report_frequency) ? IXGBE_TXD_CMD_RS : 0; int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { if (do_lock) IXGBE_TX_UNLOCK(txr); return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; curr->read.buffer_addr = htole64(vtophys(addr)); curr->read.olinfo_status = 0; curr->read.cmd_type_len = htole32(txr->txd_cmd | len | (IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP | flags) ); if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, unload and reload map */ netmap_reload_map(txr->txtag, txbuf->map, addr, na->buff_size); slot->flags &= ~NS_BUF_CHANGED; } bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_PREWRITE); j = (j == lim) ? 0 : j + 1; n++; } kring->nr_hwcur = k; /* the saved ring->cur */ ring->avail -= n; // XXX see others kring->nr_hwavail = ring->avail; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), k); } if (n == 0 || kring->nr_hwavail < 1) { /* record completed transmissions. TODO * * The datasheet discourages the use of TDH to find out the * number of sent packets; the right way to do so, is to check * the DD bit inside the status of a packet descriptor. On the * other hand, we avoid to set the `report status' bit for * *all* outgoing packets (kind of interrupt mitigation), * consequently the DD bit is not guaranteed to be set for all * the packets: thats way, for the moment we continue to use * TDH. */ j = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(ring_nr)); if (j >= kring->nkr_num_slots) { /* XXX can happen */ D("TDH wrap %d", j); j -= kring->nkr_num_slots; } int delta = j - txr->next_to_clean; if (delta) { /* new transmissions were completed, increment ring->nr_hwavail. */ if (delta < 0) delta += kring->nkr_num_slots; txr->next_to_clean = j; kring->nr_hwavail += delta; ring->avail = kring->nr_hwavail; } } if (do_lock) IXGBE_TX_UNLOCK(txr); #endif return 0; } /* * Reconcile kernel and user view of the receive ring. * * Userspace has read rx slots up to cur (excluded). * The last unread slot previously known to the kernel was nr_hwcur, * and the last interrupt reported nr_hwavail slots available. * We must subtract the newly consumed slots (cur - nr_hwcur) * from nr_hwavail, clearing the descriptors for the next * read, tell the hardware that they are available, * and set nr_hwcur = cur and avail = nr_hwavail. * issuing a dmamap_sync on all slots. 
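 *
 * Condensed (a sketch only, same conventions as the txsync comment above;
 * 'm' stands for the number of descriptors the NIC has completed since
 * the last call):
 *
 *	u_int n = ring->cur >= kring->nr_hwcur ?
 *	    ring->cur - kring->nr_hwcur :			// slots returned by the user
 *	    ring->cur + kring->nkr_num_slots - kring->nr_hwcur;
 *
 *	kring->nr_hwavail += m;			// import new receptions
 *	kring->nr_hwavail -= n;			// slots re-armed for the NIC
 *	kring->nr_hwcur = ring->cur;
 *	ring->avail = kring->nr_hwavail;	// export to userspace
 *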
*/ static int cxgbe_netmap_rxsync(void *a, u_int ring_nr, int flags) { #if 0 struct adapter *adapter = a; struct rx_ring *rxr = &adapter->rx_rings[ring_nr]; struct netmap_adapter *na = NA(adapter->ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; int j, k, n, lim = kring->nkr_num_slots - 1; k = ring->cur; /* ring is not protected by any lock */ if ( (kring->nr_kflags & NR_REINIT) || k > lim) return netmap_ring_reinit(kring); if (do_lock) IXGBE_RX_LOCK(rxr); /* XXX check sync modes */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); j = rxr->next_to_check; for (n = 0; ; n++) { union ixgbe_adv_rx_desc *curr = &rxr->rx_base[j]; uint32_t staterr = le32toh(curr->wb.upper.status_error); if ((staterr & IXGBE_RXD_STAT_DD) == 0) break; ring->slot[j].len = le16toh(curr->wb.upper.length); ring->slot[j].flags = kring->nkr_slot_flags; bus_dmamap_sync(rxr->ptag, rxr->rx_buffers[j].pmap, BUS_DMASYNC_POSTREAD); j = (j == lim) ? 0 : j + 1; } if (n) { rxr->next_to_check = j; kring->nr_hwavail += n; if (kring->nr_hwavail >= lim - 10) { ND("rx ring %d almost full %d", ring_nr, kring->nr_hwavail); } } /* skip past packets that userspace has already processed, * making them available for reception. * advance nr_hwcur and issue a bus_dmamap_sync on the * buffers so it is safe to write to them. * Also increase nr_hwavail */ j = kring->nr_hwcur; if (j != k) { /* userspace has read some packets. */ n = 0; while (j != k) { struct netmap_slot *slot = ring->slot + j; union ixgbe_adv_rx_desc *curr = &rxr->rx_base[j]; struct ixgbe_rx_buf *rxbuf = rxr->rx_buffers + j; void *addr = NMB(slot); if (addr == netmap_buffer_base) { /* bad buf */ if (do_lock) IXGBE_RX_UNLOCK(rxr); return netmap_ring_reinit(kring); } curr->wb.upper.status_error = 0; curr->read.pkt_addr = htole64(vtophys(addr)); if (slot->flags & NS_BUF_CHANGED) { netmap_reload_map(rxr->ptag, rxbuf->pmap, addr, na->buff_size); slot->flags &= ~NS_BUF_CHANGED; } bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD); j = (j == lim) ? 0 : j + 1; n++; } kring->nr_hwavail -= n; kring->nr_hwcur = ring->cur; bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* IMPORTANT: we must leave one free slot in the ring, * so move j back by one unit */ j = (j == 0) ? lim : j - 1; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), j); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail ; if (do_lock) IXGBE_RX_UNLOCK(rxr); #endif return 0; } netmap-release/sys/dev/netmap/if_bge_netmap.h000644 000765 000024 00000026216 12220335545 022054 0ustar00luigistaff000000 000000 /*- * (C) 2011 Luigi Rizzo - Universita` di Pisa * * BSD copyright * * $FreeBSD$ * * netmap support for if_bge.c * see ixgbe_netmap.h for details on the structure of the * various functions. */ #include #include #include #include /* vtophys ? */ #include /* * support for netmap register/unregisted. We are already under core lock. * only called on the first register or the last unregister. 
*/ static int bge_netmap_reg(struct ifnet *ifp, int onoff) { struct bge_softc *adapter = ifp->if_softc; struct netmap_adapter *na = NA(ifp); int error = 0; if (!na) return (EINVAL); /* not attached */ /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); bge_stop(adapter); if (onoff) { ifp->if_capenable |= IFCAP_NETMAP; /* save if_transmit and restore it */ na->if_transmit = ifp->if_transmit; /* XXX if_start and if_qflush ??? */ ifp->if_transmit = netmap_transmit; bge_init_locked(adapter); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) { error = ENOMEM; goto fail; } } else { fail: /* restore if_transmit */ ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; bge_init_locked(adapter); /* also enables intr */ } return (error); } /* * Reconcile kernel and user view of the transmit ring. */ static int bge_netmap_txsync(void *a, u_int ring_nr, int flags) { struct bge_softc *sc = a; struct netmap_adapter *na = NA(sc->bge_ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; int delta, j, k, l, lim = kring->nkr_num_slots - 1; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); /* bge_tx_cons_idx is the equivalent of TDH on intel cards, * i.e. the index of the tx frame most recently completed. */ l = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; /* Sync the TX descriptor list */ bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE); /* record completed transmissions */ delta = l - sc->bge_tx_saved_considx; if (delta < 0) /* wrap around */ delta += BGE_TX_RING_CNT; if (delta > 0) { /* some tx completed */ sc->bge_tx_saved_considx = l; sc->bge_txcnt -= delta; kring->nr_hwavail += delta; } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ bus_dmamap_t *txmap = sc->bge_cdata.bge_tx_dmamap; int n = 0; l = sc->bge_tx_prodidx; while (j != k) { struct netmap_slot *slot = &ring->slot[j]; struct bge_tx_bd *d = &sc->bge_ldata.bge_tx_ring[l]; void *addr = NMB(slot); int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { uint64_t paddr = vtophys(addr); d->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr); /* buffer has changed, unload and reload map */ netmap_reload_map(sc->bge_cdata.bge_tx_mtag, txmap[l], addr, na->buff_size); slot->flags &= ~NS_BUF_CHANGED; } slot->flags &= ~NS_REPORT; d->bge_len = len; d->bge_flags = BGE_TXBDFLAG_END; bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, txmap[l], BUS_DMASYNC_PREWRITE); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; n++; } kring->nr_hwcur = k; /* the saved ring->cur */ sc->bge_tx_prodidx = l; ring->avail -= n; kring->nr_hwavail = ring->avail; // XXX see others /* now repeat the last part of bge_start_locked() */ bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Transmit. */ bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, l); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, l); sc->bge_timer = 5; } return 0; } /* * Reconcile kernel and user view of the receive ring. 
* In bge, the rx ring is initialized by setting the ring size * bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1); * and the receiver always starts from 0. * sc->bge_rx_saved_considx starts from 0 and is the place from * which the driver reads incoming packets. * sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx is the * next (free) receive buffer where the hardware will put incoming packets. * * sc->bge_rx_saved_considx is maintained in software and represents XXX * * After a successful rxeof we do * sc->bge_rx_saved_considx = rx_cons; * ^---- effectively becomes rx_prod_idx * * bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); * ^--- we have freed some descriptors * * bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std + * BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT); * ^---- we have freed some buffers */ static int bge_netmap_rxsync(void *a, u_int ring_nr, int flags) { struct bge_softc *sc = a; struct netmap_adapter *na = NA(sc->bge_ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; int j, k, n, lim = kring->nkr_num_slots - 1; uint32_t end; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE); l = sc->bge_rx_saved_considx; j = kring->nkr_hwcur + kring->nkr_hwavail; l = j + kring->nkr_hwofs; if (j > lim) j -= lim + 1; /* bge_rx_prod_idx is the same as RDH on intel cards -- the next * (empty) buffer to be used for receptions. * To decide when to stop we rely on bge_rx_prod_idx * and not on the flags in the frame descriptors. */ end = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; if (l != end) { for (n = 0; l != end; n++) { struct bge_rx_bd *cur_rx; uint32_t len; cur_rx = &sc->bge_ldata.bge_rx_return_ring[l]; len = cur_rx->bge_len - ETHER_CRC_LEN; kring->ring->slot[j].len = len; kring->ring->slot[j].flags = kring->nkr_slot_flags; /* sync was in bge_newbuf() */ bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[l], BUS_DMASYNC_POSTREAD); j = j == lim ? 0 : j + 1; l = l == lim ? 0 : l + 1; } sc->bge_rx_saved_considx = end; bge_writembx(sc, BGE_MBX_RX_CONS0_LO, end); sc->bge_ifp->if_ipackets += n; kring->nr_hwavail += n; } /* skip past packets that userspace has already processed, * making them available for reception. * advance nr_hwcur and issue a bus_dmamap_sync on the * buffers so it is safe to write to them. * Also increase nr_hwavail */ j = kring->nr_hwcur; if (j != k) { /* userspace has read some packets. 
*/ n = 0; l = kring->nr_hwcur - kring->nkr_hwofs; if (l < 0) l += lim + 1; while (j != k) { struct netmap_slot *slot = ring->slot + j; struct bge_rx_bd *r = sc->bge_ldata.bge_rx_std_ring + l; void *addr = NMB(slot); uint64_t paddr = vtophys(addr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr); r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr); if (slot->flags & NS_BUF_CHANGED) { netmap_reload_map(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[l], addr, na->buff_size); slot->flags &= ~NS_BUF_CHANGED; } r->bge_flags = BGE_RXBDFLAG_END; r->bge_len = na->buff_size; r->bge_idx = l; bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[l], BUS_DMASYNC_PREREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; n++; } kring->nr_hwcur = k; /* the saved ring->cur */ kring->nr_hwavail -= n; /* Flush the RX DMA ring */ bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail ; return 0; } static void bge_netmap_tx_init(struct bge_softc *sc) { struct bge_tx_bd *d = sc->bge_ldata.bge_tx_ring; int i; struct netmap_adapter *na = NA(sc->bge_ifp); struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0); /* slot is NULL if we are not in netmap mode */ if (!slot) return; /* in netmap mode, overwrite addresses and maps */ for (i = 0; i < BGE_TX_RING_CNT; i++) { /* * the first time, ``slot`` points the first slot of * the ring; the reset might have introduced some kind * of offset between the kernel and userspace view of * the ring; for these reasons, we use l to point * to the slot linked to the i-th descriptor. */ void *addr; uint64_t paddr; struct netmap_kring *kring = &na->tx_rings[0]; int l = i + kring->nkr_hwofs; if (l >= sc->rl_ldata.rl_tx_desc_cnt) l -= sc->rl_ldata.rl_tx_desc_cnt; addr = NMB(slot + l); paddr = vtophys(addr); d[i].bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr); d[i].bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr); netmap_load_map(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i], addr, na->buff_size); } } static void bge_netmap_rx_init(struct bge_softc *sc) { /* slot is NULL if we are not in netmap mode */ struct netmap_adapter *na = NA(sc->bge_ifp); struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0); struct bge_rx_bd *r = sc->bge_ldata.bge_rx_std_ring; int i; if (!slot) return; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { /* * the first time, ``slot`` points the first slot of * the ring; the reset might have introduced some kind * of offset between the kernel and userspace view of * the ring; for these reasons, we use l to point * to the slot linked to the i-th descriptor. */ void *addr; uint64_t paddr; struct netmap_kring *kring = &na->rx_rings[0]; int l = i + kring->nkr_hwofs; if (l >= sc->rl_ldata.rl_rx_desc_cnt) l -= sc->rl_ldata.rl_rx_desc_cnt; addr = NMB(slot + l); paddr = vtophys(addr); r[i].bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr); r[i].bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr); r[i].bge_flags = BGE_RXBDFLAG_END; r[i].bge_len = na->buff_size; r[i].bge_idx = i; /* * userspace knows that hwavail packets were ready before the * reset, so we need to tell the NIC that last hwavail * descriptors of the ring are still owned by the driver. 
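		 * (Other netmap-enabled drivers typically handle this by
		 * publishing a producer/tail index that stops nr_hwavail
		 * slots short of the full ring, e.g. something like
		 * BGE_STD_RX_RING_CNT - 1 - kring->nr_hwavail here; this is
		 * only a sketch and has not been verified on bge hardware.)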
*/ D("incomplete driver: don't know how to reserve hwavail slots"); netmap_reload_map(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], addr, na->buff_size); } } static void bge_netmap_attach(struct bge_softc *sc) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = sc->bge_ifp; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = BGE_TX_RING_CNT; na.num_rx_desc = BGE_STD_RX_RING_CNT; na.nm_txsync = bge_netmap_txsync; na.nm_rxsync = bge_netmap_rxsync; na.nm_register = bge_netmap_reg; netmap_attach(&na, 1); } /* end of file */ netmap-release/sys/dev/netmap/if_em_netmap.h000644 000765 000024 00000023745 12220335545 021724 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/if_em_netmap.h 238985 2012-08-02 11:59:43Z luigi $ * * netmap support for em. * * For more details on netmap support please see ixgbe_netmap.h */ #include #include #include #include /* vtophys ? */ #include static void em_netmap_block_tasks(struct adapter *); static void em_netmap_unblock_tasks(struct adapter *); // XXX do we need to block/unblock the tasks ? 
static void em_netmap_block_tasks(struct adapter *adapter) { if (adapter->msix > 1) { /* MSIX */ int i; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; for (i = 0; i < adapter->num_queues; i++, txr++, rxr++) { taskqueue_block(txr->tq); taskqueue_drain(txr->tq, &txr->tx_task); taskqueue_block(rxr->tq); taskqueue_drain(rxr->tq, &rxr->rx_task); } } else { /* legacy */ taskqueue_block(adapter->tq); taskqueue_drain(adapter->tq, &adapter->link_task); taskqueue_drain(adapter->tq, &adapter->que_task); } } static void em_netmap_unblock_tasks(struct adapter *adapter) { if (adapter->msix > 1) { struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; int i; for (i = 0; i < adapter->num_queues; i++) { taskqueue_unblock(txr->tq); taskqueue_unblock(rxr->tq); } } else { /* legacy */ taskqueue_unblock(adapter->tq); } } /* * Register/unregister routine */ static int em_netmap_reg(struct ifnet *ifp, int onoff) { struct adapter *adapter = ifp->if_softc; struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; /* no netmap support here */ em_disable_intr(adapter); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); em_netmap_block_tasks(adapter); if (onoff) { ifp->if_capenable |= IFCAP_NETMAP; na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; em_init_locked(adapter); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) { error = ENOMEM; goto fail; } } else { fail: /* return to non-netmap mode */ ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; em_init_locked(adapter); /* also enable intr */ } em_netmap_unblock_tasks(adapter); return (error); } /* * Reconcile kernel and user view of the transmit ring. */ static int em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = &adapter->tx_rings[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1; /* generate an interrupt approximately every half ring */ u_int report_frequency = kring->nkr_num_slots >> 1; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* slot is the current slot in the netmap ring */ struct netmap_slot *slot = &ring->slot[j]; /* curr is the current slot in the nic ring */ struct e1000_tx_desc *curr = &txr->tx_base[l]; struct em_buffer *txbuf = &txr->tx_buffers[l]; int flags = ((slot->flags & NS_REPORT) || j == 0 || j == report_frequency) ? 
E1000_TXD_CMD_RS : 0; uint64_t paddr; void *addr = PNMB(slot, &paddr); u_int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { curr->buffer_addr = htole64(paddr); /* buffer has changed, reload map */ netmap_reload_map(txr->txtag, txbuf->map, addr); slot->flags &= ~NS_BUF_CHANGED; } curr->upper.data = 0; curr->lower.data = htole32(adapter->txd_cmd | len | (E1000_TXD_CMD_EOP | flags) ); bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_PREWRITE); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwcur = k; /* the saved ring->cur */ kring->nr_hwavail -= n; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), l); } if (n == 0 || kring->nr_hwavail < 1) { int delta; /* record completed transmissions using TDH */ l = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr)); if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */ D("TDH wrap %d", l); l -= kring->nkr_num_slots; } delta = l - txr->next_to_clean; if (delta) { /* some completed, increment hwavail. */ if (delta < 0) delta += kring->nkr_num_slots; txr->next_to_clean = l; kring->nr_hwavail += delta; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct adapter *adapter = ifp->if_softc; struct rx_ring *rxr = &adapter->rx_rings[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * Import newly received packets into the netmap ring. * j is an index in the netmap ring, l in the NIC ring. */ l = rxr->next_to_check; j = netmap_idx_n2k(kring, l); if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; for (n = 0; ; n++) { struct e1000_rx_desc *curr = &rxr->rx_base[l]; uint32_t staterr = le32toh(curr->status); if ((staterr & E1000_RXD_STAT_DD) == 0) break; ring->slot[j].len = le16toh(curr->length); ring->slot[j].flags = slot_flags; bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[l].map, BUS_DMASYNC_POSTREAD); j = (j == lim) ? 0 : j + 1; /* make sure next_to_refresh follows next_to_check */ rxr->next_to_refresh = l; // XXX l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ rxr->next_to_check = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* skip past packets that userspace has released */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. 
*/ l = netmap_idx_k2n(kring, j); /* NIC ring index */ for (n = 0; j != k; n++) { struct netmap_slot *slot = &ring->slot[j]; struct e1000_rx_desc *curr = &rxr->rx_base[l]; struct em_buffer *rxbuf = &rxr->rx_buffers[l]; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { curr->buffer_addr = htole64(paddr); /* buffer has changed, reload map */ netmap_reload_map(rxr->rxtag, rxbuf->map, addr); slot->flags &= ~NS_BUF_CHANGED; } curr->status = 0; bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * IMPORTANT: we must leave one free slot in the ring, * so move l back by one unit */ l = (l == 0) ? lim : l - 1; E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), l); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } static void em_netmap_attach(struct adapter *adapter) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = adapter->ifp; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = adapter->num_tx_desc; na.num_rx_desc = adapter->num_rx_desc; na.nm_txsync = em_netmap_txsync; na.nm_rxsync = em_netmap_rxsync; na.nm_register = em_netmap_reg; netmap_attach(&na, adapter->num_queues); } /* end of file */ netmap-release/sys/dev/netmap/if_igb_netmap.h000644 000765 000024 00000023014 12230344623 022047 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011 Universita` di Pisa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/if_igb_netmap.h 256200 2013-10-09 17:32:52Z jfv $ * * Netmap support for igb, partly contributed by Ahmed Kooli * For details on netmap support please see ixgbe_netmap.h */ #include #include #include #include /* vtophys ? 
*/ #include /* * register-unregister routine */ static int igb_netmap_reg(struct ifnet *ifp, int onoff) { struct adapter *adapter = ifp->if_softc; struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; /* no netmap support here */ igb_disable_intr(adapter); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (onoff) { ifp->if_capenable |= IFCAP_NETMAP; na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; igb_init_locked(adapter); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) { error = ENOMEM; goto fail; } } else { fail: /* restore if_transmit */ ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; igb_init_locked(adapter); /* also enable intr */ } return (error); } /* * Reconcile kernel and user view of the transmit ring. */ static int igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = &adapter->tx_rings[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1; /* generate an interrupt approximately every half ring */ u_int report_frequency = kring->nkr_num_slots >> 1; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD); /* check for new packets to send. * j indexes the netmap ring, l indexes the nic ring, and * j = kring->nr_hwcur, l = E1000_TDT (not tracked), * j == (l + kring->nkr_hwofs) % ring_size */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ /* 82575 needs the queue index added */ u32 olinfo_status = (adapter->hw.mac.type == e1000_82575) ? (txr->me << 4) : 0; l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* slot is the current slot in the netmap ring */ struct netmap_slot *slot = &ring->slot[j]; /* curr is the current slot in the nic ring */ union e1000_adv_tx_desc *curr = (union e1000_adv_tx_desc *)&txr->tx_base[l]; #ifndef IGB_MEDIA_RESET /* at the same time as IGB_MEDIA_RESET was defined, the * tx buffer descriptor was renamed, so use this to revert * back to the old name. */ #define igb_tx_buf igb_tx_buffer #endif struct igb_tx_buf *txbuf = &txr->tx_buffers[l]; int flags = ((slot->flags & NS_REPORT) || j == 0 || j == report_frequency) ? E1000_ADVTXD_DCMD_RS : 0; uint64_t paddr; void *addr = PNMB(slot, &paddr); u_int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(txr->txtag, txbuf->map, addr); slot->flags &= ~NS_BUF_CHANGED; } curr->read.buffer_addr = htole64(paddr); // XXX check olinfo and cmd_type_len curr->read.olinfo_status = htole32(olinfo_status | (len<< E1000_ADVTXD_PAYLEN_SHIFT)); curr->read.cmd_type_len = htole32(len | E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DCMD_EOP | flags); bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_PREWRITE); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwcur = k; /* the saved ring->cur */ kring->nr_hwavail -= n; /* Set the watchdog XXX ? 
*/ txr->queue_status = IGB_QUEUE_WORKING; txr->watchdog_time = ticks; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), l); } if (n == 0 || kring->nr_hwavail < 1) { int delta; /* record completed transmissions using TDH */ l = E1000_READ_REG(&adapter->hw, E1000_TDH(ring_nr)); if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */ D("TDH wrap %d", l); l -= kring->nkr_num_slots; } delta = l - txr->next_to_clean; if (delta) { /* some completed, increment hwavail. */ if (delta < 0) delta += kring->nkr_num_slots; txr->next_to_clean = l; kring->nr_hwavail += delta; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct adapter *adapter = ifp->if_softc; struct rx_ring *rxr = &adapter->rx_rings[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * import newly received packets into the netmap ring. * j is an index in the netmap ring, l in the NIC ring. */ l = rxr->next_to_check; j = netmap_idx_n2k(kring, l); if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; for (n = 0; ; n++) { union e1000_adv_rx_desc *curr = &rxr->rx_base[l]; uint32_t staterr = le32toh(curr->wb.upper.status_error); if ((staterr & E1000_RXD_STAT_DD) == 0) break; ring->slot[j].len = le16toh(curr->wb.upper.length); ring->slot[j].flags = slot_flags; bus_dmamap_sync(rxr->ptag, rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ rxr->next_to_check = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* skip past packets that userspace has released */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { struct netmap_slot *slot = ring->slot + j; union e1000_adv_rx_desc *curr = &rxr->rx_base[l]; struct igb_rx_buf *rxbuf = rxr->rx_buffers + l; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { netmap_reload_map(rxr->ptag, rxbuf->pmap, addr); slot->flags &= ~NS_BUF_CHANGED; } curr->read.pkt_addr = htole64(paddr); curr->wb.upper.status_error = 0; bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * IMPORTANT: we must leave one free slot in the ring, * so move l back by one unit */ l = (l == 0) ? 
lim : l - 1; E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), l); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } static void igb_netmap_attach(struct adapter *adapter) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = adapter->ifp; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = adapter->num_tx_desc; na.num_rx_desc = adapter->num_rx_desc; na.nm_txsync = igb_netmap_txsync; na.nm_rxsync = igb_netmap_rxsync; na.nm_register = igb_netmap_reg; netmap_attach(&na, adapter->num_queues); } /* end of file */ netmap-release/sys/dev/netmap/if_lem_netmap.h000644 000765 000024 00000024071 12220335545 022071 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/if_lem_netmap.h 231881 2012-02-17 14:09:04Z luigi $ * * netmap support for "lem" * * For details on netmap support please see ixgbe_netmap.h */ #include #include #include #include /* vtophys ? */ #include /* * Register/unregister */ static int lem_netmap_reg(struct ifnet *ifp, int onoff) { struct adapter *adapter = ifp->if_softc; struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; EM_CORE_LOCK(adapter); lem_disable_intr(adapter); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); #ifndef EM_LEGACY_IRQ // XXX do we need this ? taskqueue_block(adapter->tq); taskqueue_drain(adapter->tq, &adapter->rxtx_task); taskqueue_drain(adapter->tq, &adapter->link_task); #endif /* !EM_LEGCY_IRQ */ if (onoff) { ifp->if_capenable |= IFCAP_NETMAP; na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; lem_init_locked(adapter); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) { error = ENOMEM; goto fail; } } else { fail: /* return to non-netmap mode */ ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; lem_init_locked(adapter); /* also enable intr */ } #ifndef EM_LEGACY_IRQ taskqueue_unblock(adapter->tq); // XXX do we need this ? #endif /* !EM_LEGCY_IRQ */ EM_CORE_UNLOCK(adapter); return (error); } /* * Reconcile kernel and user view of the transmit ring. 
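 * For reference, a userspace sender drives this routine roughly as
 * follows (a minimal sketch, not part of the driver; fd is an open
 * /dev/netmap descriptor on which NIOCREGIF has been issued, nifp
 * points into the mmap()ed shared region, and have_packets() and
 * build_packet() are placeholders for application logic):
 *
 *	struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
 *	while (ring->avail > 0 && have_packets()) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *		char *buf = NETMAP_BUF(ring, slot->buf_idx);
 *		slot->len = build_packet(buf);
 *		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *		ring->avail--;
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);
 *
 * The final NIOCTXSYNC (or a poll()/select() on fd) is what ends up
 * calling this txsync routine.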
*/ static int lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct adapter *adapter = ifp->if_softc; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1; /* generate an interrupt approximately every half ring */ int report_frequency = kring->nkr_num_slots >> 1; ND("%s: hwofs %d, hwcur %d hwavail %d lease %d cur %d avail %d", ifp->if_xname, kring->nkr_hwofs, kring->nr_hwcur, kring->nr_hwavail, kring->nkr_hwlease, ring->cur, ring->avail); /* take a copy of ring->cur now, and never read it again */ k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, BUS_DMASYNC_POSTREAD); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. */ j = kring->nr_hwcur; if (netmap_verbose > 255) RD(5, "device %s send %d->%d", ifp->if_xname, j, k); if (j != k) { /* we have new packets to send */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* slot is the current slot in the netmap ring */ struct netmap_slot *slot = &ring->slot[j]; /* curr is the current slot in the nic ring */ struct e1000_tx_desc *curr = &adapter->tx_desc_base[l]; struct em_buffer *txbuf = &adapter->tx_buffer_area[l]; int flags = ((slot->flags & NS_REPORT) || j == 0 || j == report_frequency) ? E1000_TXD_CMD_RS : 0; uint64_t paddr; void *addr = PNMB(slot, &paddr); u_int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { return netmap_ring_reinit(kring); } ND("slot %d NIC %d %s", j, l, nm_dump_buf(addr, len, 128, NULL)); slot->flags &= ~NS_REPORT; if (1 || slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(adapter->txtag, txbuf->map, addr); curr->buffer_addr = htole64(paddr); slot->flags &= ~NS_BUF_CHANGED; } curr->upper.data = 0; curr->lower.data = htole32( adapter->txd_cmd | len | (E1000_TXD_CMD_EOP | flags) ); ND("len %d kring %d nic %d", len, j, l); bus_dmamap_sync(adapter->txtag, txbuf->map, BUS_DMASYNC_PREWRITE); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } ND("sent %d packets from %d, TDT now %d", n, kring->nr_hwcur, l); kring->nr_hwcur = k; /* the saved ring->cur */ kring->nr_hwavail -= n; bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), l); } if (n == 0 || kring->nr_hwavail < 1) { int delta; /* record completed transmissions using TDH */ l = E1000_READ_REG(&adapter->hw, E1000_TDH(0)); ND("tdh is now %d", l); if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */ D("bad TDH %d", l); l -= kring->nkr_num_slots; } delta = l - adapter->next_tx_to_clean; if (delta) { /* some tx completed, increment hwavail. */ if (delta < 0) delta += kring->nkr_num_slots; if (netmap_verbose > 255) RD(5, "%s tx recover %d bufs", ifp->if_xname, delta); adapter->next_tx_to_clean = l; kring->nr_hwavail += delta; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. 
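 * The matching userspace receive loop is roughly as follows (again
 * only a sketch, with consume() standing in for the application's
 * packet handler; fd and nifp as in the tx example above):
 *
 *	ioctl(fd, NIOCRXSYNC, NULL);
 *	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
 *	while (ring->avail > 0) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *		consume(NETMAP_BUF(ring, slot->buf_idx), slot->len);
 *		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *		ring->avail--;
 *	}
 *
 * Slots consumed this way are only returned to the kernel (and, in
 * the second half of this routine, to the NIC) on the next
 * NIOCRXSYNC or poll().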
*/ static int lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct adapter *adapter = ifp->if_softc; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; if (k > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * Import newly received packets into the netmap ring. * j is an index in the netmap ring, l in the NIC ring. */ l = adapter->next_rx_desc_to_check; j = netmap_idx_n2k(kring, l); ND("%s: next NIC %d kring %d (ofs %d), hwcur %d hwavail %d cur %d avail %d", ifp->if_xname, l, j, kring->nkr_hwofs, kring->nr_hwcur, kring->nr_hwavail, ring->cur, ring->avail); if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; for (n = 0; ; n++) { struct e1000_rx_desc *curr = &adapter->rx_desc_base[l]; uint32_t staterr = le32toh(curr->status); int len; if ((staterr & E1000_RXD_STAT_DD) == 0) break; len = le16toh(curr->length) - 4; // CRC if (len < 0) { D("bogus pkt size at %d", j); len = 0; } ND("\n%s", nm_dump_buf(NMB(&ring->slot[j]), len, 128, NULL)); ring->slot[j].len = len; ring->slot[j].flags = slot_flags; bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[l].map, BUS_DMASYNC_POSTREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ adapter->next_rx_desc_to_check = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* skip past packets that userspace has released */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. */ l = netmap_idx_k2n(kring, j); /* NIC ring index */ for (n = 0; j != k; n++) { struct netmap_slot *slot = &ring->slot[j]; struct e1000_rx_desc *curr = &adapter->rx_desc_base[l]; struct em_buffer *rxbuf = &adapter->rx_buffer_area[l]; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(adapter->rxtag, rxbuf->map, addr); curr->buffer_addr = htole64(paddr); slot->flags &= ~NS_BUF_CHANGED; } curr->status = 0; bus_dmamap_sync(adapter->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * IMPORTANT: we must leave one free slot in the ring, * so move l back by one unit */ l = (l == 0) ? 
lim : l - 1; E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), l); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } static void lem_netmap_attach(struct adapter *adapter) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = adapter->ifp; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = adapter->num_tx_desc; na.num_rx_desc = adapter->num_rx_desc; na.nm_txsync = lem_netmap_txsync; na.nm_rxsync = lem_netmap_rxsync; na.nm_register = lem_netmap_reg; netmap_attach(&na, 1); } /* end of file */ netmap-release/sys/dev/netmap/if_nfe_netmap.h000644 000765 000024 00000030037 12220335545 022063 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/if_em_netmap.h 231881 2012-02-17 14:09:04Z luigi $ * * netmap support for nfe. XXX not yet tested. * * For more details on netmap support please see ixgbe_netmap.h */ #include #include #include #include /* vtophys ? */ #include static int nfe_netmap_init_buffers(struct nfe_softc *sc) { struct netmap_adapter *na = NA(sc->nfe_ifp); struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0); int i, l, n, max_avail; struct nfe_desc32 *desc32 = NULL; struct nfe_desc64 *desc64 = NULL; void *addr; uint64_t paddr; if (!slot) return 0; // XXX init the tx ring n = NFE_TX_RING_COUNT; for (i = 0; i < n; i++) { l = netmap_idx_n2k(&na->tx_rings[0], i); addr = PNMB(slot + l, &paddr); netmap_reload_map(sc->txq.tx_data_tag, sc->txq.data[l].tx_data_map, addr); slot[l].flags = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[l]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(paddr)); desc64->vtag = 0; desc64->length = htole16(0); desc64->flags = htole16(0); } else { desc32 = &sc->txq.desc32[l]; desc32->physaddr = htole32(NFE_ADDR_LO(paddr)); desc32->length = htole16(0); desc32->flags = htole16(0); } } slot = netmap_reset(na, NR_RX, 0, 0); // XXX init the rx ring /* * Userspace owned hwavail packets before the reset, * so the NIC that last hwavail descriptors of the ring * are still owned by the driver (and keep one empty). 
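 * (That is: the last nr_hwavail slots still hold buffers that the
 * application has not consumed, and one slot is always kept empty,
 * so only max_avail descriptors may be made ready for the NIC. For
 * example, with a 256-slot ring and nr_hwavail = 10, max_avail below
 * is 256 - 1 - 10 = 245; the numbers are only illustrative. Note
 * that the per-slot 'flags' value computed from max_avail in the
 * loop below appears to be intended for the descriptor flags field,
 * which is instead written as NFE_RX_READY unconditionally; this
 * file is marked as not yet tested.)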
*/ n = NFE_RX_RING_COUNT; max_avail = n - 1 - na->rx_rings[0].nr_hwavail; for (i = 0; i < n; i++) { uint16_t flags; l = netmap_idx_n2k(&na->rx_rings[0], i); addr = PNMB(slot + l, &paddr); flags = (i < max_avail) ? NFE_RX_READY : 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[l]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(paddr)); desc64->vtag = 0; desc64->length = htole16(NETMAP_BUF_SIZE); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->rxq.desc32[l]; desc32->physaddr = htole32(NFE_ADDR_LO(paddr)); desc32->length = htole16(NETMAP_BUF_SIZE); desc32->flags = htole16(NFE_RX_READY); } netmap_reload_map(sc->rxq.rx_data_tag, sc->rxq.data[l].rx_data_map, addr); bus_dmamap_sync(sc->rxq.rx_data_tag, sc->rxq.data[l].rx_data_map, BUS_DMASYNC_PREREAD); } return 1; } static void nfe_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid) { struct nfe_softc *sc = ifp->if_softc; switch (what) { case NETMAP_CORE_LOCK: NFE_LOCK(sc); break; case NETMAP_CORE_UNLOCK: NFE_UNLOCK(sc); break; } } /* * Register/unregister routine */ static int nfe_netmap_reg(struct ifnet *ifp, int onoff) { struct nfe_softc *sc = ifp->if_softc; struct netmap_adapter *na = NA(ifp); if (na == NULL) return EINVAL; /* no netmap support here */ /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (onoff) { ifp->if_capenable |= IFCAP_NETMAP; na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; nfe_init_locked(sc); } else { /* return to non-netmap mode */ ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; nfe_init_locked(sc); /* also enable intr */ } return (0); } /* * Reconcile kernel and user view of the transmit ring. */ static int nfe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct nfe_softc *sc = ifp->if_softc; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1; struct nfe_desc32 *desc32 = NULL; struct nfe_desc64 *desc64 = NULL; /* generate an interrupt approximately every half ring */ int report_frequency = kring->nkr_num_slots >> 1; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, BUS_DMASYNC_POSTREAD); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. 
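 * As in the other drivers in this directory, the two indexes are
 * related by
 *	j == (l + kring->nkr_hwofs) % ring_size
 * and are converted with the helpers used below, i.e. (sketch)
 *	l = netmap_idx_k2n(kring, j);	(netmap index -> NIC index)
 *	j = netmap_idx_n2k(kring, l);	(NIC index -> netmap index)
 * nkr_hwofs becomes nonzero when the NIC ring is reset (e.g. by an
 * if_init()) while the netmap ring keeps its state.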
*/ j = kring->nr_hwcur; ND("hwcur %d cur %d", j, k); na->tx_rings[0].nr_kflags &= ~NKR_PENDINTR; if (j != k) { /* we have new packets to send */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* slot is the current slot in the netmap ring */ struct netmap_slot *slot = &ring->slot[j]; uint64_t paddr; void *addr = PNMB(slot, &paddr); u_int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(sc->txq.tx_data_tag, sc->txq.data[l].tx_data_map, addr); slot->flags &= ~NS_BUF_CHANGED; } if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[l]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(paddr)); desc64->vtag = 0; desc64->length = htole16(len - 1); desc64->flags = htole16(NFE_TX_VALID | NFE_TX_LASTFRAG_V2); } else { desc32 = &sc->txq.desc32[l]; desc32->physaddr = htole32(NFE_ADDR_LO(paddr)); desc32->length = htole16(len - 1); desc32->flags = htole16(NFE_TX_VALID | NFE_TX_LASTFRAG_V1); } bus_dmamap_sync(sc->txq.tx_data_tag, sc->txq.data[l].tx_data_map, BUS_DMASYNC_PREWRITE); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwcur = k; /* the saved ring->cur */ kring->nr_hwavail -= n; sc->txq.cur = l; bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); } ND("send %d avail %d reclaim next %d cur %d", n, kring->nr_hwavail, sc->txq.next, sc->txq.cur); if (n == 0 || kring->nr_hwavail < 1) { l = sc->txq.next; k = sc->txq.cur; for (n = 0; l != k; n++, NFE_INC(l, NFE_TX_RING_COUNT)) { uint16_t flags; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[l]; flags = le16toh(desc64->flags); } else { desc32 = &sc->txq.desc32[l]; flags = le16toh(desc32->flags); } if (flags & NFE_TX_VALID) break; } ND("reclaimed %d next %d cur %d", n, sc->txq.next, sc->txq.cur); if (n > 0) { sc->txq.next = l; kring->nr_hwavail += n; } ND("reclaimed %d next %d cur %d", n, sc->txq.next, sc->txq.cur); } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int nfe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct nfe_softc *sc = ifp->if_softc; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; if (k > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * Import newly received packets into the netmap ring. * j is an index in the netmap ring, l in the NIC ring. 
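 * The scan below stops at the first descriptor that still has
 * NFE_RX_READY set, i.e. one the NIC has not filled yet; every
 * completed descriptor adds one slot to kring->nr_hwavail, and the
 * total is exported to the application as ring->avail at the end of
 * this routine.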
*/ l = sc->rxq.cur; j = netmap_idx_n2k(kring, l); if (netmap_no_pendintr || force_update) { uint16_t flags, len; uint16_t slot_flags = kring->nkr_slot_flags; for (n = 0; ; n++) { if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[sc->rxq.cur]; flags = le16toh(desc64->flags); len = le16toh(desc64->length) & NFE_RX_LEN_MASK; } else { desc32 = &sc->rxq.desc32[sc->rxq.cur]; flags = le16toh(desc32->flags); len = le16toh(desc32->length) & NFE_RX_LEN_MASK; } if (flags & NFE_RX_READY) break; ring->slot[j].len = len; ring->slot[j].flags = slot_flags; bus_dmamap_sync(sc->rxq.rx_data_tag, sc->rxq.data[l].rx_data_map, BUS_DMASYNC_POSTREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ sc->rxq.cur = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* skip past packets that userspace has released */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. */ l = netmap_idx_k2n(kring, j); /* NIC ring index */ for (n = 0; j != k; n++) { struct netmap_slot *slot = &ring->slot[j]; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(sc->rxq.rx_data_tag, sc->rxq.data[l].rx_data_map, addr); slot->flags &= ~NS_BUF_CHANGED; } if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[l]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(paddr)); desc64->length = htole16(NETMAP_BUF_SIZE); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->rxq.desc32[l]; desc32->physaddr = htole32(NFE_ADDR_LO(paddr)); desc32->length = htole16(NETMAP_BUF_SIZE); desc32->flags = htole16(NFE_RX_READY); } bus_dmamap_sync(sc->rxq.rx_data_tag, sc->rxq.data[l].rx_data_map, BUS_DMASYNC_PREREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } static void nfe_netmap_attach(struct nfe_softc *sc) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = sc->nfe_ifp; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = NFE_TX_RING_COUNT; na.num_rx_desc = NFE_RX_RING_COUNT; na.nm_txsync = nfe_netmap_txsync; na.nm_rxsync = nfe_netmap_rxsync; na.nm_register = nfe_netmap_reg; netmap_attach(&na, 1); } /* end of file */ netmap-release/sys/dev/netmap/if_re_netmap.h000644 000765 000024 00000026616 12220335545 021731 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011 Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/if_re_netmap.h 234225 2012-04-13 15:33:12Z luigi $ * * netmap support for "re" * For details on netmap support please see ixgbe_netmap.h */ #include #include #include #include /* vtophys ? */ #include /* * support for netmap register/unregisted. We are already under core lock. * only called on the first register or the last unregister. */ static int re_netmap_reg(struct ifnet *ifp, int onoff) { struct rl_softc *adapter = ifp->if_softc; struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); re_stop(adapter); if (onoff) { ifp->if_capenable |= IFCAP_NETMAP; /* save if_transmit to restore it later */ na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; re_init_locked(adapter); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) { error = ENOMEM; goto fail; } } else { fail: /* restore if_transmit */ ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; re_init_locked(adapter); /* also enables intr */ } return (error); } /* * Reconcile kernel and user view of the transmit ring. */ static int re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct rl_softc *sc = ifp->if_softc; struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc; struct netmap_adapter *na = NA(sc->rl_ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; int j, k, l, n, lim = kring->nkr_num_slots - 1; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); /* Sync the TX descriptor list */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* XXX move after the transmissions */ /* record completed transmissions */ for (n = 0, l = sc->rl_ldata.rl_tx_considx; l != sc->rl_ldata.rl_tx_prodidx; n++, l = RL_TX_DESC_NXT(sc, l)) { uint32_t cmdstat = le32toh(sc->rl_ldata.rl_tx_list[l].rl_cmdstat); if (cmdstat & RL_TDESC_STAT_OWN) break; } if (n > 0) { sc->rl_ldata.rl_tx_considx = l; sc->rl_ldata.rl_tx_free += n; kring->nr_hwavail += n; } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ l = sc->rl_ldata.rl_tx_prodidx; for (n = 0; j != k; n++) { struct netmap_slot *slot = &ring->slot[j]; struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[l]; int cmd = slot->len | RL_TDESC_CMD_EOF | RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF ; uint64_t paddr; void *addr = PNMB(slot, &paddr); int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { // XXX what about prodidx ? 
return netmap_ring_reinit(kring); } if (l == lim) /* mark end of ring */ cmd |= RL_TDESC_CMD_EOR; if (slot->flags & NS_BUF_CHANGED) { desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); /* buffer has changed, unload and reload map */ netmap_reload_map(sc->rl_ldata.rl_tx_mtag, txd[l].tx_dmamap, addr); slot->flags &= ~NS_BUF_CHANGED; } slot->flags &= ~NS_REPORT; desc->rl_cmdstat = htole32(cmd); bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd[l].tx_dmamap, BUS_DMASYNC_PREWRITE); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } sc->rl_ldata.rl_tx_prodidx = l; kring->nr_hwcur = k; /* the saved ring->cur */ ring->avail -= n; // XXX see others kring->nr_hwavail = ring->avail; bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); /* start ? */ CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); } return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct rl_softc *sc = ifp->if_softc; struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc; struct netmap_adapter *na = NA(sc->rl_ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * Import newly received packets into the netmap ring. * j is an index in the netmap ring, l in the NIC ring. * * The device uses all the buffers in the ring, so we need * another termination condition in addition to RL_RDESC_STAT_OWN * cleared (all buffers could have it cleared. The easiest one * is to limit the amount of data reported up to 'lim' */ l = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */ j = netmap_idx_n2k(kring, l); /* the kring index */ if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; for (n = kring->nr_hwavail; n < lim ; n++) { struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[l]; uint32_t rxstat = le32toh(cur_rx->rl_cmdstat); uint32_t total_len; if ((rxstat & RL_RDESC_STAT_OWN) != 0) break; total_len = rxstat & sc->rl_rxlenmask; /* XXX subtract crc */ total_len = (total_len < 4) ? 0 : total_len - 4; kring->ring->slot[j].len = total_len; kring->ring->slot[j].flags = slot_flags; /* sync was in re_newbuf() */ bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd[l].rx_dmamap, BUS_DMASYNC_POSTREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n != kring->nr_hwavail) { sc->rl_ldata.rl_rx_prodidx = l; sc->rl_ifp->if_ipackets += n - kring->nr_hwavail; kring->nr_hwavail = n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* skip past packets that userspace has released */ j = kring->nr_hwcur; if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. 
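 * ("released" means ring->cur has moved past the old nr_hwcur, minus
 * any slots the application holds back via ring->reserved, so those
 * buffers can be recycled to the NIC. For example, with a 256-slot
 * ring, nr_hwcur = 10, ring->cur = 14 and reserved = 2, k has
 * already been pulled back to 12 above, so the loop below recycles
 * slots 10 and 11 while 12 and 13 remain with the application.)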
*/ l = netmap_idx_k2n(kring, j); /* the NIC index */ for (n = 0; j != k; n++) { struct netmap_slot *slot = ring->slot + j; struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l]; int cmd = NETMAP_BUF_SIZE | RL_RDESC_CMD_OWN; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } if (l == lim) /* mark end of ring */ cmd |= RL_RDESC_CMD_EOR; slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { netmap_reload_map(sc->rl_ldata.rl_rx_mtag, rxd[l].rx_dmamap, addr); desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); slot->flags &= ~NS_BUF_CHANGED; } desc->rl_cmdstat = htole32(cmd); bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd[l].rx_dmamap, BUS_DMASYNC_PREREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; /* Flush the RX DMA ring */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } /* * Additional routines to init the tx and rx rings. * In other drivers we do that inline in the main code. */ static void re_netmap_tx_init(struct rl_softc *sc) { struct rl_txdesc *txd; struct rl_desc *desc; int i, n; struct netmap_adapter *na = NA(sc->rl_ifp); struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0); /* slot is NULL if we are not in netmap mode */ if (!slot) return; /* in netmap mode, overwrite addresses and maps */ txd = sc->rl_ldata.rl_tx_desc; desc = sc->rl_ldata.rl_tx_list; n = sc->rl_ldata.rl_tx_desc_cnt; /* l points in the netmap ring, i points in the NIC ring */ for (i = 0; i < n; i++) { uint64_t paddr; int l = netmap_idx_n2k(&na->tx_rings[0], i); void *addr = PNMB(slot + l, &paddr); desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); netmap_load_map(sc->rl_ldata.rl_tx_mtag, txd[i].tx_dmamap, addr); } } static void re_netmap_rx_init(struct rl_softc *sc) { struct netmap_adapter *na = NA(sc->rl_ifp); struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0); struct rl_desc *desc = sc->rl_ldata.rl_rx_list; uint32_t cmdstat; int i, n, max_avail; if (!slot) return; n = sc->rl_ldata.rl_rx_desc_cnt; /* * Userspace owned hwavail packets before the reset, * so the NIC that last hwavail descriptors of the ring * are still owned by the driver (and keep one empty). 
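 * (Here this is enforced through the ownership bit: the loop below
 * sets RL_RDESC_CMD_OWN, which hands a descriptor to the NIC, only
 * for the first max_avail = n - 1 - nr_hwavail slots; the remaining
 * descriptors, whose buffers userspace has not read yet, stay with
 * the host until a later rxsync recycles them.)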
*/ max_avail = n - 1 - na->rx_rings[0].nr_hwavail; for (i = 0; i < n; i++) { void *addr; uint64_t paddr; int l = netmap_idx_n2k(&na->rx_rings[0], i); addr = PNMB(slot + l, &paddr); netmap_reload_map(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[i].rx_dmamap, addr); bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[i].rx_dmamap, BUS_DMASYNC_PREREAD); desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr)); desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr)); cmdstat = NETMAP_BUF_SIZE; if (i == n - 1) /* mark the end of ring */ cmdstat |= RL_RDESC_CMD_EOR; if (i < max_avail) cmdstat |= RL_RDESC_CMD_OWN; desc[i].rl_cmdstat = htole32(cmdstat); } } static void re_netmap_attach(struct rl_softc *sc) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = sc->rl_ifp; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt; na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt; na.nm_txsync = re_netmap_txsync; na.nm_rxsync = re_netmap_rxsync; na.nm_register = re_netmap_reg; netmap_attach(&na, 1); } /* end of file */ netmap-release/sys/dev/netmap/if_sfxge_netmap.h000644 000765 000024 00000031641 12220335545 022431 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/ixgbe_netmap.h 232238 2012-02-27 19:05:01Z luigi $ * * netmap modifications for sfxge init: interrupt: sfxge_ev: sfxge_ev_qpoll() in turn calls common/efx_ev.c efx_ev_qpoll() the queue contains handlers which are interleaved, The specific drivers are efx_ev_rx 0 then call eec_rx() or sfxge_ev_rx efx_ev_tx 2 then call eec_tx() or sfxge_ev_tx plus some generic events. efx_ev_driver 5 efx_ev_global 6 efx_ev_drv_gen 7 efx_ev_mcdi 0xc The receive ring seems to be circular, SFXGE_NDESCS in both rx and tx. struct sfxge_rxq *rxq; struct sfxge_rx_sw_desc *rx_desc; id = rxq->pending modulo SFXGE_NDESCS the descriptor is rxq->queue[id] each slot has size efx_qword_t (8 bytes with all overlays) The card is reset through sfxge_schedule_reset() Global lock: sx_xlock(&sc->softc_lock); */ #include #include /* * Some drivers may need the following headers. 
Others * already include them by default #include #include */ #include static void sfxge_stop(struct sfxge_softc *sc); static int sfxge_start(struct sfxge_softc *sc); void sfxge_tx_qlist_post(struct sfxge_txq *txq); static int sfxge_netmap_init_buffers(struct sfxge_softc *sc) { struct netmap_adapter *na = NA(sc->ifnet); struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0); int i, l, n, max_avail; void *addr; uint64_t paddr; // tx rings, see // sfxge_tx_qinit() return 0; } /* * Register/unregister. We are already under core lock. * Only called on the first register or the last unregister. */ static int sfxge_netmap_reg(struct ifnet *ifp, int onoff) { struct sfxge_softc *sc = ifp->if_softc; struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; /* no netmap support here */ sfxge_stop(sc); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (onoff) { /* enable netmap mode */ ifp->if_capenable |= IFCAP_NETMAP; /* save if_transmit and replace with our routine */ na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; /* * reinitialize the adapter, now with netmap flag set, * so the rings will be set accordingly. */ sfxge_start(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) { error = ENOMEM; goto fail; } } else { /* reset normal mode (explicit request or netmap failed) */ fail: /* restore if_transmit */ ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; /* initialize the card, this time in standard mode */ sfxge_start(sc); /* also enables intr */ } return (error); } /* * Reconcile kernel and user view of the transmit ring. */ static int sfxge_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct sfxge_softc *sc = ifp->if_softc; struct sfxge_txq *txr = sc->txq[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k = ring->cur, l, n = 0, lim = kring->nkr_num_slots - 1; if (k > lim) return netmap_ring_reinit(kring); // bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, // BUS_DMASYNC_POSTREAD); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. * The two numbers differ because upon a *_init() we reset * the NIC ring but leave the netmap ring unchanged. * For the transmit ring, we have * * j = kring->nr_hwcur * l = IXGBE_TDT (not tracked in the driver) * and * j == (l + kring->nkr_hwofs) % ring_size * * In this driver kring->nkr_hwofs >= 0, but for other * drivers it might be negative as well. */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ l = netmap_idx_k2n(kring, j); /* NIC index */ for (n = 0; j != k; n++) { /* * Collect per-slot info. * Note that txbuf and curr are indexed by l. * * In this driver we collect the buffer address * (using the PNMB() macro) because we always * need to rewrite it into the NIC ring. * Many other drivers preserve the address, so * we only need to access it if NS_BUF_CHANGED * is set. * XXX note, on this device the dmamap* calls are * not necessary because tag is 0, however just accessing * the per-packet tag kills 1Mpps at 900 MHz. */ struct netmap_slot *slot = &ring->slot[j]; uint64_t paddr; u_int len = slot->len; efx_buffer_t *desc; void *addr = PNMB(slot, &paddr); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 
0 : l + 1; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { ring_reset: return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, unload and reload map */ netmap_reload_map(txr->packet_dma_tag, txr->stmp[l].map, addr); slot->flags &= ~NS_BUF_CHANGED; } slot->flags &= ~NS_REPORT; /* * Fill the slot in the NIC ring. * In this driver we need to rewrite the buffer * address in the NIC ring. Other drivers do not * need this. * Use legacy descriptor, it is faster. */ desc->eb_addr = paddr; desc->eb_size = len; desc->eb_eop = 1; txr->n_pend_desc = 1; sfxge_tx_qlist_post(txr); /* make sure changes to the buffer are synced */ bus_dmamap_sync(txr->packet_dma_tag, txr->stmp[l].map, BUS_DMASYNC_PREWRITE); } kring->nr_hwcur = k; /* the saved ring->cur */ /* decrease avail by number of packets sent */ kring->nr_hwavail -= n; /* synchronize the NIC ring */ // bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, // BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* (re)start the transmitter up to slot l (excluded) */ // IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), l); } /* * Reclaim buffers for completed transmissions. */ if (flags & NAF_FORCE_RECLAIM) { j = 1; /* forced reclaim, ignore interrupts */ } else if (kring->nr_hwavail > 0) { j = 0; /* buffers still available: no reclaim, ignore intr. */ } else { j = 1; } if (j) { // txeof body to reclaim buffers if (txr->pending != txr->completed) { n = (txr->pending > txr->completed) ? txr->pending - txr->completed : txr->pending - txr->completed + SFXGE_NDESCS; txr->completed = txr->pending; kring->nr_hwavail += n; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; if (kring->nr_hwavail > lim) return netmap_ring_reinit(kring); return 0; } /* * Reconcile kernel and user view of the receive ring. * Same as for the txsync, this routine must be efficient and * avoid races in accessing the shared regions. * * When called, userspace has read data from slots kring->nr_hwcur * up to ring->cur (excluded). * * The last interrupt reported kring->nr_hwavail slots available * after kring->nr_hwcur. * We must subtract the newly consumed slots (cur - nr_hwcur) * from nr_hwavail, make the descriptors available for the next reads, * and set kring->nr_hwcur = ring->cur and ring->avail = kring->nr_hwavail. * */ static int sfxge_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct sfxge_softc *sc = ifp->if_softc; struct sfxge_rxq *rxq = sc->rxq[ring_nr]; struct sfxge_evq *evq = sc->evq[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; if (k > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ // bus_dmamap_sync(rxq->rxdma.dma_tag, rxq->rxdma.dma_map, // BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * First part, import newly received packets into the netmap ring. * * j is the index of the next free slot in the netmap ring, * and l is the index of the next received packet in the NIC ring, * and they may differ in case if_init() has been called while * in netmap mode. 
For the receive ring we have * * j = (kring->nr_hwcur + kring->nr_hwavail) % ring_size * l = rxr->next_to_check; * and * j == (l + kring->nkr_hwofs) % ring_size * * rxr->next_to_check is set to 0 on a ring reinit */ l = rxq->completed; j = netmap_idx_n2k(kring, l); if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; // see sfxge_rx_qcomplete() for (n = 0; l != rxq->pending ; n++) { struct sfxge_rx_sw_desc *rx_desc = &rxq->queue[l]; ring->slot[j].len = rx_desc->size - sc->rx_prefix_size; ring->slot[j].flags = slot_flags; // bus_dmamap_sync(rxq->ptag, // rxq->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ // rxq->next_to_check = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* * Skip past packets that userspace has released * (from kring->nr_hwcur to ring->cur - ring->reserved excluded), * and make the buffers available for reception. * As usual j is the index in the netmap ring, l is the index * in the NIC ring, and j == (l + kring->nkr_hwofs) % ring_size */ j = kring->nr_hwcur; if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* collect per-slot info, with similar validations * and flag handling as in the txsync code. * * NOTE curr and rxbuf are indexed by l. * Also, this driver needs to update the physical * address in the NIC ring, but other drivers * may not have this requirement. */ struct netmap_slot *slot = &ring->slot[j]; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) /* bad buf */ goto ring_reset; if (slot->flags & NS_BUF_CHANGED) { //netmap_reload_map(rxq->ptag, rxbuf->pmap, addr); slot->flags &= ~NS_BUF_CHANGED; } // curr->wb.upper.status_error = 0; // curr->read.pkt_addr = htole64(paddr); // bus_dmamap_sync(rxq->ptag, rxbuf->pmap, // BUS_DMASYNC_PREREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; // bus_dmamap_sync(rxq->rxdma.dma_tag, rxq->rxdma.dma_map, // BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* IMPORTANT: we must leave one free slot in the ring, * so move l back by one unit */ l = (l == 0) ? lim : l - 1; //IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), l); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; ring_reset: return netmap_ring_reinit(kring); } /* * The attach routine, called near the end of ixgbe_attach(), * fills the parameters for netmap_attach() and calls it. * It cannot fail, in the worst case (such as no memory) * netmap mode will be disabled and the driver will only * operate in standard mode. 
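 * (The reference to ixgbe_attach() above is inherited from the ixgbe
 * file this code was derived from; for this driver the call is
 * presumably made near the end of sfxge_attach(). Also note that,
 * unlike the other drivers in this directory, the tx and rx ring
 * counts differ: na.num_tx_rings is set explicitly to
 * SFXGE_TXQ_NTYPES + SFXGE_RX_SCALE_MAX, while netmap_attach() is
 * passed SFXGE_RX_SCALE_MAX for the queue count.)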
*/ static void sfxge_netmap_attach(struct sfxge_softc *sc) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = sc->ifnet; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = SFXGE_NDESCS; na.num_rx_desc = SFXGE_NDESCS; na.nm_txsync = sfxge_netmap_txsync; na.nm_rxsync = sfxge_netmap_rxsync; na.nm_register = sfxge_netmap_reg; na.num_tx_rings = SFXGE_TXQ_NTYPES + SFXGE_RX_SCALE_MAX; netmap_attach(&na, SFXGE_RX_SCALE_MAX); } /* end of file */ netmap-release/sys/dev/netmap/ixgbe_netmap.h000644 000765 000024 00000045406 12220335545 021741 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/ixgbe_netmap.h 244514 2012-12-20 22:26:03Z luigi $ * * netmap modifications for ixgbe * * This file is meant to be a reference on how to implement * netmap support for a network driver. * This file contains code but only static or inline functions * that are used by a single driver. To avoid replication of * code we just #include it near the beginning of the * standard driver. */ #include #include /* * Some drivers may need the following headers. Others * already include them by default #include #include */ #include /* * ix_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. * During regular operations the CRC is stripped, but on some * hardware reception of frames not multiple of 64 is slower, * so using crcstrip=0 helps in benchmarks. * * ix_rx_miss, ix_rx_miss_bufs: * count packets that might be missed due to lost interrupts. * * ix_use_dd * use the dd bit for completed tx transmissions. * This is tricky, much better to use TDH for now. 
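 * These knobs sit under the dev.netmap sysctl tree (see the
 * SYSCTL_INT() declarations below), so they can be inspected and
 * changed at runtime, e.g. from a root shell (values shown are only
 * examples):
 *
 *	sysctl dev.netmap.ix_crcstrip=1		# strip the CRC on rx
 *	sysctl dev.netmap.ix_use_dd=1		# reclaim tx slots via DD
 *	sysctl dev.netmap.ix_rx_miss		# read the miss counters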
*/ SYSCTL_DECL(_dev_netmap); static int ix_rx_miss, ix_rx_miss_bufs, ix_use_dd, ix_crcstrip; SYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip, CTLFLAG_RW, &ix_crcstrip, 0, "strip CRC on rx frames"); SYSCTL_INT(_dev_netmap, OID_AUTO, ix_use_dd, CTLFLAG_RW, &ix_use_dd, 0, "use dd instead of tdh to detect tx frames"); SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss, CTLFLAG_RW, &ix_rx_miss, 0, "potentially missed rx intr"); SYSCTL_INT(_dev_netmap, OID_AUTO, ix_rx_miss_bufs, CTLFLAG_RW, &ix_rx_miss_bufs, 0, "potentially missed rx intr bufs"); static void set_crcstrip(struct ixgbe_hw *hw, int onoff) { /* crc stripping is set in two places: * IXGBE_HLREG0 (modified on init_locked and hw reset) * IXGBE_RDRXCTL (set by the original driver in * ixgbe_setup_hw_rsc() called in init_locked. * We disable the setting when netmap is compiled in). * We update the values here, but also in ixgbe.c because * init_locked sometimes is called outside our control. */ uint32_t hl, rxc; hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); if (netmap_verbose) D("%s read HLREG 0x%x rxc 0x%x", onoff ? "enter" : "exit", hl, rxc); /* hw requirements ... */ rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; rxc |= IXGBE_RDRXCTL_RSCACKC; if (onoff && !ix_crcstrip) { /* keep the crc. Fast rx */ hl &= ~IXGBE_HLREG0_RXCRCSTRP; rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; } else { /* reset default mode */ hl |= IXGBE_HLREG0_RXCRCSTRP; rxc |= IXGBE_RDRXCTL_CRCSTRIP; } if (netmap_verbose) D("%s write HLREG 0x%x rxc 0x%x", onoff ? "enter" : "exit", hl, rxc); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); } /* * Register/unregister. We are already under core lock. * Only called on the first register or the last unregister. */ static int ixgbe_netmap_reg(struct ifnet *ifp, int onoff) { struct adapter *adapter = ifp->if_softc; struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; /* no netmap support here */ IXGBE_CORE_LOCK(adapter); ixgbe_disable_intr(adapter); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); set_crcstrip(&adapter->hw, onoff); if (onoff) { /* enable netmap mode */ ifp->if_capenable |= IFCAP_NETMAP; /* save if_transmit and replace with our routine */ na->if_transmit = ifp->if_transmit; ifp->if_transmit = netmap_transmit; /* * reinitialize the adapter, now with netmap flag set, * so the rings will be set accordingly. */ ixgbe_init_locked(adapter); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) { error = ENOMEM; goto fail; } } else { /* reset normal mode (explicit request or netmap failed) */ fail: /* restore if_transmit */ ifp->if_transmit = na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; /* initialize the card, this time in standard mode */ ixgbe_init_locked(adapter); /* also enables intr */ } set_crcstrip(&adapter->hw, onoff); IXGBE_CORE_UNLOCK(adapter); return (error); } /* * Reconcile kernel and user view of the transmit ring. * This routine might be called frequently so it must be efficient. * * ring->cur holds the userspace view of the current ring index. Userspace * has filled the tx slots from the previous call's ring->cur up to but not * including ring->cur for this call. In this function the kernel updates * kring->nr_hwcur to ring->cur, thus slots [kring->nr_hwcur, ring->cur) are * now ready to transmit. At the last interrupt kring->nr_hwavail slots were * available. * * This function runs under lock (acquired from the caller or internally). 
* It must first update ring->avail to what the kernel knows, * subtract the newly used slots (ring->cur - kring->nr_hwcur) * from both avail and nr_hwavail, and set ring->nr_hwcur = ring->cur * issuing a dmamap_sync on all slots. * * Since ring comes from userspace, its content must be read only once, * and validated before being used to update the kernel's structures. * (this is also true for every use of ring in the kernel). * * ring->avail is never used, only checked for bogus values. * * I flags & FORCE_RECLAIM, reclaim transmitted * buffers irrespective of interrupt mitigation. */ static int ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = &adapter->tx_rings[ring_nr]; struct netmap_adapter *na = NA(adapter->ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n = 0; u_int const k = ring->cur, lim = kring->nkr_num_slots - 1; /* * ixgbe can generate an interrupt on every tx packet, but it * seems very expensive, so we interrupt once every half ring, * or when requested with NS_REPORT */ u_int report_frequency = kring->nkr_num_slots >> 1; if (k > lim) return netmap_ring_reinit(kring); bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. * The two numbers differ because upon a *_init() we reset * the NIC ring but leave the netmap ring unchanged. * For the transmit ring, we have * * j = kring->nr_hwcur * l = IXGBE_TDT (not tracked in the driver) * and * j == (l + kring->nkr_hwofs) % ring_size * * In this driver kring->nkr_hwofs >= 0, but for other * drivers it might be negative as well. */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ prefetch(&ring->slot[j]); l = netmap_idx_k2n(kring, j); /* NIC index */ prefetch(&txr->tx_buffers[l]); for (n = 0; j != k; n++) { /* * Collect per-slot info. * Note that txbuf and curr are indexed by l. * * In this driver we collect the buffer address * (using the PNMB() macro) because we always * need to rewrite it into the NIC ring. * Many other drivers preserve the address, so * we only need to access it if NS_BUF_CHANGED * is set. * XXX note, on this device the dmamap* calls are * not necessary because tag is 0, however just accessing * the per-packet tag kills 1Mpps at 900 MHz. */ struct netmap_slot *slot = &ring->slot[j]; union ixgbe_adv_tx_desc *curr = &txr->tx_base[l]; struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[l]; uint64_t paddr; // XXX type for flags and len ? int flags = ((slot->flags & NS_REPORT) || j == 0 || j == report_frequency) ? IXGBE_TXD_CMD_RS : 0; u_int len = slot->len; void *addr = PNMB(slot, &paddr); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; prefetch(&ring->slot[j]); prefetch(&txr->tx_buffers[l]); /* * Quick check for valid addr and len. * NMB() returns netmap_buffer_base for invalid * buffer indexes (but the address is still a * valid one to be used in a ring). slot->len is * unsigned so no need to check for negative values. */ if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { ring_reset: return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, unload and reload map */ netmap_reload_map(txr->txtag, txbuf->map, addr); slot->flags &= ~NS_BUF_CHANGED; } slot->flags &= ~NS_REPORT; /* * Fill the slot in the NIC ring. 
* In this driver we need to rewrite the buffer * address in the NIC ring. Other drivers do not * need this. * Use legacy descriptor, it is faster. */ curr->read.buffer_addr = htole64(paddr); curr->read.olinfo_status = 0; curr->read.cmd_type_len = htole32(len | flags | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP); /* make sure changes to the buffer are synced */ bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_PREWRITE); } kring->nr_hwcur = k; /* the saved ring->cur */ /* decrease avail by number of packets sent */ kring->nr_hwavail -= n; /* synchronize the NIC ring */ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* (re)start the transmitter up to slot l (excluded) */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), l); } /* * Reclaim buffers for completed transmissions. * Because this is expensive (we read a NIC register etc.) * we only do it in specific cases (see below). * In all cases kring->nr_kflags indicates which slot will be * checked upon a tx interrupt (nkr_num_slots means none). */ if (flags & NAF_FORCE_RECLAIM) { j = 1; /* forced reclaim, ignore interrupts */ kring->nr_kflags = kring->nkr_num_slots; } else if (kring->nr_hwavail > 0) { j = 0; /* buffers still available: no reclaim, ignore intr. */ kring->nr_kflags = kring->nkr_num_slots; } else { /* * no buffers available, locate a slot for which we request * ReportStatus (approximately half ring after next_to_clean) * and record it in kring->nr_kflags. * If the slot has DD set, do the reclaim looking at TDH, * otherwise we go to sleep (in netmap_poll()) and will be * woken up when slot nr_kflags will be ready. */ struct ixgbe_legacy_tx_desc *txd = (struct ixgbe_legacy_tx_desc *)txr->tx_base; j = txr->next_to_clean + kring->nkr_num_slots/2; if (j >= kring->nkr_num_slots) j -= kring->nkr_num_slots; // round to the closest with dd set j= (j < kring->nkr_num_slots / 4 || j >= kring->nkr_num_slots*3/4) ? 0 : report_frequency; kring->nr_kflags = j; /* the slot to check */ j = txd[j].upper.fields.status & IXGBE_TXD_STAT_DD; // XXX cpu_to_le32 ? } if (j) { int delta; /* * Record completed transmissions. * We (re)use the driver's txr->next_to_clean to keep * track of the most recently completed transmission. * * The datasheet discourages the use of TDH to find out the * number of sent packets. We should rather check the DD * status bit in a packet descriptor. However, we only set * the "report status" bit for some descriptors (a kind of * interrupt mitigation), so we can only check on those. * For the time being we use TDH, as we do it infrequently * enough not to pose performance problems. */ if (ix_use_dd) { struct ixgbe_legacy_tx_desc *txd = (struct ixgbe_legacy_tx_desc *)txr->tx_base; u_int k1 = netmap_idx_k2n(kring, kring->nr_hwcur); l = txr->next_to_clean; delta = 0; while (l != k1 && txd[l].upper.fields.status & IXGBE_TXD_STAT_DD) { delta++; l = (l == lim) ? 0 : l + 1; } } else { l = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(ring_nr)); if (l >= kring->nkr_num_slots) { /* XXX can happen */ D("TDH wrap %d", l); l -= kring->nkr_num_slots; } delta = l - txr->next_to_clean; } if (delta) { /* some tx completed, increment avail */ if (delta < 0) delta += kring->nkr_num_slots; txr->next_to_clean = l; kring->nr_hwavail += delta; if (kring->nr_hwavail > lim) goto ring_reset; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. 
* Same as for the txsync, this routine must be efficient and * avoid races in accessing the shared regions. * * When called, userspace has read data from slots kring->nr_hwcur * up to ring->cur (excluded). * * The last interrupt reported kring->nr_hwavail slots available * after kring->nr_hwcur. * We must subtract the newly consumed slots (cur - nr_hwcur) * from nr_hwavail, make the descriptors available for the next reads, * and set kring->nr_hwcur = ring->cur and ring->avail = kring->nr_hwavail. * * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective * of whether or not we received an interrupt. */ static int ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct adapter *adapter = ifp->if_softc; struct rx_ring *rxr = &adapter->rx_rings[ring_nr]; struct netmap_adapter *na = NA(adapter->ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; if (k > lim) return netmap_ring_reinit(kring); /* XXX check sync modes */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * First part, import newly received packets into the netmap ring. * * j is the index of the next free slot in the netmap ring, * and l is the index of the next received packet in the NIC ring, * and they may differ in case if_init() has been called while * in netmap mode. For the receive ring we have * * j = (kring->nr_hwcur + kring->nr_hwavail) % ring_size * l = rxr->next_to_check; * and * j == (l + kring->nkr_hwofs) % ring_size * * rxr->next_to_check is set to 0 on a ring reinit */ if (netmap_no_pendintr || force_update) { int crclen = ix_crcstrip ? 0 : 4; uint16_t slot_flags = kring->nkr_slot_flags; l = rxr->next_to_check; j = netmap_idx_n2k(kring, l); for (n = 0; ; n++) { union ixgbe_adv_rx_desc *curr = &rxr->rx_base[l]; uint32_t staterr = le32toh(curr->wb.upper.status_error); if ((staterr & IXGBE_RXD_STAT_DD) == 0) break; ring->slot[j].len = le16toh(curr->wb.upper.length) - crclen; ring->slot[j].flags = slot_flags; bus_dmamap_sync(rxr->ptag, rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ if (netmap_no_pendintr && !force_update) { /* diagnostics */ ix_rx_miss ++; ix_rx_miss_bufs += n; } rxr->next_to_check = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* * Skip past packets that userspace has released * (from kring->nr_hwcur to ring->cur - ring->reserved excluded), * and make the buffers available for reception. * As usual j is the index in the netmap ring, l is the index * in the NIC ring, and j == (l + kring->nkr_hwofs) % ring_size */ j = kring->nr_hwcur; if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* collect per-slot info, with similar validations * and flag handling as in the txsync code. * * NOTE curr and rxbuf are indexed by l. * Also, this driver needs to update the physical * address in the NIC ring, but other drivers * may not have this requirement. 
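			 * As in the txsync loop above, a slot carrying
			 * NS_BUF_CHANGED has had its buffer index rewritten
			 * by userspace, so its DMA map is reloaded before the
			 * descriptor is handed back to the NIC.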
*/ struct netmap_slot *slot = &ring->slot[j]; union ixgbe_adv_rx_desc *curr = &rxr->rx_base[l]; struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[l]; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) /* bad buf */ goto ring_reset; if (slot->flags & NS_BUF_CHANGED) { netmap_reload_map(rxr->ptag, rxbuf->pmap, addr); slot->flags &= ~NS_BUF_CHANGED; } curr->wb.upper.status_error = 0; curr->read.pkt_addr = htole64(paddr); bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* IMPORTANT: we must leave one free slot in the ring, * so move l back by one unit */ l = (l == 0) ? lim : l - 1; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), l); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; ring_reset: return netmap_ring_reinit(kring); } /* * The attach routine, called near the end of ixgbe_attach(), * fills the parameters for netmap_attach() and calls it. * It cannot fail, in the worst case (such as no memory) * netmap mode will be disabled and the driver will only * operate in standard mode. */ static void ixgbe_netmap_attach(struct adapter *adapter) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = adapter->ifp; na.na_flags = NAF_BDG_MAYSLEEP; na.num_tx_desc = adapter->num_tx_desc; na.num_rx_desc = adapter->num_rx_desc; na.nm_txsync = ixgbe_netmap_txsync; na.nm_rxsync = ixgbe_netmap_rxsync; na.nm_register = ixgbe_netmap_reg; netmap_attach(&na, adapter->num_queues); } /* end of file */ netmap-release/sys/dev/netmap/netmap.4000644 000765 000024 00000044770 12230554267 020507 0ustar00luigistaff000000 000000 .\" Copyright (c) 2011-2013 Matteo Landi, Luigi Rizzo, Universita` di Pisa .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" This document is derived in part from the enet man page (enet.4) .\" distributed with 4.3BSD Unix. 
.\" .\" $FreeBSD: head/share/man/man4/netmap.4 228017 2011-11-27 06:55:57Z gjb $ .\" .Dd October 18, 2013 .Dt NETMAP 4 .Os .Sh NAME .Nm netmap .Nd a framework for fast packet I/O .Sh SYNOPSIS .Cd device netmap .Sh DESCRIPTION .Nm is a framework for extremely fast and efficient packet I/O (reaching 14.88 Mpps with a single core at less than 1 GHz) for both userspace and kernel clients. Userspace clients can use the netmap API to send and receive raw packets through physical interfaces or ports of the .Xr VALE 4 switch. .Pp .Nm VALE is a very fast (reaching 20 Mpps per port) and modular software switch, implemented within the kernel, which can interconnect virtual ports, physical devices, and the native host stack. .Pp .Nm uses a memory mapped region to share packet buffers, descriptors and queues with the kernel. Simple .Pa ioctl()s are used to bind interfaces/ports to file descriptors and implement non-blocking I/O, whereas blocking I/O uses .Pa select()/poll(). .Nm can exploit the parallelism in multiqueue devices and multicore systems. .Pp For the best performance, .Nm requires explicit support in device drivers; a generic emulation layer is available to implement the .Nm API on top of unmodified device drivers, at the price of reduced performance (but still better than what can be achieved with sockets or BPF/pcap). .Pp For a list of devices with native .Nm support, see the end of this manual page. .Pp .Sh OPERATION - THE NETMAP API .Nm clients must first .Pa open("/dev/netmap") , and then issue an .Pa ioctl(fd, NIOCREGIF, (struct nmreq *)arg) to bind the file descriptor to a specific interface or port. .Nm has multiple modes of operation controlled by the content of the .Pa struct nmreq passed to the .Pa ioctl() . In particular, the .Em nr_name field specifies whether the client operates on a physical network interface or on a port of a .Nm VALE switch, as indicated below. Additional fields in the .Pa struct nmreq control the details of operation. .Pp .Bl -tag -width XXXX .It Dv Interface name (e.g. 'em0', 'eth1', ... ) The data path of the interface is disconnected from the host stack. Depending on additional arguments, the file descriptor is bound to the NIC (one or all queues), or to the host stack. .It Dv valeXXX:YYY (arbitrary XXX and YYY) The file descriptor is bound to port YYY of a VALE switch called XXX, where XXX and YYY are arbitrary alphanumeric strings. The string cannot exceed IFNAMSIZ characters, and YYY cannot matching the name of any existing interface. .Pp The switch and the port are created if not existing. .It Dv valeXXX:ifname (ifname is an existing interface) Flags in the argument control whether the physical interface (and optionally the corrisponding host stack endpoint) are connected or disconnected from the VALE switch named XXX. .Pp In this case the .Pa ioctl() is used only for configuring the VALE switch, typically through the .Nm vale-ctl command. The file descriptor cannot be used for I/O, and should be .Pa close()d after issuing the .Pa ioctl(). .El .Pp The binding can be removed (and the interface returns to regular operation, or the virtual port destroyed) with a .Pa close() on the file descriptor. .Pp The processes owning the file descriptor can then .Pa mmap() the memory region that contains pre-allocated buffers, descriptors and queues, and use them to read/write raw packets. 
Non blocking I/O is done with special .Pa ioctl()'s , whereas the file descriptor can be passed to .Pa select()/poll() to be notified about incoming packet or available transmit buffers. .Ss DATA STRUCTURES The data structures in the mmapped memory are described below (see .Xr sys/net/netmap.h for reference). All physical devices operating in .Nm mode use the same memory region, shared by the kernel and all processes who own .Pa /dev/netmap descriptors bound to those devices (NOTE: visibility may be restricted in future implementations). Virtual ports instead use separate memory regions, shared only with the kernel. .Pp All references between the shared data structure are relative (offsets or indexes). Some macros help converting them into actual pointers. .Pp .Pp .Bl -tag -width XXX .It Dv struct netmap_if (one per interface) indicates the number of rings supported by an interface, their sizes, and the offsets of the .Pa netmap_rings associated to the interface. .Pp .Pa struct netmap_if is at offset .Pa nr_offset in the shared memory region is indicated by the field in the structure returned by the .Pa NIOCREGIF (see below). .Bd -literal struct netmap_if { char ni_name[IFNAMSIZ]; /* name of the interface. */ const u_int ni_version; /* API version */ const u_int ni_rx_rings; /* number of rx ring pairs */ const u_int ni_tx_rings; /* if 0, same as ni_rx_rings */ const ssize_t ring_ofs[]; /* offset of tx and rx rings */ }; .Ed .It Dv struct netmap_ring (one per ring) Contains the positions in the transmit and receive rings to synchronize the kernel and the application, and an array of .Pa slots describing the buffers. 'reserved' is used in receive rings to tell the kernel the number of slots after 'cur' that are still in usr indicates how many slots starting from 'cur' the .Pp Each physical interface has one .Pa netmap_ring for each hardware transmit and receive ring, plus one extra transmit and one receive structure that connect to the host stack. .Bd -literal struct netmap_ring { const ssize_t buf_ofs; /* see details */ const uint32_t num_slots; /* number of slots in the ring */ uint32_t avail; /* number of usable slots */ uint32_t cur; /* 'current' read/write index */ uint32_t reserved; /* not refilled before current */ const uint16_t nr_buf_size; uint16_t flags; #define NR_TIMESTAMP 0x0002 /* set timestamp on *sync() */ #define NR_FORWARD 0x0004 /* enable NS_FORWARD for ring */ #define NR_RX_TSTMP 0x0008 /* set rx timestamp in slots */ struct timeval ts; struct netmap_slot slot[0]; /* array of slots */ } .Ed .Pp In transmit rings, after a system call 'cur' indicates the first slot that can be used for transmissions, and 'avail' reports how many of them are available. Before the next netmap-related system call on the file descriptor, the application should fill buffers and slots with data, and update 'cur' and 'avail' accordingly, as shown in the figure below: .Bd -literal cur |----- avail ---| (after syscall) v TX [*****aaaaaaaaaaaaaaaaa**] TX [*****TTTTTaaaaaaaaaaaa**] ^ |-- avail --| (before syscall) cur .Ed In receive rings, after a system call 'cur' indicates the first slot that contains a valid packet, and 'avail' reports how many of them are available. Before the next netmap-related system call on the file descriptor, the application can process buffers and release them to the kernel updating 'cur' and 'avail' accordingly, as shown in the figure below. 
Receive rings have an additional field called 'reserved' to indicate how many buffers before 'cur' are still under processing and cannot be released. .Bd -literal cur |-res-|-- avail --| (after syscall) v RX [**rrrrrrRRRRRRRRRRRR******] RX [**...........rrrrRRR******] |res|--|flags >> 8) & 0xff) uint64_t ptr; /* buffer address (indirect buffers) */ }; .Ed The flags control how the the buffer associated to the slot should be managed. .It Dv packet buffers are normally fixed size (2 Kbyte) buffers allocated by the kernel that contain packet data. Buffers addresses are computed through macros. .El .Pp .Bl -tag -width XXX Some macros support the access to objects in the shared memory region. In particular, .It NETMAP_TXRING(nifp, i) .It NETMAP_RXRING(nifp, i) return the address of the i-th transmit and receive ring, respectively, whereas .It NETMAP_BUF(ring, buf_idx) returns the address of the buffer with index buf_idx (which can be part of any ring for the given interface). .El .Pp Normally, buffers are associated to slots when interfaces are bound, and one packet is fully contained in a single buffer. Clients can however modify the mapping using the following flags: .Ss FLAGS .Bl -tag -width XXX .It NS_BUF_CHANGED indicates that the buf_idx in the slot has changed. This can be useful if the client wants to implement some form of zero-copy forwarding (e.g. by passing buffers from an input interface to an output interface), or needs to process packets out of order. .Pp The flag MUST be used whenever the buffer index is changed. .It NS_REPORT indicates that we want to be woken up when this buffer has been transmitted. This reduces performance but insures a prompt notification when a buffer has been sent. Normally, .Nm notifies transmit completions in batches, hence signals can be delayed indefinitely. However, we need such notifications before closing a descriptor. .It NS_FORWARD When the device is open in 'transparent' mode, the client can mark slots in receive rings with this flag. For all marked slots, marked packets are forwarded to the other endpoint at the next system call, thus restoring (in a selective way) the connection between the NIC and the host stack. .It NS_NO_LEARN tells the forwarding code that the SRC MAC address for this packet should not be used in the learning bridge .It NS_INDIRECT indicates that the packet's payload is not in the netmap supplied buffer, but in a user-supplied buffer whose user virtual address is in the 'ptr' field of the slot. The size can reach 65535 bytes. .Em This is only supported on the transmit ring of virtual ports .It NS_MOREFRAG indicates that the packet continues with subsequent buffers; the last buffer in a packet must have the flag clear. The maximum length of a chain is 64 buffers. .Em This is only supported on virtual ports .It ns_ctr on receive rings, contains the number of remaining buffers in a packet, including this one. Slots with a value greater than 1 also have NS_MOREFRAG set. The length refers to the individual buffer, there is no field for the total length XXX maybe put it in the ptr field ? .Pp On transmit rings, if NS_DST is set, it is passed to the lookup function, which can use it e.g. as the index of the destination port instead of doing an address lookup. .El .Sh IOCTLS .Pp .Nm supports some ioctl() to synchronize the state of the rings between the kernel and the user processes, plus some to query and configure the interface. 
The former do not require any argument, whereas the latter use a .Pa struct netmap_req defined as follows: .Bd -literal struct nmreq { char nr_name[IFNAMSIZ]; uint32_t nr_version; /* API version */ #define NETMAP_API 4 /* current version */ uint32_t nr_offset; /* nifp offset in the shared region */ uint32_t nr_memsize; /* size of the shared region */ uint32_t nr_tx_slots; /* slots in tx rings */ uint32_t nr_rx_slots; /* slots in rx rings */ uint16_t nr_tx_rings; /* number of tx rings */ uint16_t nr_rx_rings; /* number of tx rings */ uint16_t nr_ringid; /* ring(s) we care about */ #define NETMAP_HW_RING 0x4000 /* low bits indicate one hw ring */ #define NETMAP_SW_RING 0x2000 /* we process the sw ring */ #define NETMAP_NO_TX_POLL 0x1000 /* no gratuitous txsync on poll */ #define NETMAP_RING_MASK 0xfff /* the actual ring number */ uint16_t nr_cmd; #define NETMAP_BDG_ATTACH 1 /* attach the NIC */ #define NETMAP_BDG_DETACH 2 /* detach the NIC */ #define NETMAP_BDG_LOOKUP_REG 3 /* register lookup function */ #define NETMAP_BDG_LIST 4 /* get bridge's info */ uint16_t nr_arg1; uint16_t nr_arg2; uint32_t spare2[3]; }; .Ed A device descriptor obtained through .Pa /dev/netmap also supports the ioctl supported by network devices. .Pp The netmap-specific .Xr ioctl 2 command codes below are defined in .In net/netmap.h and are: .Bl -tag -width XXXX .It Dv NIOCGINFO returns EINVAL if the named device does not support netmap. Otherwise, it returns 0 and (advisory) information about the interface. Note that all the information below can change before the interface is actually put in netmap mode. .Pp .Pa nr_memsize indicates the size of the netmap memory region. Physical devices all share the same memory region, whereas VALE ports may have independent regions for each port. These sizes can be set through system-wise sysctl variables. .Pa nr_tx_slots, nr_rx_slots indicate the size of transmit and receive rings. .Pa nr_tx_rings, nr_rx_rings indicate the number of transmit and receive rings. Both ring number and sizes may be configured at runtime using interface-specific functions (e.g. .Pa sysctl or .Pa ethtool . .It Dv NIOCREGIF puts the interface named in nr_name into netmap mode, disconnecting it from the host stack, and/or defines which rings are controlled through this file descriptor. On return, it gives the same info as NIOCGINFO, and nr_ringid indicates the identity of the rings controlled through the file descriptor. .Pp Possible values for nr_ringid are .Bl -tag -width XXXXX .It 0 default, all hardware rings .It NETMAP_SW_RING the ``host rings'' connecting to the host stack .It NETMAP_HW_RING + i the i-th hardware ring .El By default, a .Nm poll or .Nm select call pushes out any pending packets on the transmit ring, even if no write events are specified. The feature can be disabled by or-ing .Nm NETMAP_NO_TX_SYNC to nr_ringid. But normally you should keep this feature unless you are using separate file descriptors for the send and receive rings, because otherwise packets are pushed out only if NETMAP_TXSYNC is called, or the send queue is full. .Pp .Pa NIOCREGIF can be used multiple times to change the association of a file descriptor to a ring pair, always within the same device. .Pp When registering a virtual interface that is dynamically created to a .Xr vale 4 switch, we can specify the desired number of rings (1 by default, and currently up to 16) on it using nr_tx_rings and nr_rx_rings fields. 
.It Dv NIOCTXSYNC tells the hardware of new packets to transmit, and updates the number of slots available for transmission. .It Dv NIOCRXSYNC tells the hardware of consumed packets, and asks for newly available packets. .El .Sh SYSTEM CALLS .Nm uses .Xr select 2 and .Xr poll 2 to wake up processes when significant events occur, and .Xr mmap 2 to map memory. .Pp Applications may need to create threads and bind them to specific cores to improve performance, using standard OS primitives, see .Xr pthread 3 . In particular, .Xr pthread_setaffinity_np 3 may be of use. .Sh EXAMPLES The following code implements a traffic generator .Pp .Bd -literal -compact #include #include struct netmap_if *nifp; struct netmap_ring *ring; struct nmreq nmr; fd = open("/dev/netmap", O_RDWR); bzero(&nmr, sizeof(nmr)); strcpy(nmr.nr_name, "ix0"); nmr.nm_version = NETMAP_API; ioctl(fd, NIOCREG, &nmr); p = mmap(0, nmr.nr_memsize, fd); nifp = NETMAP_IF(p, nmr.nr_offset); ring = NETMAP_TXRING(nifp, 0); fds.fd = fd; fds.events = POLLOUT; for (;;) { poll(list, 1, -1); for ( ; ring->avail > 0 ; ring->avail--) { i = ring->cur; buf = NETMAP_BUF(ring, ring->slot[i].buf_index); ... prepare packet in buf ... ring->slot[i].len = ... packet length ... ring->cur = NETMAP_RING_NEXT(ring, i); } } .Ed .Sh SUPPORTED INTERFACES .Nm supports the following interfaces: .Xr em 4 , .Xr igb 4 , .Xr ixgbe 4 , .Xr lem 4 , .Xr re 4 .Sh SEE ALSO .Xr vale 4 .Pp http://info.iet.unipi.it/~luigi/netmap/ .Pp Luigi Rizzo, Revisiting network I/O APIs: the netmap framework, Communications of the ACM, 55 (3), pp.45-51, March 2012 .Pp Luigi Rizzo, netmap: a novel framework for fast packet I/O, Usenix ATC'12, June 2012, Boston .Sh AUTHORS .An -nosplit The .Nm framework has been originally designed and implemented at the Universita` di Pisa in 2011 by .An Luigi Rizzo , and further extended with help from .An Matteo Landi , .An Gaetano Catalli , .An Giuseppe Lettieri , .An Vincenzo Maffione . .Pp .Nm and .Nm VALE have been funded by the European Commission within FP7 Projects CHANGE (257422) and OPENLAB (287581). netmap-release/sys/dev/netmap/netmap.c000644 000765 000024 00000336357 12230530510 020554 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef __FreeBSD__ #define TEST_STUFF // test code, does not compile yet on linux #endif /* __FreeBSD__ */ /* * This module supports memory mapped access to network devices, * see netmap(4). * * The module uses a large, memory pool allocated by the kernel * and accessible as mmapped memory by multiple userspace threads/processes. * The memory pool contains packet buffers and "netmap rings", * i.e. user-accessible copies of the interface's queues. * * Access to the network card works like this: * 1. a process/thread issues one or more open() on /dev/netmap, to create * select()able file descriptor on which events are reported. * 2. on each descriptor, the process issues an ioctl() to identify * the interface that should report events to the file descriptor. * 3. on each descriptor, the process issues an mmap() request to * map the shared memory region within the process' address space. * The list of interesting queues is indicated by a location in * the shared memory region. * 4. using the functions in the netmap(4) userspace API, a process * can look up the occupation state of a queue, access memory buffers, * and retrieve received packets or enqueue packets to transmit. * 5. using some ioctl()s the process can synchronize the userspace view * of the queue with the actual status in the kernel. This includes both * receiving the notification of new packets, and transmitting new * packets on the output interface. * 6. select() or poll() can be used to wait for events on individual * transmit or receive queues (or all queues for a given interface). * SYNCHRONIZATION (USER) The netmap rings and data structures may be shared among multiple user threads or even independent processes. Any synchronization among those threads/processes is delegated to the threads themselves. Only one thread at a time can be in a system call on the same netmap ring. The OS does not enforce this and only guarantees against system crashes in case of invalid usage. LOCKING (INTERNAL) Within the kernel, access to the netmap rings is protected as follows: - a spinlock on each ring, to handle producer/consumer races on RX rings attached to the host stack (against multiple host threads writing from the host stack to the same ring), and on 'destination' rings attached to a VALE switch (i.e. RX rings in VALE ports, and TX rings in NIC/host ports) protecting multiple active senders for the same destination) - an atomic variable to guarantee that there is at most one instance of *_*xsync() on the ring at any time. For rings connected to user file descriptors, an atomic_test_and_set() protects this, and the lock on the ring is not actually used. For NIC RX rings connected to a VALE switch, an atomic_test_and_set() is also used to prevent multiple executions (the driver might indeed already guarantee this). For NIC TX rings connected to a VALE switch, the lock arbitrates access to the queue (both when allocating buffers and when pushing them out). - *xsync() should be protected against initializations of the card. On FreeBSD most devices have the reset routine protected by a RING lock (ixgbe, igb, em) or core lock (re). lem is missing the RING protection on rx_reset(), this should be added. On linux there is an external lock on the tx path, which probably also arbitrates access to the reset routine. XXX to be revised - a per-interface core_lock protecting access from the host stack while interfaces may be detached from netmap mode. XXX there should be no need for this lock if we detach the interfaces only while they are down. 
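   As an illustration (this is not the literal code path, which varies
   per caller), the per-ring exclusion described above is what the
   nm_kr_tryget() / nm_kr_put() helpers further down in this file
   provide:

	if (nm_kr_tryget(kring))
		return;			(ring busy or stopped, give up)
	... run the txsync or rxsync handler ...
	nm_kr_put(kring);		(clear the busy flag)
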
--- VALE SWITCH --- NMG_LOCK() serializes all modifications to switches and ports. A switch cannot be deleted until all ports are gone. For each switch, an SX lock (RWlock on linux) protects deletion of ports. When configuring or deleting a new port, the lock is acquired in exclusive mode (after holding NMG_LOCK). When forwarding, the lock is acquired in shared mode (without NMG_LOCK). The lock is held throughout the entire forwarding cycle, during which the thread may incur in a page fault. Hence it is important that sleepable shared locks are used. On the rx ring, the per-port lock is grabbed initially to reserve a number of slot in the ring, then the lock is released, packets are copied from source to destination, and then the lock is acquired again and the receive ring is updated. (A similar thing is done on the tx ring for NIC and host stack ports attached to the switch) */ /* * OS-specific code that is used only within this file. * Other OS-specific code that must be accessed by drivers * is present in netmap_kern.h */ #if defined(__FreeBSD__) #include /* prerequisite */ __FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 241723 2012-10-19 09:41:45Z glebius $"); #include #include #include #include /* defines used in kernel.h */ #include #include /* types used in module initialization */ #include /* cdevsw struct */ #include /* uio struct */ #include #include /* struct socket */ #include #include /* PROT_EXEC */ #include #include #include #include /* vtophys */ #include /* vtophys */ #include #include #include #include #include #include /* sockaddrs */ #include #include #include #include /* BIOCIMMEDIATE */ #include #include /* bus_dmamap_* */ #include #include #define prefetch(x) __builtin_prefetch(x) #define BDG_RWLOCK_T struct rwlock // struct rwlock #define BDG_RWINIT(b) \ rw_init_flags(&(b)->bdg_lock, "bdg lock", RW_NOWITNESS) #define BDG_WLOCK(b) rw_wlock(&(b)->bdg_lock) #define BDG_WUNLOCK(b) rw_wunlock(&(b)->bdg_lock) #define BDG_RLOCK(b) rw_rlock(&(b)->bdg_lock) #define BDG_RTRYLOCK(b) rw_try_rlock(&(b)->bdg_lock) #define BDG_RUNLOCK(b) rw_runlock(&(b)->bdg_lock) #define BDG_RWDESTROY(b) rw_destroy(&(b)->bdg_lock) /* netmap global lock. 
* normally called within the user thread (upon a system call) * or when a file descriptor or process is terminated * (last close or last munmap) */ #define NMG_LOCK_T struct mtx #define NMG_LOCK_INIT() mtx_init(&netmap_global_lock, "netmap global lock", NULL, MTX_DEF) #define NMG_LOCK_DESTROY() mtx_destroy(&netmap_global_lock) #define NMG_LOCK() mtx_lock(&netmap_global_lock) #define NMG_UNLOCK() mtx_unlock(&netmap_global_lock) #define NMG_LOCK_ASSERT() mtx_assert(&netmap_global_lock, MA_OWNED) /* atomic operations */ #include #define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1)) #define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0) #elif defined(linux) #include "bsd_glue.h" static netdev_tx_t linux_netmap_start_xmit(struct sk_buff *, struct net_device *); static struct device_driver* linux_netmap_find_driver(struct device *dev) { struct device_driver *dd; while ( (dd = dev->driver) == NULL ) { if ( (dev = dev->parent) == NULL ) return NULL; } return dd; } static struct net_device* ifunit_ref(const char *name) { struct net_device *ifp = dev_get_by_name(&init_net, name); struct device_driver *dd; if (ifp == NULL) return NULL; if ( (dd = linux_netmap_find_driver(&ifp->dev)) == NULL ) goto error; if (!try_module_get(dd->owner)) goto error; return ifp; error: dev_put(ifp); return NULL; } static void if_rele(struct net_device *ifp) { struct device_driver *dd; dd = linux_netmap_find_driver(&ifp->dev); dev_put(ifp); if (dd) module_put(dd->owner); } // XXX a mtx would suffice here too 20130404 gl #define NMG_LOCK_T struct semaphore #define NMG_LOCK_INIT() sema_init(&netmap_global_lock, 1) #define NMG_LOCK_DESTROY() #define NMG_LOCK() down(&netmap_global_lock) #define NMG_UNLOCK() up(&netmap_global_lock) #define NMG_LOCK_ASSERT() // XXX to be completed #elif defined(__APPLE__) #warning OSX support is only partial #include "osx_glue.h" #else #error Unsupported platform #endif /* unsupported */ /* * common headers */ #include #include #include MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map"); /* * The following variables are used by the drivers and replicate * fields in the global memory pool. They only refer to buffers * used by physical interfaces. 
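 * netmap_buffer_base also doubles as the address returned for an
 * out-of-range buffer index, so a driver can validate a slot coming
 * from userspace with a single pointer comparison, e.g. as the ixgbe
 * txsync does:
 *
 *	void *addr = PNMB(slot, &paddr);
 *	if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)
 *		return netmap_ring_reinit(kring);
 *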
*/ u_int netmap_total_buffers; u_int netmap_buf_size; char *netmap_buffer_base; /* also address of an invalid buffer */ /* user-controlled variables */ int netmap_verbose; static int netmap_no_timestamp; /* don't timestamp on rxsync */ SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args"); SYSCTL_INT(_dev_netmap, OID_AUTO, verbose, CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode"); SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp, CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp"); int netmap_mitigate = 1; SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, ""); int netmap_no_pendintr = 1; SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets."); int netmap_txsync_retry = 2; SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW, &netmap_txsync_retry, 0 , "Number of txsync loops in bridge's flush."); int netmap_drop = 0; /* debugging */ int netmap_flags = 0; /* debug flags */ int netmap_fwd = 0; /* force transparent mode */ SYSCTL_INT(_dev_netmap, OID_AUTO, drop, CTLFLAG_RW, &netmap_drop, 0 , ""); SYSCTL_INT(_dev_netmap, OID_AUTO, flags, CTLFLAG_RW, &netmap_flags, 0 , ""); SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0 , ""); NMG_LOCK_T netmap_global_lock; /* * protect against multiple threads using the same ring. * also check that the ring has not been stopped. */ #define NM_KR_BUSY 1 #define NM_KR_STOPPED 2 static void nm_kr_put(struct netmap_kring *kr); static __inline int nm_kr_tryget(struct netmap_kring *kr) { /* check a first time without taking the lock * to avoid starvation for nm_kr_get() */ if (unlikely(kr->nkr_stopped)) { ND("ring %p stopped (%d)", kr, kr->nkr_stopped); return NM_KR_STOPPED; } if (unlikely(NM_ATOMIC_TEST_AND_SET(&kr->nr_busy))) return NM_KR_BUSY; /* check a second time with lock held */ if (unlikely(kr->nkr_stopped)) { ND("ring %p stopped (%d)", kr, kr->nkr_stopped); nm_kr_put(kr); return NM_KR_STOPPED; } return 0; } static __inline void nm_kr_put(struct netmap_kring *kr) { NM_ATOMIC_CLEAR(&kr->nr_busy); } static void nm_kr_get(struct netmap_kring *kr) { while (NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)) tsleep(kr, 0, "NM_KR_GET", 4); } static void nm_disable_ring(struct netmap_kring *kr) { kr->nkr_stopped = 1; nm_kr_get(kr); mtx_lock(&kr->q_lock); mtx_unlock(&kr->q_lock); nm_kr_put(kr); } void netmap_disable_all_rings(struct ifnet *ifp) { struct netmap_adapter *na; int i; if (!(ifp->if_capenable & IFCAP_NETMAP)) return; na = NA(ifp); for (i = 0; i < na->num_tx_rings + 1; i++) { nm_disable_ring(na->tx_rings + i); selwakeuppri(&na->tx_rings[i].si, PI_NET); } for (i = 0; i < na->num_rx_rings + 1; i++) { nm_disable_ring(na->rx_rings + i); selwakeuppri(&na->rx_rings[i].si, PI_NET); } selwakeuppri(&na->tx_si, PI_NET); selwakeuppri(&na->rx_si, PI_NET); } void netmap_enable_all_rings(struct ifnet *ifp) { struct netmap_adapter *na; int i; if (!(ifp->if_capenable & IFCAP_NETMAP)) return; na = NA(ifp); for (i = 0; i < na->num_tx_rings + 1; i++) { D("enabling %p", na->tx_rings + i); na->tx_rings[i].nkr_stopped = 0; } for (i = 0; i < na->num_rx_rings + 1; i++) { D("enabling %p", na->rx_rings + i); na->rx_rings[i].nkr_stopped = 0; } } /* * generic bound_checking function */ u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg) { u_int oldv = *v; const char *op = NULL; if (dflt < lo) dflt = lo; if (dflt > hi) dflt = hi; if (oldv < lo) { *v = dflt; op = "Bump"; } else if (oldv > hi) { *v = hi; op = "Clamp"; } if (op && msg) printf("%s 
%s to %d (was %d)\n", op, msg, *v, oldv); return *v; } /* * packet-dump function, user-supplied or static buffer. * The destination buffer must be at least 30+4*len */ const char * nm_dump_buf(char *p, int len, int lim, char *dst) { static char _dst[8192]; int i, j, i0; static char hex[] ="0123456789abcdef"; char *o; /* output position */ #define P_HI(x) hex[((x) & 0xf0)>>4] #define P_LO(x) hex[((x) & 0xf)] #define P_C(x) ((x) >= 0x20 && (x) <= 0x7e ? (x) : '.') if (!dst) dst = _dst; if (lim <= 0 || lim > len) lim = len; o = dst; sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim); o += strlen(o); /* hexdump routine */ for (i = 0; i < lim; ) { sprintf(o, "%5d: ", i); o += strlen(o); memset(o, ' ', 48); i0 = i; for (j=0; j < 16 && i < lim; i++, j++) { o[j*3] = P_HI(p[i]); o[j*3+1] = P_LO(p[i]); } i = i0; for (j=0; j < 16 && i < lim; i++, j++) o[j + 48] = P_C(p[i]); o[j+48] = '\n'; o += j+49; } *o = '\0'; #undef P_HI #undef P_LO #undef P_C return dst; } /* * system parameters (most of them in netmap_kern.h) * NM_NAME prefix for switch port names, default "vale" * NM_MAXPORTS number of ports * NM_BRIDGES max number of switches in the system. * XXX should become a sysctl or tunable * * Switch ports are named valeX:Y where X is the switch name and Y * is the port. If Y matches a physical interface name, the port is * connected to a physical device. * * Unlike physical interfaces, switch ports use their own memory region * for rings and buffers. * The virtual interfaces use per-queue lock instead of core lock. * In the tx loop, we aggregate traffic in batches to make all operations * faster. The batch size is bridge_batch. */ #define NM_BDG_MAXRINGS 16 /* XXX unclear how many. */ #define NM_BDG_MAXSLOTS 4096 /* XXX same as above */ #define NM_BRIDGE_RINGSIZE 1024 /* in the device */ #define NM_BDG_HASH 1024 /* forwarding table entries */ #define NM_BDG_BATCH 1024 /* entries in the forwarding buffer */ #define NM_MULTISEG 64 /* max size of a chain of bufs */ /* actual size of the tables */ #define NM_BDG_BATCH_MAX (NM_BDG_BATCH + NM_MULTISEG) /* NM_FT_NULL terminates a list of slots in the ft */ #define NM_FT_NULL NM_BDG_BATCH_MAX #define NM_BRIDGES 8 /* number of bridges */ /* * bridge_batch is set via sysctl to the max batch size to be * used in the bridge. The actual value may be larger as the * last packet in the block may overflow the size. */ int bridge_batch = NM_BDG_BATCH; /* bridge batch size */ SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0 , ""); /* * These are used to handle reference counters for bridge ports. */ #define ADD_BDG_REF(ifp) refcount_acquire(&NA(ifp)->na_bdg_refcount) #define DROP_BDG_REF(ifp) refcount_release(&NA(ifp)->na_bdg_refcount) /* The bridge references the buffers using the device specific look up table */ static inline void * BDG_NMB(struct netmap_mem_d *nmd, struct netmap_slot *slot) { struct lut_entry *lut = nmd->pools[NETMAP_BUF_POOL].lut; uint32_t i = slot->buf_idx; return (unlikely(i >= nmd->pools[NETMAP_BUF_POOL].objtotal)) ? lut[0].vaddr : lut[i].vaddr; } static void bdg_netmap_attach(struct netmap_adapter *); static int bdg_netmap_reg(struct ifnet *ifp, int onoff); int kern_netmap_regif(struct nmreq *nmr); /* * Each transmit queue accumulates a batch of packets into * a structure before forwarding. Packets to the same * destination are put in a list using ft_next as a link field. * ft_frags and ft_next are valid only on the first fragment. 
*/ struct nm_bdg_fwd { /* forwarding entry for a bridge */ void *ft_buf; /* netmap or indirect buffer */ uint8_t ft_frags; /* how many fragments (only on 1st frag) */ uint8_t _ft_port; /* dst port (unused) */ uint16_t ft_flags; /* flags, e.g. indirect */ uint16_t ft_len; /* src fragment len */ uint16_t ft_next; /* next packet to same destination */ }; /* * For each output interface, nm_bdg_q is used to construct a list. * bq_len is the number of output buffers (we can have coalescing * during the copy). */ struct nm_bdg_q { uint16_t bq_head; uint16_t bq_tail; uint32_t bq_len; /* number of buffers */ }; /* XXX revise this */ struct nm_hash_ent { uint64_t mac; /* the top 2 bytes are the epoch */ uint64_t ports; }; /* * nm_bridge is a descriptor for a VALE switch. * Interfaces for a bridge are all in bdg_ports[]. * The array has fixed size, an empty entry does not terminate * the search, but lookups only occur on attach/detach so we * don't mind if they are slow. * * The bridge is non blocking on the transmit ports: excess * packets are dropped if there is no room on the output port. * * bdg_lock protects accesses to the bdg_ports array. * This is a rw lock (or equivalent). */ struct nm_bridge { /* XXX what is the proper alignment/layout ? */ BDG_RWLOCK_T bdg_lock; /* protects bdg_ports */ int bdg_namelen; uint32_t bdg_active_ports; /* 0 means free */ char bdg_basename[IFNAMSIZ]; /* Indexes of active ports (up to active_ports) * and all other remaining ports. */ uint8_t bdg_port_index[NM_BDG_MAXPORTS]; struct netmap_adapter *bdg_ports[NM_BDG_MAXPORTS]; /* * The function to decide the destination port. * It returns either of an index of the destination port, * NM_BDG_BROADCAST to broadcast this packet, or NM_BDG_NOPORT not to * forward this packet. ring_nr is the source ring index, and the * function may overwrite this value to forward this packet to a * different ring index. * This function must be set by netmap_bdgctl(). */ bdg_lookup_fn_t nm_bdg_lookup; /* the forwarding table, MAC+ports. * XXX should be changed to an argument to be passed to * the lookup function, and allocated on attach */ struct nm_hash_ent ht[NM_BDG_HASH]; }; /* * XXX in principle nm_bridges could be created dynamically * Right now we have a static array and deletions are protected * by an exclusive lock. */ struct nm_bridge nm_bridges[NM_BRIDGES]; /* * A few function to tell which kind of port are we using. * XXX should we hold a lock ? * * nma_is_vp() virtual port * nma_is_host() port connected to the host stack * nma_is_hw() port connected to a NIC */ int nma_is_vp(struct netmap_adapter *na); int nma_is_vp(struct netmap_adapter *na) { return na->nm_register == bdg_netmap_reg; } static __inline int nma_is_host(struct netmap_adapter *na) { return na->nm_register == NULL; } static __inline int nma_is_hw(struct netmap_adapter *na) { /* In case of sw adapter, nm_register is NULL */ return !nma_is_vp(na) && !nma_is_host(na); } /* * If the NIC is owned by the kernel * (i.e., bridge), neither another bridge nor user can use it; * if the NIC is owned by a user, only users can share it. * Evaluation must be done under NMG_LOCK(). */ #define NETMAP_OWNED_BY_KERN(ifp) (!nma_is_vp(NA(ifp)) && NA(ifp)->na_bdg) #define NETMAP_OWNED_BY_ANY(ifp) \ (NETMAP_OWNED_BY_KERN(ifp) || (NA(ifp)->refcount > 0)) /* * NA(ifp)->bdg_port port index */ /* * this is a slightly optimized copy routine which rounds * to multiple of 64 bytes and is often faster than dealing * with other odd sizes. 
We assume there is enough room * in the source and destination buffers. * * XXX only for multiples of 64 bytes, non overlapped. */ static inline void pkt_copy(void *_src, void *_dst, int l) { uint64_t *src = _src; uint64_t *dst = _dst; if (unlikely(l >= 1024)) { memcpy(dst, src, l); return; } for (; likely(l > 0); l-=64) { *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; } } #ifdef TEST_STUFF struct xxx { char *name; void (*fn)(uint32_t); }; static void nm_test_defmtx(uint32_t n) { uint32_t i; struct mtx m; mtx_init(&m, "test", NULL, MTX_DEF); for (i = 0; i < n; i++) { mtx_lock(&m); mtx_unlock(&m); } mtx_destroy(&m); return; } static void nm_test_spinmtx(uint32_t n) { uint32_t i; struct mtx m; mtx_init(&m, "test", NULL, MTX_SPIN); for (i = 0; i < n; i++) { mtx_lock(&m); mtx_unlock(&m); } mtx_destroy(&m); return; } static void nm_test_rlock(uint32_t n) { uint32_t i; struct rwlock m; rw_init(&m, "test"); for (i = 0; i < n; i++) { rw_rlock(&m); rw_runlock(&m); } rw_destroy(&m); return; } static void nm_test_wlock(uint32_t n) { uint32_t i; struct rwlock m; rw_init(&m, "test"); for (i = 0; i < n; i++) { rw_wlock(&m); rw_wunlock(&m); } rw_destroy(&m); return; } static void nm_test_slock(uint32_t n) { uint32_t i; struct sx m; sx_init(&m, "test"); for (i = 0; i < n; i++) { sx_slock(&m); sx_sunlock(&m); } sx_destroy(&m); return; } static void nm_test_xlock(uint32_t n) { uint32_t i; struct sx m; sx_init(&m, "test"); for (i = 0; i < n; i++) { sx_xlock(&m); sx_xunlock(&m); } sx_destroy(&m); return; } struct xxx nm_tests[] = { { "defmtx", nm_test_defmtx }, { "spinmtx", nm_test_spinmtx }, { "rlock", nm_test_rlock }, { "wlock", nm_test_wlock }, { "slock", nm_test_slock }, { "xlock", nm_test_xlock }, }; static int nm_test(struct nmreq *nmr) { uint32_t scale, n, test; static int old_test = -1; test = nmr->nr_cmd; scale = nmr->nr_offset; n = sizeof(nm_tests) / sizeof(struct xxx) - 1; if (test > n) { D("test index too high, max %d", n); return 0; } if (old_test != test) { D("test %s scale %d", nm_tests[test].name, scale); old_test = test; } nm_tests[test].fn(scale); return 0; } #endif /* TEST_STUFF */ /* * locate a bridge among the existing ones. * MUST BE CALLED WITH NMG_LOCK() * * a ':' in the name terminates the bridge name. Otherwise, just NM_NAME. * We assume that this is called with a name of at least NM_NAME chars. */ static struct nm_bridge * nm_find_bridge(const char *name, int create) { int i, l, namelen; struct nm_bridge *b = NULL; NMG_LOCK_ASSERT(); namelen = strlen(NM_NAME); /* base length */ l = name ? strlen(name) : 0; /* actual length */ if (l < namelen) { D("invalid bridge name %s", name ? 
name : NULL); return NULL; } for (i = namelen + 1; i < l; i++) { if (name[i] == ':') { namelen = i; break; } } if (namelen >= IFNAMSIZ) namelen = IFNAMSIZ; ND("--- prefix is '%.*s' ---", namelen, name); /* lookup the name, remember empty slot if there is one */ for (i = 0; i < NM_BRIDGES; i++) { struct nm_bridge *x = nm_bridges + i; if (x->bdg_active_ports == 0) { if (create && b == NULL) b = x; /* record empty slot */ } else if (x->bdg_namelen != namelen) { continue; } else if (strncmp(name, x->bdg_basename, namelen) == 0) { ND("found '%.*s' at %d", namelen, name, i); b = x; break; } } if (i == NM_BRIDGES && b) { /* name not found, can create entry */ /* initialize the bridge */ strncpy(b->bdg_basename, name, namelen); ND("create new bridge %s with ports %d", b->bdg_basename, b->bdg_active_ports); b->bdg_namelen = namelen; b->bdg_active_ports = 0; for (i = 0; i < NM_BDG_MAXPORTS; i++) b->bdg_port_index[i] = i; /* set the default function */ b->nm_bdg_lookup = netmap_bdg_learning; /* reset the MAC address table */ bzero(b->ht, sizeof(struct nm_hash_ent) * NM_BDG_HASH); } return b; } /* * Free the forwarding tables for rings attached to switch ports. */ static void nm_free_bdgfwd(struct netmap_adapter *na) { int nrings, i; struct netmap_kring *kring; NMG_LOCK_ASSERT(); nrings = nma_is_vp(na) ? na->num_tx_rings : na->num_rx_rings; kring = nma_is_vp(na) ? na->tx_rings : na->rx_rings; for (i = 0; i < nrings; i++) { if (kring[i].nkr_ft) { free(kring[i].nkr_ft, M_DEVBUF); kring[i].nkr_ft = NULL; /* protect from freeing twice */ } } if (nma_is_hw(na)) nm_free_bdgfwd(SWNA(na->ifp)); } /* * Allocate the forwarding tables for the rings attached to the bridge ports. */ static int nm_alloc_bdgfwd(struct netmap_adapter *na) { int nrings, l, i, num_dstq; struct netmap_kring *kring; NMG_LOCK_ASSERT(); /* all port:rings + broadcast */ num_dstq = NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1; l = sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH_MAX; l += sizeof(struct nm_bdg_q) * num_dstq; l += sizeof(uint16_t) * NM_BDG_BATCH_MAX; nrings = nma_is_vp(na) ? na->num_tx_rings : na->num_rx_rings; kring = nma_is_vp(na) ? na->tx_rings : na->rx_rings; for (i = 0; i < nrings; i++) { struct nm_bdg_fwd *ft; struct nm_bdg_q *dstq; int j; ft = malloc(l, M_DEVBUF, M_NOWAIT | M_ZERO); if (!ft) { nm_free_bdgfwd(na); return ENOMEM; } dstq = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX); for (j = 0; j < num_dstq; j++) { dstq[j].bq_head = dstq[j].bq_tail = NM_FT_NULL; dstq[j].bq_len = 0; } kring[i].nkr_ft = ft; } if (nma_is_hw(na)) nm_alloc_bdgfwd(SWNA(na->ifp)); return 0; } /* * Fetch configuration from the device, to cope with dynamic * reconfigurations after loading the module. 
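 * Returns 0 if the configuration is unchanged, or if it changed but
 * there are no active users so the stored values can simply be updated;
 * returns 1 if it changed while the adapter is in use, in which case
 * netmap_if_new() below refuses to build a new netmap_if.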
*/ static int netmap_update_config(struct netmap_adapter *na) { struct ifnet *ifp = na->ifp; u_int txr, txd, rxr, rxd; txr = txd = rxr = rxd = 0; if (na->nm_config) { na->nm_config(ifp, &txr, &txd, &rxr, &rxd); } else { /* take whatever we had at init time */ txr = na->num_tx_rings; txd = na->num_tx_desc; rxr = na->num_rx_rings; rxd = na->num_rx_desc; } if (na->num_tx_rings == txr && na->num_tx_desc == txd && na->num_rx_rings == rxr && na->num_rx_desc == rxd) return 0; /* nothing changed */ if (netmap_verbose || na->refcount > 0) { D("stored config %s: txring %d x %d, rxring %d x %d", ifp->if_xname, na->num_tx_rings, na->num_tx_desc, na->num_rx_rings, na->num_rx_desc); D("new config %s: txring %d x %d, rxring %d x %d", ifp->if_xname, txr, txd, rxr, rxd); } if (na->refcount == 0) { D("configuration changed (but fine)"); na->num_tx_rings = txr; na->num_tx_desc = txd; na->num_rx_rings = rxr; na->num_rx_desc = rxd; return 0; } D("configuration changed while active, this is bad..."); return 1; } static struct netmap_if* netmap_if_new(const char *ifname, struct netmap_adapter *na) { if (netmap_update_config(na)) { /* configuration mismatch, report and fail */ return NULL; } return netmap_mem_if_new(ifname, na); } /* Structure associated to each thread which registered an interface. * * The first 4 fields of this structure are written by NIOCREGIF and * read by poll() and NIOC?XSYNC. * There is low contention among writers (actually, a correct user program * should have no contention among writers) and among writers and readers, * so we use a single global lock to protect the structure initialization. * Since initialization involves the allocation of memory, we reuse the memory * allocator lock. * Read access to the structure is lock free. Readers must check that * np_nifp is not NULL before using the other fields. * If np_nifp is NULL initialization has not been performed, so they should * return an error to userlevel. * * The ref_done field is used to regulate access to the refcount in the * memory allocator. The refcount must be incremented at most once for * each open("/dev/netmap"). The increment is performed by the first * function that calls netmap_get_memory() (currently called by * mmap(), NIOCGINFO and NIOCREGIF). * If the refcount is incremented, it is then decremented when the * private structure is destroyed. */ struct netmap_priv_d { struct netmap_if * volatile np_nifp; /* netmap if descriptor. */ struct ifnet *np_ifp; /* device for which we hold a ref. */ int np_ringid; /* from the ioctl */ u_int np_qfirst, np_qlast; /* range of rings to scan */ uint16_t np_txpoll; struct netmap_mem_d *np_mref; /* use with NMG_LOCK held */ #ifdef __FreeBSD__ int np_refcount; /* use with NMG_LOCK held */ #endif /* __FreeBSD__ */ }; /* grab a reference to the memory allocator, if we don't have one already. The * reference is taken from the netmap_adapter registered with the priv. * * Before the introduction of private allocators for VALE ports, netmap API * allowed userspace to call mmap() and ioctl(NIOCGINFO) before ioctl(NIOCREGIF). * For compatibility with this behaviour, we grab the global allocator if * the priv doesn't point to a netmap_adapter. The allocator cannot be * changed later on, therefore applications that behave in this way cannot * register VALE ports. */ static int netmap_get_memory_locked(struct netmap_priv_d* p) { struct netmap_adapter *na; struct netmap_mem_d *nmd; int error = 0; na = p->np_ifp ? NA(p->np_ifp) : NULL; nmd = na ? 
na->nm_mem : &nm_mem; if (!p->np_mref) { error = netmap_mem_finalize(nmd); if (!error) p->np_mref = nmd; } else if (p->np_mref != nmd) { error = EINVAL; } return error; } static int netmap_get_memory(struct netmap_priv_d* p) { int error; NMG_LOCK(); error = netmap_get_memory_locked(p); NMG_UNLOCK(); return error; } static int netmap_have_memory_locked(struct netmap_priv_d* p) { return p->np_mref != NULL; } static void netmap_drop_memory_locked(struct netmap_priv_d* p) { if (p->np_mref) { netmap_mem_deref(p->np_mref); p->np_mref = NULL; } } /* * File descriptor's private data destructor. * * Call nm_register(ifp,0) to stop netmap mode on the interface and * revert to normal operation. We expect that np_ifp has not gone. * The second argument is the nifp to work on. In some cases it is * not attached yet to the netmap_priv_d so we need to pass it as * a separate argument. */ /* call with NMG_LOCK held */ static void netmap_do_unregif(struct netmap_priv_d *priv, struct netmap_if *nifp) { struct ifnet *ifp = priv->np_ifp; struct netmap_adapter *na = NA(ifp); NMG_LOCK_ASSERT(); na->refcount--; if (na->refcount <= 0) { /* last instance */ u_int i; if (netmap_verbose) D("deleting last instance for %s", ifp->if_xname); /* * (TO CHECK) This function is only called * when the last reference to this file descriptor goes * away. This means we cannot have any pending poll() * or interrupt routine operating on the structure. * XXX The file may be closed in a thread while * another thread is using it. * Linux keeps the file opened until the last reference * by any outstanding ioctl/poll or mmap is gone. * FreeBSD does not track mmap()s (but we do) and * wakes up any sleeping poll(). Need to check what * happens if the close() occurs while a concurrent * syscall is running. */ na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */ /* Wake up any sleeping threads. netmap_poll will * then return POLLERR * XXX The wake up now must happen during *_down(), when * we order all activities to stop. -gl */ nm_free_bdgfwd(na); for (i = 0; i < na->num_tx_rings + 1; i++) { mtx_destroy(&na->tx_rings[i].q_lock); } for (i = 0; i < na->num_rx_rings + 1; i++) { mtx_destroy(&na->rx_rings[i].q_lock); } /* XXX kqueue(9) needed; these will mirror knlist_init. */ /* knlist_destroy(&na->tx_si.si_note); */ /* knlist_destroy(&na->rx_si.si_note); */ if (nma_is_hw(na)) SWNA(ifp)->tx_rings = SWNA(ifp)->rx_rings = NULL; } /* * netmap_mem_if_delete() deletes the nifp, and if this is * the last instance also buffers, rings and krings. */ netmap_mem_if_delete(na, nifp); } /* we assume netmap adapter exists * Called with NMG_LOCK held */ static void nm_if_rele(struct ifnet *ifp) { int i, is_hw, hw, sw, lim; struct nm_bridge *b; struct netmap_adapter *na; uint8_t tmp[NM_BDG_MAXPORTS]; NMG_LOCK_ASSERT(); /* I can be called not only for get_ifp()-ed references where netmap's * capability is guaranteed, but also for non-netmap-capable NICs. */ if (!NETMAP_CAPABLE(ifp) || !NA(ifp)->na_bdg) { if_rele(ifp); return; } na = NA(ifp); b = na->na_bdg; is_hw = nma_is_hw(na); ND("%s has %d references", ifp->if_xname, NA(ifp)->na_bdg_refcount); if (!DROP_BDG_REF(ifp)) return; /* New algorithm: make a copy of bdg_port_index; lookup NA(ifp)->bdg_port and SWNA(ifp)->bdg_port in the array of bdg_port_index, replacing them with entries from the bottom of the array; decrement bdg_active_ports; acquire BDG_WLOCK() and copy back the array. */ hw = NA(ifp)->bdg_port; sw = (is_hw && SWNA(ifp)->na_bdg) ? 
SWNA(ifp)->bdg_port : -1; lim = b->bdg_active_ports; ND("detach %d and %d (lim %d)", hw, sw, lim); /* make a copy of the list of active ports, update it, * and then copy back within BDG_WLOCK(). */ memcpy(tmp, b->bdg_port_index, sizeof(tmp)); for (i = 0; (hw >= 0 || sw >= 0) && i < lim; ) { if (hw >= 0 && tmp[i] == hw) { ND("detach hw %d at %d", hw, i); lim--; /* point to last active port */ tmp[i] = tmp[lim]; /* swap with i */ tmp[lim] = hw; /* now this is inactive */ hw = -1; } else if (sw >= 0 && tmp[i] == sw) { ND("detach sw %d at %d", sw, i); lim--; tmp[i] = tmp[lim]; tmp[lim] = sw; sw = -1; } else { i++; } } if (hw >= 0 || sw >= 0) { D("XXX delete failed hw %d sw %d, should panic...", hw, sw); } hw = NA(ifp)->bdg_port; sw = (is_hw && SWNA(ifp)->na_bdg) ? SWNA(ifp)->bdg_port : -1; BDG_WLOCK(b); b->bdg_ports[hw] = NULL; na->na_bdg = NULL; if (sw >= 0) { b->bdg_ports[sw] = NULL; SWNA(ifp)->na_bdg = NULL; } memcpy(b->bdg_port_index, tmp, sizeof(tmp)); b->bdg_active_ports = lim; BDG_WUNLOCK(b); ND("now %d active ports", lim); if (lim == 0) { ND("marking bridge %s as free", b->bdg_basename); b->nm_bdg_lookup = NULL; } if (is_hw) { if_rele(ifp); } else { if (na->na_flags & NAF_MEM_OWNER) netmap_mem_private_delete(na->nm_mem); bzero(na, sizeof(*na)); free(na, M_DEVBUF); bzero(ifp, sizeof(*ifp)); free(ifp, M_DEVBUF); } } /* * returns 1 if this is the last instance and we can free priv */ static int netmap_dtor_locked(struct netmap_priv_d *priv) { struct ifnet *ifp = priv->np_ifp; #ifdef __FreeBSD__ /* * np_refcount is the number of active mmaps on * this file descriptor */ if (--priv->np_refcount > 0) { return 0; } #endif /* __FreeBSD__ */ if (ifp) { netmap_do_unregif(priv, priv->np_nifp); } netmap_drop_memory_locked(priv); if (ifp) { nm_if_rele(ifp); /* might also destroy *na */ } return 1; } static void netmap_dtor(void *data) { struct netmap_priv_d *priv = data; int last_instance; NMG_LOCK(); last_instance = netmap_dtor_locked(priv); NMG_UNLOCK(); if (last_instance) { bzero(priv, sizeof(*priv)); /* for safety */ free(priv, M_DEVBUF); } } #ifdef __FreeBSD__ /* * In order to track whether pages are still mapped, we hook into * the standard cdev_pager and intercept the constructor and * destructor. */ struct netmap_vm_handle_t { struct cdev *dev; struct netmap_priv_d *priv; }; static int netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { struct netmap_vm_handle_t *vmh = handle; D("handle %p size %jd prot %d foff %jd", handle, (intmax_t)size, prot, (intmax_t)foff); dev_ref(vmh->dev); return 0; } static void netmap_dev_pager_dtor(void *handle) { struct netmap_vm_handle_t *vmh = handle; struct cdev *dev = vmh->dev; struct netmap_priv_d *priv = vmh->priv; D("handle %p", handle); netmap_dtor(priv); free(vmh, M_DEVBUF); dev_rel(dev); } static int netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct netmap_vm_handle_t *vmh = object->handle; struct netmap_priv_d *priv = vmh->priv; vm_paddr_t paddr; vm_page_t page; vm_memattr_t memattr; vm_pindex_t pidx; ND("object %p offset %jd prot %d mres %p", object, (intmax_t)offset, prot, mres); memattr = object->memattr; pidx = OFF_TO_IDX(offset); paddr = netmap_mem_ofstophys(priv->np_mref, offset); if (paddr == 0) return VM_PAGER_FAIL; if (((*mres)->flags & PG_FICTITIOUS) != 0) { /* * If the passed in result page is a fake page, update it with * the new physical address. 
*/ page = *mres; vm_page_updatefake(page, paddr, memattr); } else { /* * Replace the passed in reqpage page with our own fake page and * free up the all of the original pages. */ #ifndef VM_OBJECT_WUNLOCK /* FreeBSD < 10.x */ #define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK #define VM_OBJECT_WLOCK VM_OBJECT_LOCK #endif /* VM_OBJECT_WUNLOCK */ VM_OBJECT_WUNLOCK(object); page = vm_page_getfake(paddr, memattr); VM_OBJECT_WLOCK(object); vm_page_lock(*mres); vm_page_free(*mres); vm_page_unlock(*mres); *mres = page; vm_page_insert(page, object, pidx); } page->valid = VM_PAGE_BITS_ALL; return (VM_PAGER_OK); } static struct cdev_pager_ops netmap_cdev_pager_ops = { .cdev_pg_ctor = netmap_dev_pager_ctor, .cdev_pg_dtor = netmap_dev_pager_dtor, .cdev_pg_fault = netmap_dev_pager_fault, }; static int netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff, vm_size_t objsize, vm_object_t *objp, int prot) { int error; struct netmap_vm_handle_t *vmh; struct netmap_priv_d *priv; vm_object_t obj; D("cdev %p foff %jd size %jd objp %p prot %d", cdev, (intmax_t )*foff, (intmax_t )objsize, objp, prot); vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (vmh == NULL) return ENOMEM; vmh->dev = cdev; NMG_LOCK(); error = devfs_get_cdevpriv((void**)&priv); if (error) goto err_unlock; vmh->priv = priv; priv->np_refcount++; NMG_UNLOCK(); error = netmap_get_memory(priv); if (error) goto err_deref; obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &netmap_cdev_pager_ops, objsize, prot, *foff, NULL); if (obj == NULL) { D("cdev_pager_allocate failed"); error = EINVAL; goto err_deref; } *objp = obj; return 0; err_deref: NMG_LOCK(); priv->np_refcount--; err_unlock: NMG_UNLOCK(); // err: free(vmh, M_DEVBUF); return error; } // XXX can we remove this ? static int netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { if (netmap_verbose) D("dev %p fflag 0x%x devtype %d td %p", dev, fflag, devtype, td); return 0; } static int netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct netmap_priv_d *priv; int error; (void)dev; (void)oflags; (void)devtype; (void)td; // XXX wait or nowait ? priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF, M_NOWAIT | M_ZERO); if (priv == NULL) return ENOMEM; error = devfs_set_cdevpriv(priv, netmap_dtor); if (error) return error; priv->np_refcount = 1; return 0; } #endif /* __FreeBSD__ */ /* * Handlers for synchronization of the queues from/to the host. * Netmap has two operating modes: * - in the default mode, the rings connected to the host stack are * just another ring pair managed by userspace; * - in transparent mode (XXX to be defined) incoming packets * (from the host or the NIC) are marked as NS_FORWARD upon * arrival, and the user application has a chance to reset the * flag for packets that should be dropped. * On the RXSYNC or poll(), packets in RX rings between * kring->nr_kcur and ring->cur with NS_FORWARD still set are moved * to the other side. * The transfer NIC --> host is relatively easy, just encapsulate * into mbufs and we are done. The host --> NIC side is slightly * harder because there might not be room in the tx ring so it * might take a while before releasing the buffer. 
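 *
 * As an illustration only (userspace side, not part of this file), a
 * receiver using transparent mode could look roughly like this,
 * assuming NR_FORWARD is set in ring->flags (or the netmap_fwd
 * sysctl is enabled), and with should_drop() being a hypothetical
 * application filter:
 *
 *	for (; ring->avail > 0; ring->avail--) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *
 *		if (should_drop(slot))
 *			slot->flags &= ~NS_FORWARD;
 *		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
 *	}
 *
 * Slots still carrying NS_FORWARD when they are returned to the
 * kernel are then passed to the other side by the code below.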
*/ /* * pass a chain of buffers to the host stack as coming from 'dst' */ static void netmap_send_up(struct ifnet *dst, struct mbuf *head) { struct mbuf *m; /* send packets up, outside the lock */ while ((m = head) != NULL) { head = head->m_nextpkt; m->m_nextpkt = NULL; if (netmap_verbose & NM_VERB_HOST) D("sending up pkt %p size %d", m, MBUF_LEN(m)); NM_SEND_UP(dst, m); } } struct mbq { struct mbuf *head; struct mbuf *tail; int count; }; /* * put a copy of the buffers marked NS_FORWARD into an mbuf chain. * Run from hwcur to cur - reserved */ static void netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force) { /* Take packets from hwcur to cur-reserved and pass them up. * In case of no buffers we give up. At the end of the loop, * the queue is drained in all cases. * XXX handle reserved */ u_int lim = kring->nkr_num_slots - 1; struct mbuf *m, *tail = q->tail; u_int k = kring->ring->cur, n = kring->ring->reserved; struct netmap_mem_d *nmd = kring->na->nm_mem; /* compute the final position, ring->cur - ring->reserved */ if (n > 0) { if (k < n) k += kring->nkr_num_slots; k += n; } for (n = kring->nr_hwcur; n != k;) { struct netmap_slot *slot = &kring->ring->slot[n]; n = nm_next(n, lim); if ((slot->flags & NS_FORWARD) == 0 && !force) continue; if (slot->len < 14 || slot->len > NETMAP_BDG_BUF_SIZE(nmd)) { D("bad pkt at %d len %d", n, slot->len); continue; } slot->flags &= ~NS_FORWARD; // XXX needed ? /* XXX adapt to the case of a multisegment packet */ m = m_devget(BDG_NMB(nmd, slot), slot->len, 0, kring->na->ifp, NULL); if (m == NULL) break; if (tail) tail->m_nextpkt = m; else q->head = m; tail = m; q->count++; m->m_nextpkt = NULL; } q->tail = tail; } /* * The host ring has packets from nr_hwcur to (cur - reserved) * to be sent down to the NIC. * We need to use the queue lock on the source (host RX ring) * to protect against netmap_transmit. * If the user is well behaved we do not need to acquire locks * on the destination(s), * so we only need to make sure that there are no panics because * of user errors. * XXX verify * * We scan the tx rings, which have just been * flushed so nr_hwcur == cur. Pushing packets down means * increment cur and decrement avail. * XXX to be verified */ static void netmap_sw_to_nic(struct netmap_adapter *na) { struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings]; struct netmap_kring *k1 = &na->tx_rings[0]; u_int i, howmany, src_lim, dst_lim; /* XXX we should also check that the carrier is on */ if (kring->nkr_stopped) return; mtx_lock(&kring->q_lock); if (kring->nkr_stopped) goto out; howmany = kring->nr_hwavail; /* XXX otherwise cur - reserved - nr_hwcur */ src_lim = kring->nkr_num_slots - 1; for (i = 0; howmany > 0 && i < na->num_tx_rings; i++, k1++) { ND("%d packets left to ring %d (space %d)", howmany, i, k1->nr_hwavail); dst_lim = k1->nkr_num_slots - 1; while (howmany > 0 && k1->ring->avail > 0) { struct netmap_slot *src, *dst, tmp; src = &kring->ring->slot[kring->nr_hwcur]; dst = &k1->ring->slot[k1->ring->cur]; tmp = *src; src->buf_idx = dst->buf_idx; src->flags = NS_BUF_CHANGED; dst->buf_idx = tmp.buf_idx; dst->len = tmp.len; dst->flags = NS_BUF_CHANGED; ND("out len %d buf %d from %d to %d", dst->len, dst->buf_idx, kring->nr_hwcur, k1->ring->cur); kring->nr_hwcur = nm_next(kring->nr_hwcur, src_lim); howmany--; kring->nr_hwavail--; k1->ring->cur = nm_next(k1->ring->cur, dst_lim); k1->ring->avail--; } kring->ring->cur = kring->nr_hwcur; // XXX k1++; // XXX why? 
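		/*
		 * Note that the forwarding above does not copy any payload:
		 * the host-ring slot and the NIC tx slot simply exchange
		 * their buffer indexes, and both are marked NS_BUF_CHANGED
		 * so that the NIC txsync reprograms its descriptor and
		 * userspace sees that the host slot now points to a
		 * different buffer.
		 */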
} out: mtx_unlock(&kring->q_lock); } /* * netmap_txsync_to_host() passes packets up. We are called from a * system call in user process context, and the only contention * can be among multiple user threads erroneously calling * this routine concurrently. */ static void netmap_txsync_to_host(struct netmap_adapter *na) { struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings]; struct netmap_ring *ring = kring->ring; u_int k, lim = kring->nkr_num_slots - 1; struct mbq q = { NULL, NULL, 0 }; if (nm_kr_tryget(kring)) { D("ring %p busy (user error)", kring); return; } k = ring->cur; if (k > lim) { D("invalid ring index in stack TX kring %p", kring); netmap_ring_reinit(kring); nm_kr_put(kring); return; } /* Take packets from hwcur to cur and pass them up. * In case of no buffers we give up. At the end of the loop, * the queue is drained in all cases. */ netmap_grab_packets(kring, &q, 1); kring->nr_hwcur = k; kring->nr_hwavail = ring->avail = lim; nm_kr_put(kring); netmap_send_up(na->ifp, q.head); } /* * This is the 'txsync' handler to send from a software ring to the * host stack. */ /* SWNA(ifp)->txrings[0] is always NA(ifp)->txrings[NA(ifp)->num_txrings] */ static int netmap_bdg_to_host(struct ifnet *ifp, u_int ring_nr, int flags) { (void)ring_nr; (void)flags; if (netmap_verbose > 255) RD(5, "sync to host %s ring %d", ifp->if_xname, ring_nr); netmap_txsync_to_host(NA(ifp)); return 0; } /* * rxsync backend for packets coming from the host stack. * They have been put in the queue by netmap_transmit() so we * need to protect access to the kring using a lock. * * This routine also does the selrecord if called from the poll handler * (we know because td != NULL). * * NOTE: on linux, selrecord() is defined as a macro and uses pwait * as an additional hidden argument. */ static void netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait) { struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings]; struct netmap_ring *ring = kring->ring; u_int j, n, lim = kring->nkr_num_slots; u_int k = ring->cur, resvd = ring->reserved; (void)pwait; /* disable unused warnings */ if (kring->nkr_stopped) /* check a first time without lock */ return; /* XXX as an optimization we could reuse na->core_lock */ mtx_lock(&kring->q_lock); if (kring->nkr_stopped) /* check again with lock held */ goto unlock_out; if (k >= lim) { netmap_ring_reinit(kring); goto unlock_out; } /* new packets are already set in nr_hwavail */ /* skip past packets that userspace has released */ j = kring->nr_hwcur; if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim - resvd; } if (j != k) { n = k >= j ? k - j : k + lim - j; kring->nr_hwavail -= n; kring->nr_hwcur = k; } k = ring->avail = kring->nr_hwavail - resvd; if (k == 0 && td) selrecord(td, &kring->si); if (k && (netmap_verbose & NM_VERB_HOST)) D("%d pkts from stack", k); unlock_out: mtx_unlock(&kring->q_lock); } /* * MUST BE CALLED UNDER NMG_LOCK() * * get a refcounted reference to an interface. * This is always called in the execution of an ioctl(). * * Return ENXIO if the interface does not exist, EINVAL if netmap * is not supported by the interface. * If successful, hold a reference. * * When the NIC is attached to a bridge, reference is managed * at na->na_bdg_refcount using ADD/DROP_BDG_REF() as well as * virtual ports. 
Hence, on the final DROP_BDG_REF(), the NIC * is detached from the bridge, then ifp's refcount is dropped (this * is equivalent to that ifp is destroyed in case of virtual ports. * * This function uses if_rele() when we want to prevent the NIC from * being detached from the bridge in error handling. But once refcount * is acquired by this function, it must be released using nm_if_rele(). */ static int get_ifp(struct nmreq *nmr, struct ifnet **ifp) { const char *name = nmr->nr_name; int namelen = strlen(name); struct ifnet *iter = NULL; int no_prefix = 0; /* first try to see if this is a bridge port. */ struct nm_bridge *b; struct netmap_adapter *na; int i, j, cand = -1, cand2 = -1; int needed; NMG_LOCK_ASSERT(); *ifp = NULL; /* default */ if (strncmp(name, NM_NAME, sizeof(NM_NAME) - 1)) { no_prefix = 1; /* no VALE prefix */ goto no_bridge_port; } b = nm_find_bridge(name, 1 /* create a new one if no exist */ ); if (b == NULL) { D("no bridges available for '%s'", name); return (ENXIO); } /* Now we are sure that name starts with the bridge's name, * lookup the port in the bridge. We need to scan the entire * list. It is not important to hold a WLOCK on the bridge * during the search because NMG_LOCK already guarantees * that there are no other possible writers. */ /* lookup in the local list of ports */ for (j = 0; j < b->bdg_active_ports; j++) { i = b->bdg_port_index[j]; na = b->bdg_ports[i]; // KASSERT(na != NULL); iter = na->ifp; /* XXX make sure the name only contains one : */ if (!strcmp(iter->if_xname, name) /* virtual port */ || (namelen > b->bdg_namelen && !strcmp(iter->if_xname, name + b->bdg_namelen + 1)) /* NIC */) { ADD_BDG_REF(iter); ND("found existing if %s refs %d", name, NA(iter)->na_bdg_refcount); *ifp = iter; /* we are done, this is surely netmap capable */ return 0; } } /* not found, see if we have space to attach entries */ needed = 2; /* in some cases we only need 1 */ if (b->bdg_active_ports + needed >= NM_BDG_MAXPORTS) { D("bridge full %d, cannot create new port", b->bdg_active_ports); return EINVAL; } /* record the next two ports available, but do not allocate yet */ cand = b->bdg_port_index[b->bdg_active_ports]; cand2 = b->bdg_port_index[b->bdg_active_ports + 1]; ND("+++ bridge %s port %s used %d avail %d %d", b->bdg_basename, name, b->bdg_active_ports, cand, cand2); /* * try see if there is a matching NIC with this name * (after the bridge's name) */ iter = ifunit_ref(name + b->bdg_namelen + 1); if (!iter) { /* this is a virtual port */ /* Create a temporary NA with arguments, then * bdg_netmap_attach() will allocate the real one * and attach it to the ifp */ struct netmap_adapter tmp_na; if (nmr->nr_cmd) { /* nr_cmd must be 0 for a virtual port */ return EINVAL; } bzero(&tmp_na, sizeof(tmp_na)); /* bound checking */ tmp_na.num_tx_rings = nmr->nr_tx_rings; nm_bound_var(&tmp_na.num_tx_rings, 1, 1, NM_BDG_MAXRINGS, NULL); nmr->nr_tx_rings = tmp_na.num_tx_rings; // write back tmp_na.num_rx_rings = nmr->nr_rx_rings; nm_bound_var(&tmp_na.num_rx_rings, 1, 1, NM_BDG_MAXRINGS, NULL); nmr->nr_rx_rings = tmp_na.num_rx_rings; // write back nm_bound_var(&nmr->nr_tx_slots, NM_BRIDGE_RINGSIZE, 1, NM_BDG_MAXSLOTS, NULL); tmp_na.num_tx_desc = nmr->nr_tx_slots; nm_bound_var(&nmr->nr_rx_slots, NM_BRIDGE_RINGSIZE, 1, NM_BDG_MAXSLOTS, NULL); tmp_na.num_rx_desc = nmr->nr_rx_slots; /* create a struct ifnet for the new port. 
* need M_NOWAIT as we are under nma_lock */ iter = malloc(sizeof(*iter), M_DEVBUF, M_NOWAIT | M_ZERO); if (!iter) return ENOMEM; strcpy(iter->if_xname, name); tmp_na.ifp = iter; /* bdg_netmap_attach creates a struct netmap_adapter */ bdg_netmap_attach(&tmp_na); cand2 = -1; /* only need one port */ } else if (NETMAP_CAPABLE(iter)) { /* this is a NIC */ /* make sure the NIC is not already in use */ if (NETMAP_OWNED_BY_ANY(iter)) { D("NIC %s busy, cannot attach to bridge", iter->if_xname); if_rele(iter); /* don't detach from bridge */ return EINVAL; } if (nmr->nr_arg1 != NETMAP_BDG_HOST) cand2 = -1; /* only need one port */ } else { /* not a netmap-capable NIC */ if_rele(iter); /* don't detach from bridge */ return EINVAL; } na = NA(iter); BDG_WLOCK(b); na->bdg_port = cand; ND("NIC %p to bridge port %d", NA(iter), cand); /* bind the port to the bridge (virtual ports are not active) */ b->bdg_ports[cand] = na; na->na_bdg = b; b->bdg_active_ports++; if (cand2 >= 0) { /* also bind the host stack to the bridge */ b->bdg_ports[cand2] = SWNA(iter); SWNA(iter)->bdg_port = cand2; SWNA(iter)->na_bdg = b; b->bdg_active_ports++; ND("host %p to bridge port %d", SWNA(iter), cand2); } ADD_BDG_REF(iter); // XXX one or two ? ND("if %s refs %d", name, NA(iter)->na_bdg_refcount); BDG_WUNLOCK(b); *ifp = iter; return 0; no_bridge_port: *ifp = iter; if (! *ifp) *ifp = ifunit_ref(name); if (*ifp == NULL) return (ENXIO); if (NETMAP_CAPABLE(*ifp)) { /* Users cannot use the NIC attached to a bridge directly */ if (no_prefix && NETMAP_OWNED_BY_KERN(*ifp)) { if_rele(*ifp); /* don't detach from bridge */ return EINVAL; } else return 0; /* valid pointer, we hold the refcount */ } nm_if_rele(*ifp); return EINVAL; // not NETMAP capable } /* * Error routine called when txsync/rxsync detects an error. * Can't do much more than resetting cur = hwcur, avail = hwavail. * Return 1 on reinit. * * This routine is only called by the upper half of the kernel. * It only reads hwcur (which is changed only by the upper half, too) * and hwavail (which may be changed by the lower half, but only on * a tx ring and only to increase it, so any error will be recovered * on the next call). For the above, we don't strictly need to call * it under lock. */ int netmap_ring_reinit(struct netmap_kring *kring) { struct netmap_ring *ring = kring->ring; u_int i, lim = kring->nkr_num_slots - 1; int errors = 0; // XXX KASSERT nm_kr_tryget RD(10, "called for %s", kring->na->ifp->if_xname); if (ring->cur > lim) errors++; for (i = 0; i <= lim; i++) { u_int idx = ring->slot[i].buf_idx; u_int len = ring->slot[i].len; if (idx < 2 || idx >= netmap_total_buffers) { if (!errors++) D("bad buffer at slot %d idx %d len %d ", i, idx, len); ring->slot[i].buf_idx = 0; ring->slot[i].len = 0; } else if (len > NETMAP_BDG_BUF_SIZE(kring->na->nm_mem)) { ring->slot[i].len = 0; if (!errors++) D("bad len %d at slot %d idx %d", len, i, idx); } } if (errors) { int pos = kring - kring->na->tx_rings; int n = kring->na->num_tx_rings + 1; RD(10, "total %d errors", errors); errors++; RD(10, "%s %s[%d] reinit, cur %d -> %d avail %d -> %d", kring->na->ifp->if_xname, pos < n ? "TX" : "RX", pos < n ? pos : pos - n, ring->cur, kring->nr_hwcur, ring->avail, kring->nr_hwavail); ring->cur = kring->nr_hwcur; ring->avail = kring->nr_hwavail; } return (errors ? 1 : 0); } /* * Set the ring ID. For devices with a single queue, a request * for all rings is the same as a single ring. 
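 *
 * For reference, the encodings handled below (constants from
 * netmap.h) are:
 *
 *	ringid == 0			bind all hardware rings
 *	ringid == NETMAP_HW_RING | i	bind only hardware ring i
 *	ringid == NETMAP_SW_RING	bind only the host (software) ring
 *
 * and NETMAP_NO_TX_POLL can be or-ed in to disable the implicit
 * txsync performed by poll(); e.g. nmr.nr_ringid = NETMAP_HW_RING | 2
 * requests hardware ring 2 only.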
*/ static int netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid) { struct ifnet *ifp = priv->np_ifp; struct netmap_adapter *na = NA(ifp); u_int i = ringid & NETMAP_RING_MASK; /* initially (np_qfirst == np_qlast) we don't want to lock */ u_int lim = na->num_rx_rings; if (na->num_tx_rings > lim) lim = na->num_tx_rings; if ( (ringid & NETMAP_HW_RING) && i >= lim) { D("invalid ring id %d", i); return (EINVAL); } priv->np_ringid = ringid; if (ringid & NETMAP_SW_RING) { priv->np_qfirst = NETMAP_SW_RING; priv->np_qlast = 0; } else if (ringid & NETMAP_HW_RING) { priv->np_qfirst = i; priv->np_qlast = i + 1; } else { priv->np_qfirst = 0; priv->np_qlast = NETMAP_HW_RING ; } priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1; if (netmap_verbose) { if (ringid & NETMAP_SW_RING) D("ringid %s set to SW RING", ifp->if_xname); else if (ringid & NETMAP_HW_RING) D("ringid %s set to HW RING %d", ifp->if_xname, priv->np_qfirst); else D("ringid %s set to all %d HW RINGS", ifp->if_xname, lim); } return 0; } /* * possibly move the interface to netmap-mode. * If success it returns a pointer to netmap_if, otherwise NULL. * This must be called with NMG_LOCK held. */ static struct netmap_if * netmap_do_regif(struct netmap_priv_d *priv, struct ifnet *ifp, uint16_t ringid, int *err) { struct netmap_adapter *na = NA(ifp); struct netmap_if *nifp = NULL; int error, need_mem; NMG_LOCK_ASSERT(); /* ring configuration may have changed, fetch from the card */ netmap_update_config(na); priv->np_ifp = ifp; /* store the reference */ error = netmap_set_ringid(priv, ringid); if (error) goto out; /* ensure allocators are ready */ need_mem = !netmap_have_memory_locked(priv); if (need_mem) { error = netmap_get_memory_locked(priv); ND("get_memory returned %d", error); if (error) goto out; } nifp = netmap_if_new(ifp->if_xname, na); if (nifp == NULL) { /* allocation failed */ /* we should drop the allocator, but only * if we were the ones who grabbed it */ if (need_mem) netmap_drop_memory_locked(priv); error = ENOMEM; goto out; } na->refcount++; if (ifp->if_capenable & IFCAP_NETMAP) { /* was already set */ } else { u_int i; /* Otherwise set the card in netmap mode * and make it use the shared buffers. * * If the interface is attached to a bridge, lock it. */ if (NETMAP_OWNED_BY_KERN(ifp)) BDG_WLOCK(NA(ifp)->na_bdg); for (i = 0 ; i < na->num_tx_rings + 1; i++) mtx_init(&na->tx_rings[i].q_lock, "nm_txq_lock", NULL, MTX_DEF); for (i = 0 ; i < na->num_rx_rings + 1; i++) { mtx_init(&na->rx_rings[i].q_lock, "nm_rxq_lock", NULL, MTX_DEF); } if (nma_is_hw(na)) { SWNA(ifp)->tx_rings = &na->tx_rings[na->num_tx_rings]; SWNA(ifp)->rx_rings = &na->rx_rings[na->num_rx_rings]; } /* * do not core lock because the race is harmless here, * there cannot be any traffic to netmap_transmit() */ error = na->nm_register(ifp, 1); /* mode on */ // XXX do we need to nm_alloc_bdgfwd() in all cases ? if (!error) error = nm_alloc_bdgfwd(na); if (error) { netmap_do_unregif(priv, nifp); nifp = NULL; } if (NETMAP_OWNED_BY_KERN(ifp)) BDG_WUNLOCK(NA(ifp)->na_bdg); } out: *err = error; if (nifp != NULL) { /* * advertise that the interface is ready bt setting ni_nifp. 
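 * (For reference, the matching reader side, as done in the NIOC*SYNC
 * handler and in netmap_poll() below, is roughly:
 *
 *	nifp = priv->np_nifp;
 *	if (nifp == NULL)
 *		return ENXIO;		registration not complete yet
 *	rmb();				paired with the wmb() below
 *	ifp = priv->np_ifp;		now safe to use
 * )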
* The barrier is needed because readers (poll and *SYNC) * check for priv->np_nifp != NULL without locking */ wmb(); /* make sure previous writes are visible to all CPUs */ priv->np_nifp = nifp; } return nifp; } /* Process NETMAP_BDG_ATTACH and NETMAP_BDG_DETACH */ static int nm_bdg_attach(struct nmreq *nmr) { struct ifnet *ifp; struct netmap_if *nifp; struct netmap_priv_d *npriv; int error; npriv = malloc(sizeof(*npriv), M_DEVBUF, M_NOWAIT|M_ZERO); if (npriv == NULL) return ENOMEM; NMG_LOCK(); error = get_ifp(nmr, &ifp); if (error) /* no device, or another bridge or user owns the device */ goto unlock_exit; /* get_ifp() sets na_bdg if this is a physical interface * that we can attach to a switch. */ if (!NETMAP_OWNED_BY_KERN(ifp)) { /* got reference to a virtual port or direct access to a NIC. * perhaps specified no bridge prefix or wrong NIC name */ error = EINVAL; goto unref_exit; } if (NA(ifp)->refcount > 0) { /* already registered */ error = EBUSY; DROP_BDG_REF(ifp); goto unlock_exit; } nifp = netmap_do_regif(npriv, ifp, nmr->nr_ringid, &error); if (!nifp) { goto unref_exit; } NA(ifp)->na_kpriv = npriv; NMG_UNLOCK(); ND("registered %s to netmap-mode", ifp->if_xname); return 0; unref_exit: nm_if_rele(ifp); unlock_exit: NMG_UNLOCK(); bzero(npriv, sizeof(*npriv)); free(npriv, M_DEVBUF); return error; } static int nm_bdg_detach(struct nmreq *nmr) { struct ifnet *ifp; int error; int last_instance; NMG_LOCK(); error = get_ifp(nmr, &ifp); if (error) { /* no device, or another bridge or user owns the device */ goto unlock_exit; } /* XXX do we need to check this ? */ if (!NETMAP_OWNED_BY_KERN(ifp)) { /* got reference to a virtual port or direct access to a NIC. * perhaps specified no bridge's prefix or wrong NIC's name */ error = EINVAL; goto unref_exit; } if (NA(ifp)->refcount == 0) { /* not registered */ error = EINVAL; goto unref_exit; } DROP_BDG_REF(ifp); /* the one from get_ifp */ last_instance = netmap_dtor_locked(NA(ifp)->na_kpriv); /* unregister */ NMG_UNLOCK(); if (!last_instance) { D("--- error, trying to detach an entry with active mmaps"); error = EINVAL; } else { struct netmap_priv_d *npriv = NA(ifp)->na_kpriv; NA(ifp)->na_kpriv = NULL; bzero(npriv, sizeof(*npriv)); free(npriv, M_DEVBUF); } return error; unref_exit: nm_if_rele(ifp); unlock_exit: NMG_UNLOCK(); return error; } /* Initialize necessary fields of sw adapter located in right after hw's * one. sw adapter attaches a pair of sw rings of the netmap-mode NIC. * It is always activated and deactivated at the same tie with the hw's one. * Thus we don't need refcounting on the sw adapter. * Regardless of NIC's feature we use separate lock so that anybody can lock * me independently from the hw adapter. * Make sure nm_register is NULL to be handled as FALSE in nma_is_hw */ static void netmap_attach_sw(struct ifnet *ifp) { struct netmap_adapter *hw_na = NA(ifp); struct netmap_adapter *na = SWNA(ifp); na->ifp = ifp; na->num_rx_rings = na->num_tx_rings = 1; na->num_tx_desc = hw_na->num_tx_desc; na->num_rx_desc = hw_na->num_rx_desc; na->nm_txsync = netmap_bdg_to_host; /* we use the same memory allocator as the * the hw adapter */ na->nm_mem = hw_na->nm_mem; } /* exported to kernel callers, e.g. OVS ? * Entry point. * Called without NMG_LOCK. 
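 *
 * A minimal sketch of an in-kernel caller (illustrative only, the
 * port name below is made up):
 *
 *	struct nmreq nmr;
 *	int error;
 *
 *	bzero(&nmr, sizeof(nmr));
 *	nmr.nr_version = NETMAP_API;
 *	strncpy(nmr.nr_name, "vale0:em1", sizeof(nmr.nr_name));
 *	nmr.nr_cmd = NETMAP_BDG_ATTACH;
 *	error = netmap_bdg_ctl(&nmr, NULL);	attach em1 to switch vale0
 *
 * A non-NULL 'func' is only meaningful with NETMAP_BDG_LOOKUP_REG,
 * which installs it as the forwarding lookup function of the named
 * bridge in place of the default netmap_bdg_learning().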
*/ int netmap_bdg_ctl(struct nmreq *nmr, bdg_lookup_fn_t func) { struct nm_bridge *b; struct netmap_adapter *na; struct ifnet *iter; char *name = nmr->nr_name; int cmd = nmr->nr_cmd, namelen = strlen(name); int error = 0, i, j; switch (cmd) { case NETMAP_BDG_ATTACH: error = nm_bdg_attach(nmr); break; case NETMAP_BDG_DETACH: error = nm_bdg_detach(nmr); break; case NETMAP_BDG_LIST: /* this is used to enumerate bridges and ports */ if (namelen) { /* look up indexes of bridge and port */ if (strncmp(name, NM_NAME, strlen(NM_NAME))) { error = EINVAL; break; } NMG_LOCK(); b = nm_find_bridge(name, 0 /* don't create */); if (!b) { error = ENOENT; NMG_UNLOCK(); break; } error = ENOENT; for (j = 0; j < b->bdg_active_ports; j++) { i = b->bdg_port_index[j]; na = b->bdg_ports[i]; if (na == NULL) { D("---AAAAAAAAARGH-------"); continue; } iter = na->ifp; /* the former and the latter identify a * virtual port and a NIC, respectively */ if (!strcmp(iter->if_xname, name) || (namelen > b->bdg_namelen && !strcmp(iter->if_xname, name + b->bdg_namelen + 1))) { /* bridge index */ nmr->nr_arg1 = b - nm_bridges; nmr->nr_arg2 = i; /* port index */ error = 0; break; } } NMG_UNLOCK(); } else { /* return the first non-empty entry starting from * bridge nr_arg1 and port nr_arg2. * * Users can detect the end of the same bridge by * seeing the new and old value of nr_arg1, and can * detect the end of all the bridge by error != 0 */ i = nmr->nr_arg1; j = nmr->nr_arg2; NMG_LOCK(); for (error = ENOENT; i < NM_BRIDGES; i++) { b = nm_bridges + i; if (j >= b->bdg_active_ports) { j = 0; /* following bridges scan from 0 */ continue; } nmr->nr_arg1 = i; nmr->nr_arg2 = j; j = b->bdg_port_index[j]; na = b->bdg_ports[j]; iter = na->ifp; strncpy(name, iter->if_xname, (size_t)IFNAMSIZ); error = 0; break; } NMG_UNLOCK(); } break; case NETMAP_BDG_LOOKUP_REG: /* register a lookup function to the given bridge. * nmr->nr_name may be just bridge's name (including ':' * if it is not just NM_NAME). */ if (!func) { error = EINVAL; break; } NMG_LOCK(); b = nm_find_bridge(name, 0 /* don't create */); if (!b) { error = EINVAL; } else { b->nm_bdg_lookup = func; } NMG_UNLOCK(); break; default: D("invalid cmd (nmr->nr_cmd) (0x%x)", cmd); error = EINVAL; break; } return error; } /* * ioctl(2) support for the "netmap" device. * * Following a list of accepted commands: * - NIOCGINFO * - SIOCGIFADDR just for convenience * - NIOCREGIF * - NIOCUNREGIF * - NIOCTXSYNC * - NIOCRXSYNC * * Return 0 on success, errno otherwise. */ static int netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct netmap_priv_d *priv = NULL; struct ifnet *ifp; struct nmreq *nmr = (struct nmreq *) data; struct netmap_adapter *na; int error; u_int i, lim; struct netmap_if *nifp; struct netmap_kring *krings; (void)dev; /* UNUSED */ (void)fflag; /* UNUSED */ #ifdef linux #define devfs_get_cdevpriv(pp) \ ({ *(struct netmap_priv_d **)pp = ((struct file *)td)->private_data; \ (*pp ? 0 : ENOENT); }) /* devfs_set_cdevpriv cannot fail on linux */ #define devfs_set_cdevpriv(p, fn) \ ({ ((struct file *)td)->private_data = p; (p ? 0 : EINVAL); }) #define devfs_clear_cdevpriv() do { \ netmap_dtor(priv); ((struct file *)td)->private_data = 0; \ } while (0) #endif /* linux */ CURVNET_SET(TD_TO_VNET(td)); error = devfs_get_cdevpriv((void **)&priv); if (error) { CURVNET_RESTORE(); /* XXX ENOENT should be impossible, since the priv * is now created in the open */ return (error == ENOENT ? 
ENXIO : error); } nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0'; /* truncate name */ switch (cmd) { case NIOCGINFO: /* return capabilities etc */ if (nmr->nr_version != NETMAP_API) { #ifdef TEST_STUFF /* some test code for locks etc */ if (nmr->nr_version == 666) { error = nm_test(nmr); break; } #endif /* TEST_STUFF */ D("API mismatch got %d have %d", nmr->nr_version, NETMAP_API); nmr->nr_version = NETMAP_API; error = EINVAL; break; } if (nmr->nr_cmd == NETMAP_BDG_LIST) { error = netmap_bdg_ctl(nmr, NULL); break; } NMG_LOCK(); do { /* update configuration */ error = netmap_get_memory_locked(priv); ND("get_memory returned %d", error); if (error) break; /* memsize is always valid */ nmr->nr_memsize = netmap_mem_get_totalsize(priv->np_mref); nmr->nr_offset = 0; nmr->nr_rx_slots = nmr->nr_tx_slots = 0; if (nmr->nr_name[0] == '\0') /* just get memory info */ break; error = get_ifp(nmr, &ifp); /* get a refcount */ if (error) break; na = NA(ifp); /* retrieve netmap_adapter */ netmap_update_config(na); nmr->nr_rx_rings = na->num_rx_rings; nmr->nr_tx_rings = na->num_tx_rings; nmr->nr_rx_slots = na->num_rx_desc; nmr->nr_tx_slots = na->num_tx_desc; nm_if_rele(ifp); /* return the refcount */ } while (0); NMG_UNLOCK(); break; case NIOCREGIF: if (nmr->nr_version != NETMAP_API) { nmr->nr_version = NETMAP_API; error = EINVAL; break; } /* possibly attach/detach NIC and VALE switch */ i = nmr->nr_cmd; if (i == NETMAP_BDG_ATTACH || i == NETMAP_BDG_DETACH) { error = netmap_bdg_ctl(nmr, NULL); break; } else if (i != 0) { D("nr_cmd must be 0 not %d", i); error = EINVAL; break; } /* protect access to priv from concurrent NIOCREGIF */ NMG_LOCK(); do { if (priv->np_ifp != NULL) { /* thread already registered */ error = netmap_set_ringid(priv, nmr->nr_ringid); break; } /* find the interface and a reference */ error = get_ifp(nmr, &ifp); /* keep reference */ if (error) break; if (NETMAP_OWNED_BY_KERN(ifp)) { nm_if_rele(ifp); error = EBUSY; break; } nifp = netmap_do_regif(priv, ifp, nmr->nr_ringid, &error); if (!nifp) { /* reg. failed, release priv and ref */ nm_if_rele(ifp); /* return the refcount */ priv->np_ifp = NULL; priv->np_nifp = NULL; break; } /* return the offset of the netmap_if object */ na = NA(ifp); /* retrieve netmap adapter */ nmr->nr_rx_rings = na->num_rx_rings; nmr->nr_tx_rings = na->num_tx_rings; nmr->nr_rx_slots = na->num_rx_desc; nmr->nr_tx_slots = na->num_tx_desc; nmr->nr_memsize = netmap_mem_get_totalsize(na->nm_mem); nmr->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp); } while (0); NMG_UNLOCK(); break; case NIOCUNREGIF: // XXX we have no data here ? D("deprecated, data is %p", nmr); error = EINVAL; break; case NIOCTXSYNC: case NIOCRXSYNC: nifp = priv->np_nifp; if (nifp == NULL) { error = ENXIO; break; } rmb(); /* make sure following reads are not from cache */ ifp = priv->np_ifp; /* we have a reference */ if (ifp == NULL) { D("Internal error: nifp != NULL && ifp == NULL"); error = ENXIO; break; } na = NA(ifp); /* retrieve netmap adapter */ if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */ if (cmd == NIOCTXSYNC) netmap_txsync_to_host(na); else netmap_rxsync_from_host(na, NULL, NULL); break; } /* find the last ring to scan */ lim = priv->np_qlast; if (lim == NETMAP_HW_RING) lim = (cmd == NIOCTXSYNC) ? na->num_tx_rings : na->num_rx_rings; krings = (cmd == NIOCTXSYNC) ? 
na->tx_rings : na->rx_rings; for (i = priv->np_qfirst; i < lim; i++) { struct netmap_kring *kring = krings + i; if (nm_kr_tryget(kring)) { error = EBUSY; goto out; } if (cmd == NIOCTXSYNC) { if (netmap_verbose & NM_VERB_TXSYNC) D("pre txsync ring %d cur %d hwcur %d", i, kring->ring->cur, kring->nr_hwcur); na->nm_txsync(ifp, i, NAF_FORCE_RECLAIM); if (netmap_verbose & NM_VERB_TXSYNC) D("post txsync ring %d cur %d hwcur %d", i, kring->ring->cur, kring->nr_hwcur); } else { na->nm_rxsync(ifp, i, NAF_FORCE_READ); microtime(&na->rx_rings[i].ring->ts); } nm_kr_put(kring); } break; #ifdef __FreeBSD__ case BIOCIMMEDIATE: case BIOCGHDRCMPLT: case BIOCSHDRCMPLT: case BIOCSSEESENT: D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT"); break; default: /* allow device-specific ioctls */ { struct socket so; bzero(&so, sizeof(so)); NMG_LOCK(); error = get_ifp(nmr, &ifp); /* keep reference */ if (error) { NMG_UNLOCK(); break; } so.so_vnet = ifp->if_vnet; // so->so_proto not null. error = ifioctl(&so, cmd, data, td); nm_if_rele(ifp); NMG_UNLOCK(); break; } #else /* linux */ default: error = EOPNOTSUPP; #endif /* linux */ } out: CURVNET_RESTORE(); return (error); } /* * select(2) and poll(2) handlers for the "netmap" device. * * Can be called for one or more queues. * Return true the event mask corresponding to ready events. * If there are no ready events, do a selrecord on either individual * selinfo or on the global one. * Device-dependent parts (locking and sync of tx/rx rings) * are done through callbacks. * * On linux, arguments are really pwait, the poll table, and 'td' is struct file * * The first one is remapped to pwait as selrecord() uses the name as an * hidden argument. */ static int netmap_poll(struct cdev *dev, int events, struct thread *td) { struct netmap_priv_d *priv = NULL; struct netmap_adapter *na; struct ifnet *ifp; struct netmap_kring *kring; u_int i, check_all, want_tx, want_rx, revents = 0; u_int lim_tx, lim_rx, host_forwarded = 0; struct mbq q = { NULL, NULL, 0 }; void *pwait = dev; /* linux compatibility */ int retry_tx = 1; (void)pwait; if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL) return POLLERR; if (priv->np_nifp == NULL) { D("No if registered"); return POLLERR; } rmb(); /* make sure following reads are not from cache */ ifp = priv->np_ifp; // XXX check for deleting() ? if ( (ifp->if_capenable & IFCAP_NETMAP) == 0) return POLLERR; if (netmap_verbose & 0x8000) D("device %s events 0x%x", ifp->if_xname, events); want_tx = events & (POLLOUT | POLLWRNORM); want_rx = events & (POLLIN | POLLRDNORM); na = NA(ifp); /* retrieve netmap adapter */ lim_tx = na->num_tx_rings; lim_rx = na->num_rx_rings; if (priv->np_qfirst == NETMAP_SW_RING) { /* handle the host stack ring */ if (priv->np_txpoll || want_tx) { /* push any packets up, then we are always ready */ netmap_txsync_to_host(na); revents |= want_tx; } if (want_rx) { kring = &na->rx_rings[lim_rx]; if (kring->ring->avail == 0) netmap_rxsync_from_host(na, td, dev); if (kring->ring->avail > 0) { revents |= want_rx; } } return (revents); } /* if we are in transparent mode, check also the host rx ring */ kring = &na->rx_rings[lim_rx]; if ( (priv->np_qlast == NETMAP_HW_RING) // XXX check_all && want_rx && (netmap_fwd || kring->ring->flags & NR_FORWARD) ) { if (kring->ring->avail == 0) netmap_rxsync_from_host(na, td, dev); if (kring->ring->avail > 0) revents |= want_rx; } /* * check_all is set if the card has more than one queue AND * the client is polling all of them. 
If true, we sleep on * the "global" selinfo, otherwise we sleep on individual selinfo * (FreeBSD only allows two selinfo's per file descriptor). * The interrupt routine in the driver wake one or the other * (or both) depending on which clients are active. * * rxsync() is only called if we run out of buffers on a POLLIN. * txsync() is called if we run out of buffers on POLLOUT, or * there are pending packets to send. The latter can be disabled * passing NETMAP_NO_TX_POLL in the NIOCREG call. */ check_all = (priv->np_qlast == NETMAP_HW_RING) && (lim_tx > 1 || lim_rx > 1); if (priv->np_qlast != NETMAP_HW_RING) { lim_tx = lim_rx = priv->np_qlast; } /* * We start with a lock free round which is good if we have * data available. If this fails, then lock and call the sync * routines. */ for (i = priv->np_qfirst; want_rx && i < lim_rx; i++) { kring = &na->rx_rings[i]; if (kring->ring->avail > 0) { revents |= want_rx; want_rx = 0; /* also breaks the loop */ } } for (i = priv->np_qfirst; want_tx && i < lim_tx; i++) { kring = &na->tx_rings[i]; if (kring->ring->avail > 0) { revents |= want_tx; want_tx = 0; /* also breaks the loop */ } } /* * If we to push packets out (priv->np_txpoll) or want_tx is * still set, we do need to run the txsync calls (on all rings, * to avoid that the tx rings stall). */ if (priv->np_txpoll || want_tx) { /* If we really want to be woken up (want_tx), * do a selrecord, either on the global or on * the private structure. Then issue the txsync * so there is no race in the selrecord/selwait */ flush_tx: for (i = priv->np_qfirst; i < lim_tx; i++) { kring = &na->tx_rings[i]; /* * Skip this ring if want_tx == 0 * (we have already done a successful sync on * a previous ring) AND kring->cur == kring->hwcur * (there are no pending transmissions for this ring). */ if (!want_tx && kring->ring->cur == kring->nr_hwcur) continue; /* make sure only one user thread is doing this */ if (nm_kr_tryget(kring)) { ND("ring %p busy is %d", kring, (int)kring->nr_busy); revents |= POLLERR; goto out; } if (netmap_verbose & NM_VERB_TXSYNC) D("send %d on %s %d", kring->ring->cur, ifp->if_xname, i); if (na->nm_txsync(ifp, i, 0)) revents |= POLLERR; /* Check avail/call selrecord only if called with POLLOUT */ if (want_tx) { if (kring->ring->avail > 0) { /* stop at the first ring. We don't risk * starvation. */ revents |= want_tx; want_tx = 0; } } nm_kr_put(kring); } if (want_tx && retry_tx) { selrecord(td, check_all ? &na->tx_si : &na->tx_rings[priv->np_qfirst].si); retry_tx = 0; goto flush_tx; } } /* * now if want_rx is still set we need to lock and rxsync. * Do it on all rings because otherwise we starve. */ if (want_rx) { int retry_rx = 1; do_retry_rx: for (i = priv->np_qfirst; i < lim_rx; i++) { kring = &na->rx_rings[i]; if (nm_kr_tryget(kring)) { revents |= POLLERR; goto out; } /* XXX NR_FORWARD should only be read on * physical or NIC ports */ if (netmap_fwd ||kring->ring->flags & NR_FORWARD) { ND(10, "forwarding some buffers up %d to %d", kring->nr_hwcur, kring->ring->cur); netmap_grab_packets(kring, &q, netmap_fwd); } if (na->nm_rxsync(ifp, i, 0)) revents |= POLLERR; if (netmap_no_timestamp == 0 || kring->ring->flags & NR_TIMESTAMP) { microtime(&kring->ring->ts); } if (kring->ring->avail > 0) { revents |= want_rx; retry_rx = 0; } nm_kr_put(kring); } if (retry_rx) { retry_rx = 0; selrecord(td, check_all ? &na->rx_si : &na->rx_rings[priv->np_qfirst].si); goto do_retry_rx; } } /* forward host to the netmap ring. 
* I am accessing nr_hwavail without lock, but netmap_transmit * can only increment it, so the operation is safe. */ kring = &na->rx_rings[lim_rx]; if ( (priv->np_qlast == NETMAP_HW_RING) // XXX check_all && (netmap_fwd || kring->ring->flags & NR_FORWARD) && kring->nr_hwavail > 0 && !host_forwarded) { netmap_sw_to_nic(na); host_forwarded = 1; /* prevent another pass */ want_rx = 0; goto flush_tx; } if (q.head) netmap_send_up(na->ifp, q.head); out: return (revents); } /*------- driver support routines ------*/ /* * Initialize a ``netmap_adapter`` object created by driver on attach. * We allocate a block of memory with room for a struct netmap_adapter * plus two sets of N+2 struct netmap_kring (where N is the number * of hardware rings): * krings 0..N-1 are for the hardware queues. * kring N is for the host stack queue * kring N+1 is only used for the selinfo for all queues. * Return 0 on success, ENOMEM otherwise. * * By default the receive and transmit adapter ring counts are both initialized * to num_queues. na->num_tx_rings can be set for cards with different tx/rx * setups. */ int netmap_attach(struct netmap_adapter *arg, u_int num_queues) { struct netmap_adapter *na = NULL; struct ifnet *ifp = arg ? arg->ifp : NULL; size_t len; if (arg == NULL || ifp == NULL) goto fail; /* a VALE port uses two endpoints */ len = nma_is_vp(arg) ? sizeof(*na) : sizeof(*na) * 2; na = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (na == NULL) goto fail; WNA(ifp) = na; *na = *arg; /* copy everything, trust the driver to not pass junk */ NETMAP_SET_CAPABLE(ifp); if (na->num_tx_rings == 0) na->num_tx_rings = num_queues; na->num_rx_rings = num_queues; na->refcount = na->na_single = na->na_multi = 0; /* Core lock initialized here, others after netmap_if_new. */ mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK, MTX_DEF); #ifdef linux if (ifp->netdev_ops) { ND("netdev_ops %p", ifp->netdev_ops); /* prepare a clone of the netdev ops */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28) na->nm_ndo.ndo_start_xmit = ifp->netdev_ops; #else na->nm_ndo = *ifp->netdev_ops; #endif } na->nm_ndo.ndo_start_xmit = linux_netmap_start_xmit; #endif /* linux */ na->nm_mem = arg->nm_mem ? arg->nm_mem : &nm_mem; if (!nma_is_vp(arg)) netmap_attach_sw(ifp); D("success for %s", ifp->if_xname); return 0; fail: D("fail, arg %p ifp %p na %p", arg, ifp, na); netmap_detach(ifp); return (na ? EINVAL : ENOMEM); } /* * Free the allocated memory linked to the given ``netmap_adapter`` * object. */ void netmap_detach(struct ifnet *ifp) { struct netmap_adapter *na = NA(ifp); if (!na) return; mtx_destroy(&na->core_lock); if (na->tx_rings) { /* XXX should not happen */ D("freeing leftover tx_rings"); free(na->tx_rings, M_DEVBUF); } if (na->na_flags & NAF_MEM_OWNER) netmap_mem_private_delete(na->nm_mem); bzero(na, sizeof(*na)); WNA(ifp) = NULL; free(na, M_DEVBUF); } int nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_adapter *na, u_int ring_nr); /* * Intercept packets from the network stack and pass them * to netmap as incoming packets on the 'software' ring. * We rely on the OS to make sure that the ifp and na do not go * away (typically the caller checks for IFF_DRV_RUNNING or the like). * In nm_register() or whenever there is a reinitialization, * we make sure to access the core lock and per-ring locks * so that IFCAP_NETMAP is visible here. 
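 *
 * For illustration, a patched driver typically diverts its transmit
 * path with a test like the following (sketch only, names are generic
 * and not from a specific driver):
 *
 *	if (ifp->if_capenable & IFCAP_NETMAP)
 *		return netmap_transmit(ifp, m);
 *	... regular transmit path ...
 *
 * On linux the equivalent hook is linux_netmap_start_xmit() (see
 * below), reached through the cloned netdev_ops prepared in
 * netmap_attach().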
*/ int netmap_transmit(struct ifnet *ifp, struct mbuf *m) { struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring; u_int i, len = MBUF_LEN(m); u_int error = EBUSY, lim; struct netmap_slot *slot; // XXX [Linux] we do not need this lock // if we follow the down/configure/up protocol -gl // mtx_lock(&na->core_lock); if ( (ifp->if_capenable & IFCAP_NETMAP) == 0) { /* interface not in netmap mode anymore */ error = ENXIO; goto done; } kring = &na->rx_rings[na->num_rx_rings]; lim = kring->nkr_num_slots - 1; if (netmap_verbose & NM_VERB_HOST) D("%s packet %d len %d from the stack", ifp->if_xname, kring->nr_hwcur + kring->nr_hwavail, len); // XXX reconsider long packets if we handle fragments if (len > NETMAP_BDG_BUF_SIZE(na->nm_mem)) { /* too long for us */ D("%s from_host, drop packet size %d > %d", ifp->if_xname, len, NETMAP_BDG_BUF_SIZE(na->nm_mem)); goto done; } if (SWNA(ifp)->na_bdg) { struct nm_bdg_fwd *ft; char *dst; na = SWNA(ifp); /* we operate on the host port */ ft = na->rx_rings[0].nkr_ft; dst = BDG_NMB(na->nm_mem, &na->rx_rings[0].ring->slot[0]); /* use slot 0 in the ft, there is nothing queued here */ /* XXX we can save the copy calling m_copydata in nm_bdg_flush, * need a special flag for this. */ m_copydata(m, 0, (int)len, dst); ft->ft_flags = 0; ft->ft_len = len; ft->ft_buf = dst; ft->ft_next = NM_FT_NULL; ft->ft_frags = 1; if (netmap_verbose & NM_VERB_HOST) RD(5, "pkt %p size %d to bridge port %d", dst, len, na->bdg_port); nm_bdg_flush(ft, 1, na, 0); na = NA(ifp); /* back to the regular object/lock */ error = 0; goto done; } /* protect against other instances of netmap_transmit, * and userspace invocations of rxsync(). * XXX could reuse core_lock */ // XXX [Linux] there can be no other instances of netmap_transmit // on this same ring, but we still need this lock to protect // concurrent access from netmap_sw_to_nic() -gl mtx_lock(&kring->q_lock); if (kring->nr_hwavail >= lim) { if (netmap_verbose) D("stack ring %s full\n", ifp->if_xname); } else { /* compute the insert position */ i = nm_kr_rxpos(kring); slot = &kring->ring->slot[i]; m_copydata(m, 0, (int)len, BDG_NMB(na->nm_mem, slot)); slot->len = len; slot->flags = kring->nkr_slot_flags; kring->nr_hwavail++; if (netmap_verbose & NM_VERB_HOST) D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_rings); selwakeuppri(&kring->si, PI_NET); error = 0; } mtx_unlock(&kring->q_lock); done: // mtx_unlock(&na->core_lock); /* release the mbuf in either cases of success or failure. As an * alternative, put the mbuf in a free list and free the list * only when really necessary. */ m_freem(m); return (error); } /* * netmap_reset() is called by the driver routines when reinitializing * a ring. The driver is in charge of locking to protect the kring. * If netmap mode is not set just return NULL. */ struct netmap_slot * netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n, u_int new_cur) { struct netmap_kring *kring; int new_hwofs, lim; if (na == NULL) { D("NULL na, should not happen"); return NULL; /* no netmap support here */ } if (!(na->ifp->if_capenable & IFCAP_NETMAP)) { D("interface not in netmap mode"); return NULL; /* nothing to reinitialize */ } /* XXX note- in the new scheme, we are not guaranteed to be * under lock (e.g. when called on a device reset). * In this case, we should set a flag and do not trust too * much the values. In practice: TODO * - set a RESET flag somewhere in the kring * - do the processing in a conservative way * - let the *sync() fixup at the end. 
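 *
 * For context, a typical caller in a patched driver's ring
 * (re)initialization path looks roughly like this (illustrative,
 * the names do not come from a specific driver):
 *
 *	slot = netmap_reset(na, NR_RX, ring_nr, 0);
 *	if (slot != NULL) {
 *		for each descriptor l in the NIC ring:
 *			addr = PNMB(slot + l, &paddr);
 *			program the descriptor with paddr
 *	}
 *
 * A NULL return simply means the interface is not in netmap mode and
 * the driver should fall back to its mbuf-based setup.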
*/ if (tx == NR_TX) { if (n >= na->num_tx_rings) return NULL; kring = na->tx_rings + n; new_hwofs = kring->nr_hwcur - new_cur; } else { if (n >= na->num_rx_rings) return NULL; kring = na->rx_rings + n; new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur; } lim = kring->nkr_num_slots - 1; if (new_hwofs > lim) new_hwofs -= lim + 1; /* Always set the new offset value and realign the ring. */ D("%s hwofs %d -> %d, hwavail %d -> %d", tx == NR_TX ? "TX" : "RX", kring->nkr_hwofs, new_hwofs, kring->nr_hwavail, tx == NR_TX ? lim : kring->nr_hwavail); kring->nkr_hwofs = new_hwofs; if (tx == NR_TX) kring->nr_hwavail = lim; #if 0 // def linux /* XXX check that the mappings are correct */ /* need ring_nr, adapter->pdev, direction */ buffer_info->dma = dma_map_single(&pdev->dev, addr, adapter->rx_buffer_len, DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { D("error mapping rx netmap buffer %d", i); // XXX fix error handling } #endif /* linux */ /* * Wakeup on the individual and global selwait * We do the wakeup here, but the ring is not yet reconfigured. * However, we are under lock so there are no races. */ selwakeuppri(&kring->si, PI_NET); selwakeuppri(tx == NR_TX ? &na->tx_si : &na->rx_si, PI_NET); return kring->ring->slot; } /* * Grab packets from a kring, move them into the ft structure * associated to the tx (input) port. Max one instance per port, * filtered on input (ioctl, poll or XXX). * Returns the next position in the ring. */ static int nm_bdg_preflush(struct netmap_adapter *na, u_int ring_nr, struct netmap_kring *kring, u_int end) { struct netmap_ring *ring = kring->ring; struct nm_bdg_fwd *ft; u_int j = kring->nr_hwcur, lim = kring->nkr_num_slots - 1; u_int ft_i = 0; /* start from 0 */ u_int frags = 1; /* how many frags ? */ struct nm_bridge *b = na->na_bdg; /* To protect against modifications to the bridge we acquire a * shared lock, waiting if we can sleep (if the source port is * attached to a user process) or with a trylock otherwise (NICs). */ ND("wait rlock for %d packets", n); if (na->na_flags & NAF_BDG_MAYSLEEP) BDG_RLOCK(b); else if (!BDG_RTRYLOCK(b)) return 0; ND(5, "rlock acquired for %d packets", n); ft = kring->nkr_ft; for (; likely(j != end); j = nm_next(j, lim)) { struct netmap_slot *slot = &ring->slot[j]; char *buf; ft[ft_i].ft_len = slot->len; ft[ft_i].ft_flags = slot->flags; ND("flags is 0x%x", slot->flags); /* this slot goes into a list so initialize the link field */ ft[ft_i].ft_next = NM_FT_NULL; buf = ft[ft_i].ft_buf = (slot->flags & NS_INDIRECT) ? (void *)slot->ptr : BDG_NMB(na->nm_mem, slot); prefetch(buf); ++ft_i; if (slot->flags & NS_MOREFRAG) { frags++; continue; } if (netmap_verbose && frags > 1) RD(5, "%d frags at %d", frags, ft_i - frags); ft[ft_i - frags].ft_frags = frags; frags = 1; if (unlikely((int)ft_i >= bridge_batch)) ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr); } if (frags > 1) { D("truncate incomplete fragment at %d (%d frags)", ft_i, frags); // ft_i > 0, ft[ft_i-1].flags has NS_MOREFRAG ft[ft_i - 1].ft_frags &= ~NS_MOREFRAG; ft[ft_i - frags].ft_frags = frags - 1; } if (ft_i) ft_i = nm_bdg_flush(ft, ft_i, na, ring_nr); BDG_RUNLOCK(b); return j; } /* * Pass packets from nic to the bridge. * XXX TODO check locking: this is called from the interrupt * handler so we should make sure that the interface is not * disconnected while passing down an interrupt. * * Note, no user process can access this NIC so we can ignore * the info in the 'ring'. 
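 *
 * For context, the path followed on such an interrupt is roughly:
 *
 *	driver rx interrupt handler
 *	    netmap_rx_irq(ifp, q, &work_done)
 *	        netmap_nic_to_bdg(ifp, q)	only if na->na_bdg != NULL
 *	            na->nm_rxsync()		fetch packets from the NIC
 *	            nm_bdg_preflush()		build the forwarding batch
 *	                nm_bdg_flush()		deliver to destination ports
 *
 * so traffic received by a NIC attached to a VALE switch is forwarded
 * entirely within the kernel.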
*/ static void netmap_nic_to_bdg(struct ifnet *ifp, u_int ring_nr) { struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k; /* make sure that only one thread is ever in here, * after which we can unlock. Probably unnecessary XXX. */ if (nm_kr_tryget(kring)) return; /* fetch packets that have arrived. * XXX maybe do this in a loop ? */ if (na->nm_rxsync(ifp, ring_nr, 0)) goto put_out; if (kring->nr_hwavail == 0 && netmap_verbose) { D("how strange, interrupt with no packets on %s", ifp->if_xname); goto put_out; } k = nm_kr_rxpos(kring); j = nm_bdg_preflush(na, ring_nr, kring, k); /* we consume everything, but we cannot update kring directly * because the nic may have destroyed the info in the NIC ring. * So we need to call rxsync again to restore it. */ ring->cur = j; ring->avail = 0; na->nm_rxsync(ifp, ring_nr, 0); put_out: nm_kr_put(kring); return; } /* * Default functions to handle rx/tx interrupts from a physical device. * "work_done" is non-null on the RX path, NULL for the TX path. * We rely on the OS to make sure that there is only one active * instance per queue, and that there is appropriate locking. * * If the card is not in netmap mode, simply return 0, * so that the caller proceeds with regular processing. * * If the card is connected to a netmap file descriptor, * do a selwakeup on the individual queue, plus one on the global one * if needed (multiqueue card _and_ there are multiqueue listeners), * and return 1. * * Finally, if called on rx from an interface connected to a switch, * calls the proper forwarding routine, and return 1. */ int netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done) { struct netmap_adapter *na; struct netmap_kring *kring; if (!(ifp->if_capenable & IFCAP_NETMAP)) return 0; q &= NETMAP_RING_MASK; if (netmap_verbose) RD(5, "received %s queue %d", work_done ? "RX" : "TX" , q); na = NA(ifp); if (na->na_flags & NAF_SKIP_INTR) { ND("use regular interrupt"); return 0; } if (work_done) { /* RX path */ if (q >= na->num_rx_rings) return 0; // not a physical queue kring = na->rx_rings + q; kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ? if (na->na_bdg != NULL) { netmap_nic_to_bdg(ifp, q); } else { selwakeuppri(&kring->si, PI_NET); if (na->num_rx_rings > 1 /* or multiple listeners */ ) selwakeuppri(&na->rx_si, PI_NET); } *work_done = 1; /* do not fire napi again */ } else { /* TX path */ if (q >= na->num_tx_rings) return 0; // not a physical queue kring = na->tx_rings + q; selwakeuppri(&kring->si, PI_NET); if (na->num_tx_rings > 1 /* or multiple listeners */ ) selwakeuppri(&na->tx_si, PI_NET); } return 1; } #ifdef linux /* linux-specific routines */ /* * Remap linux arguments into the FreeBSD call. * - pwait is the poll table, passed as 'dev'; * If pwait == NULL someone else already woke up before. We can report * events but they are filtered upstream. * If pwait != NULL, then pwait->key contains the list of events. * - events is computed from pwait as above. * - file is passed as 'td'; */ static u_int linux_netmap_poll(struct file * file, struct poll_table_struct *pwait) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) int events = POLLIN | POLLOUT; /* XXX maybe... */ #elif LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) int events = pwait ? pwait->key : POLLIN | POLLOUT; #else /* in 3.4.0 field 'key' was renamed to '_key' */ int events = pwait ? 
pwait->_key : POLLIN | POLLOUT; #endif return netmap_poll((void *)pwait, events, (void *)file); } static int linux_netmap_mmap(struct file *f, struct vm_area_struct *vma) { int error = 0; unsigned long off, va; vm_ooffset_t pa; struct netmap_priv_d *priv = f->private_data; /* * vma->vm_start: start of mapping user address space * vma->vm_end: end of the mapping user address space * vma->vm_pfoff: offset of first page in the device */ // XXX security checks error = netmap_get_memory(priv); ND("get_memory returned %d", error); if (error) return -error; if ((vma->vm_start & ~PAGE_MASK) || (vma->vm_end & ~PAGE_MASK)) { ND("vm_start = %lx vm_end = %lx", vma->vm_start, vma->vm_end); return -EINVAL; } for (va = vma->vm_start, off = vma->vm_pgoff; va < vma->vm_end; va += PAGE_SIZE, off++) { pa = netmap_mem_ofstophys(priv->np_mref, off << PAGE_SHIFT); if (pa == 0) return -EINVAL; ND("va %lx pa %p", va, pa); error = remap_pfn_range(vma, va, pa >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot); if (error) return error; } return 0; } /* * This one is probably already protected by the netif lock XXX */ static netdev_tx_t linux_netmap_start_xmit(struct sk_buff *skb, struct net_device *dev) { netmap_transmit(dev, skb); return (NETDEV_TX_OK); } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) // XXX was 37 #define LIN_IOCTL_NAME .ioctl int linux_netmap_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long data /* arg */) #else #define LIN_IOCTL_NAME .unlocked_ioctl long linux_netmap_ioctl(struct file *file, u_int cmd, u_long data /* arg */) #endif { int ret; struct nmreq nmr; bzero(&nmr, sizeof(nmr)); if (data && copy_from_user(&nmr, (void *)data, sizeof(nmr) ) != 0) return -EFAULT; ret = netmap_ioctl(NULL, cmd, (caddr_t)&nmr, 0, (void *)file); if (data && copy_to_user((void*)data, &nmr, sizeof(nmr) ) != 0) return -EFAULT; return -ret; } static int netmap_release(struct inode *inode, struct file *file) { (void)inode; /* UNUSED */ if (file->private_data) netmap_dtor(file->private_data); return (0); } static int linux_netmap_open(struct inode *inode, struct file *file) { struct netmap_priv_d *priv; (void)inode; /* UNUSED */ priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF, M_NOWAIT | M_ZERO); if (priv == NULL) return -ENOMEM; file->private_data = priv; return (0); } static struct file_operations netmap_fops = { .owner = THIS_MODULE, .open = linux_netmap_open, .mmap = linux_netmap_mmap, LIN_IOCTL_NAME = linux_netmap_ioctl, .poll = linux_netmap_poll, .release = netmap_release, }; static struct miscdevice netmap_cdevsw = { /* same name as FreeBSD */ MISC_DYNAMIC_MINOR, "netmap", &netmap_fops, }; static int netmap_init(void); static void netmap_fini(void); /* Errors have negative values on linux */ static int linux_netmap_init(void) { return -netmap_init(); } module_init(linux_netmap_init); module_exit(netmap_fini); /* export certain symbols to other modules */ EXPORT_SYMBOL(netmap_attach); // driver attach routines EXPORT_SYMBOL(netmap_detach); // driver detach routines EXPORT_SYMBOL(netmap_ring_reinit); // ring init on error EXPORT_SYMBOL(netmap_buffer_lut); EXPORT_SYMBOL(netmap_total_buffers); // index check EXPORT_SYMBOL(netmap_buffer_base); EXPORT_SYMBOL(netmap_reset); // ring init routines EXPORT_SYMBOL(netmap_buf_size); EXPORT_SYMBOL(netmap_rx_irq); // default irq handler EXPORT_SYMBOL(netmap_no_pendintr); // XXX mitigation - should go away EXPORT_SYMBOL(netmap_bdg_ctl); // bridge configuration routine EXPORT_SYMBOL(netmap_bdg_learning); // the default lookup function 
EXPORT_SYMBOL(netmap_disable_all_rings); EXPORT_SYMBOL(netmap_enable_all_rings); MODULE_AUTHOR("http://info.iet.unipi.it/~luigi/netmap/"); MODULE_DESCRIPTION("The netmap packet I/O framework"); MODULE_LICENSE("Dual BSD/GPL"); /* the code here is all BSD. */ #else /* __FreeBSD__ */ static struct cdevsw netmap_cdevsw = { .d_version = D_VERSION, .d_name = "netmap", .d_open = netmap_open, .d_mmap_single = netmap_mmap_single, .d_ioctl = netmap_ioctl, .d_poll = netmap_poll, .d_close = netmap_close, }; #endif /* __FreeBSD__ */ /* *---- support for virtual bridge ----- */ /* ----- FreeBSD if_bridge hash function ------- */ /* * The following hash function is adapted from "Hash Functions" by Bob Jenkins * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). * * http://www.burtleburtle.net/bob/hash/spooky.html */ #define mix(a, b, c) \ do { \ a -= b; a -= c; a ^= (c >> 13); \ b -= c; b -= a; b ^= (a << 8); \ c -= a; c -= b; c ^= (b >> 13); \ a -= b; a -= c; a ^= (c >> 12); \ b -= c; b -= a; b ^= (a << 16); \ c -= a; c -= b; c ^= (b >> 5); \ a -= b; a -= c; a ^= (c >> 3); \ b -= c; b -= a; b ^= (a << 10); \ c -= a; c -= b; c ^= (b >> 15); \ } while (/*CONSTCOND*/0) static __inline uint32_t nm_bridge_rthash(const uint8_t *addr) { uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hask key b += addr[5] << 8; b += addr[4]; a += addr[3] << 24; a += addr[2] << 16; a += addr[1] << 8; a += addr[0]; mix(a, b, c); #define BRIDGE_RTHASH_MASK (NM_BDG_HASH-1) return (c & BRIDGE_RTHASH_MASK); } #undef mix static int bdg_netmap_reg(struct ifnet *ifp, int onoff) { /* the interface is already attached to the bridge, * so we only need to toggle IFCAP_NETMAP. */ if (onoff) { ifp->if_capenable |= IFCAP_NETMAP; } else { ifp->if_capenable &= ~IFCAP_NETMAP; } return 0; } /* * Lookup function for a learning bridge. * Update the hash table with the source address, * and then returns the destination port index, and the * ring in *dst_ring (at the moment, always use ring 0) */ u_int netmap_bdg_learning(char *buf, u_int buf_len, uint8_t *dst_ring, struct netmap_adapter *na) { struct nm_hash_ent *ht = na->na_bdg->ht; uint32_t sh, dh; u_int dst, mysrc = na->bdg_port; uint64_t smac, dmac; if (buf_len < 14) { D("invalid buf length %d", buf_len); return NM_BDG_NOPORT; } dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff; smac = le64toh(*(uint64_t *)(buf + 4)); smac >>= 16; /* * The hash is somewhat expensive, there might be some * worthwhile optimizations here. */ if ((buf[6] & 1) == 0) { /* valid src */ uint8_t *s = buf+6; sh = nm_bridge_rthash(buf+6); // XXX hash of source /* update source port forwarding entry */ ht[sh].mac = smac; /* XXX expire ? */ ht[sh].ports = mysrc; if (netmap_verbose) D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d", s[0], s[1], s[2], s[3], s[4], s[5], mysrc); } dst = NM_BDG_BROADCAST; if ((buf[0] & 1) == 0) { /* unicast */ dh = nm_bridge_rthash(buf); // XXX hash of dst if (ht[dh].mac == dmac) { /* found dst */ dst = ht[dh].ports; } /* XXX otherwise return NM_BDG_UNKNOWN ? */ } *dst_ring = 0; return dst; } /* * This flush routine supports only unicast and broadcast but a large * number of ports, and lets us replace the learn and dispatch functions. 
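 *
 * For reference, a replacement lookup function registered with
 * NETMAP_BDG_LOOKUP_REG must follow the same contract as
 * netmap_bdg_learning() above (the name my_lookup is just an example):
 *
 *	u_int my_lookup(char *buf, u_int len, uint8_t *dst_ring,
 *		struct netmap_adapter *na);
 *
 * returning the destination port index, NM_BDG_BROADCAST to flood, or
 * NM_BDG_NOPORT to drop the packet; *dst_ring selects the ring on the
 * destination port (only ring 0 is used at the moment).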
*/ int nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_adapter *na, u_int ring_nr) { struct nm_bdg_q *dst_ents, *brddst; uint16_t num_dsts = 0, *dsts; struct nm_bridge *b = na->na_bdg; u_int i, j, me = na->bdg_port; /* * The work area (pointed by ft) is followed by an array of * pointers to queues , dst_ents; there are NM_BDG_MAXRINGS * queues per port plus one for the broadcast traffic. * Then we have an array of destination indexes. */ dst_ents = (struct nm_bdg_q *)(ft + NM_BDG_BATCH_MAX); dsts = (uint16_t *)(dst_ents + NM_BDG_MAXPORTS * NM_BDG_MAXRINGS + 1); /* first pass: find a destination for each packet in the batch */ for (i = 0; likely(i < n); i += ft[i].ft_frags) { uint8_t dst_ring = ring_nr; /* default, same ring as origin */ uint16_t dst_port, d_i; struct nm_bdg_q *d; ND("slot %d frags %d", i, ft[i].ft_frags); dst_port = b->nm_bdg_lookup(ft[i].ft_buf, ft[i].ft_len, &dst_ring, na); if (netmap_verbose > 255) RD(5, "slot %d port %d -> %d", i, me, dst_port); if (dst_port == NM_BDG_NOPORT) continue; /* this packet is identified to be dropped */ else if (unlikely(dst_port > NM_BDG_MAXPORTS)) continue; else if (dst_port == NM_BDG_BROADCAST) dst_ring = 0; /* broadcasts always go to ring 0 */ else if (unlikely(dst_port == me || !b->bdg_ports[dst_port])) continue; /* get a position in the scratch pad */ d_i = dst_port * NM_BDG_MAXRINGS + dst_ring; d = dst_ents + d_i; /* append the first fragment to the list */ if (d->bq_head == NM_FT_NULL) { /* new destination */ d->bq_head = d->bq_tail = i; /* remember this position to be scanned later */ if (dst_port != NM_BDG_BROADCAST) dsts[num_dsts++] = d_i; } else { ft[d->bq_tail].ft_next = i; d->bq_tail = i; } d->bq_len += ft[i].ft_frags; } /* * Broadcast traffic goes to ring 0 on all destinations. * So we need to add these rings to the list of ports to scan. * XXX at the moment we scan all NM_BDG_MAXPORTS ports, which is * expensive. We should keep a compact list of active destinations * so we could shorten this loop. */ brddst = dst_ents + NM_BDG_BROADCAST * NM_BDG_MAXRINGS; if (brddst->bq_head != NM_FT_NULL) { for (j = 0; likely(j < b->bdg_active_ports); j++) { uint16_t d_i; i = b->bdg_port_index[j]; if (unlikely(i == me)) continue; d_i = i * NM_BDG_MAXRINGS; if (dst_ents[d_i].bq_head == NM_FT_NULL) dsts[num_dsts++] = d_i; } } ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts); /* second pass: scan destinations (XXX will be modular somehow) */ for (i = 0; i < num_dsts; i++) { struct ifnet *dst_ifp; struct netmap_adapter *dst_na; struct netmap_kring *kring; struct netmap_ring *ring; u_int dst_nr, is_vp, lim, j, sent = 0, d_i, next, brd_next; u_int needed, howmany; int retry = netmap_txsync_retry; struct nm_bdg_q *d; uint32_t my_start = 0, lease_idx = 0; int nrings; d_i = dsts[i]; ND("second pass %d port %d", i, d_i); d = dst_ents + d_i; // XXX fix the division dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS]; /* protect from the lookup function returning an inactive * destination port */ if (unlikely(dst_na == NULL)) goto cleanup; if (dst_na->na_flags & NAF_SW_ONLY) goto cleanup; dst_ifp = dst_na->ifp; /* * The interface may be in !netmap mode in two cases: * - when na is attached but not activated yet; * - when na is being deactivated but is still attached. */ if (unlikely(!(dst_ifp->if_capenable & IFCAP_NETMAP))) { ND("not in netmap mode!"); goto cleanup; } /* there is at least one either unicast or broadcast packet */ brd_next = brddst->bq_head; next = d->bq_head; /* we need to reserve this many slots. 
If fewer are * available, some packets will be dropped. * Packets may have multiple fragments, so there is a chance * that we may not use all of the slots * we have claimed, so we will need to handle the leftover * ones when we regain the lock. */ needed = d->bq_len + brddst->bq_len; is_vp = nma_is_vp(dst_na); ND(5, "pass 2 dst %d is %x %s", i, d_i, is_vp ? "virtual" : "nic/host"); dst_nr = d_i & (NM_BDG_MAXRINGS-1); if (is_vp) { /* virtual port */ nrings = dst_na->num_rx_rings; } else { nrings = dst_na->num_tx_rings; } if (dst_nr >= nrings) dst_nr = dst_nr % nrings; kring = is_vp ? &dst_na->rx_rings[dst_nr] : &dst_na->tx_rings[dst_nr]; ring = kring->ring; lim = kring->nkr_num_slots - 1; retry: /* reserve the buffers in the queue and an entry * to report completion, and drop the lock. * XXX this might become a helper function. */ mtx_lock(&kring->q_lock); if (kring->nkr_stopped) { mtx_unlock(&kring->q_lock); goto cleanup; } /* on physical interfaces, do a txsync to recover * slots for packets already transmitted. * XXX maybe we could be optimistic and rely on a retry * in case of failure. */ if (nma_is_hw(dst_na)) { dst_na->nm_txsync(dst_ifp, dst_nr, 0); } my_start = j = kring->nkr_hwlease; howmany = nm_kr_space(kring, is_vp); if (needed < howmany) howmany = needed; lease_idx = nm_kr_lease(kring, howmany, is_vp); mtx_unlock(&kring->q_lock); /* only retry if we need more than the available slots */ if (retry && needed <= howmany) retry = 0; /* copy to the destination queue */ while (howmany > 0) { struct netmap_slot *slot; struct nm_bdg_fwd *ft_p, *ft_end; u_int cnt; /* find the queue from which we pick the next packet. * NM_FT_NULL is always higher than valid indexes * so we never dereference it if the other list * has packets (and if both are empty we never * get here). */ if (next < brd_next) { ft_p = ft + next; next = ft_p->ft_next; } else { /* insert broadcast */ ft_p = ft + brd_next; brd_next = ft_p->ft_next; } cnt = ft_p->ft_frags; // cnt > 0 if (unlikely(cnt > howmany)) break; /* no more space */ howmany -= cnt; if (netmap_verbose && cnt > 1) RD(5, "rx %d frags to %d", cnt, j); ft_end = ft_p + cnt; do { void *dst, *src = ft_p->ft_buf; size_t len = (ft_p->ft_len + 63) & ~63; /* round to a multiple of 64 */ slot = &ring->slot[j]; dst = BDG_NMB(dst_na->nm_mem, slot); ND("send %d %d bytes at %s:%d", i, ft_p->ft_len, dst_ifp->if_xname, j); if (ft_p->ft_flags & NS_INDIRECT) { if (copyin(src, dst, len)) { // invalid user pointer, pretend len is 0 ft_p->ft_len = 0; } } else { //memcpy(dst, src, len); pkt_copy(src, dst, (int)len); } slot->len = ft_p->ft_len; slot->flags = (cnt << 8) | NS_MOREFRAG; j = nm_next(j, lim); ft_p++; sent++; } while (ft_p != ft_end); slot->flags = (cnt << 8); /* clear flag on last entry */ /* are we done ? */ if (next == NM_FT_NULL && brd_next == NM_FT_NULL) break; } { /* current position */ uint32_t *p = kring->nkr_leases; /* shorthand */ uint32_t update_pos; int still_locked = 1; mtx_lock(&kring->q_lock); if (unlikely(howmany > 0)) { /* we did not use all the bufs. If I am the last one * I can recover the slots, otherwise I must * fill them with 0 to mark empty packets. */ ND("leftover %d bufs", howmany); if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) { /* yes, I am the last one */ ND("roll back nkr_hwlease to %d", j); kring->nkr_hwlease = j; } else { while (howmany-- > 0) { ring->slot[j].len = 0; ring->slot[j].flags = 0; j = nm_next(j, lim); } } } p[lease_idx] = j; /* report I am done */ update_pos = is_vp ?
nm_kr_rxpos(kring) : ring->cur; if (my_start == update_pos) { /* all slots before my_start have been reported, * so scan subsequent leases to see if other ranges * have been completed, and do a selwakeup or txsync. */ while (lease_idx != kring->nkr_lease_idx && p[lease_idx] != NR_NOSLOT) { j = p[lease_idx]; p[lease_idx] = NR_NOSLOT; lease_idx = nm_next(lease_idx, lim); } /* j is the new 'write' position. j != my_start * means there are new buffers to report */ if (likely(j != my_start)) { if (is_vp) { uint32_t old_avail = kring->nr_hwavail; kring->nr_hwavail = (j >= kring->nr_hwcur) ? j - kring->nr_hwcur : j + lim + 1 - kring->nr_hwcur; if (kring->nr_hwavail < old_avail) { D("avail shrink %d -> %d", old_avail, kring->nr_hwavail); } still_locked = 0; mtx_unlock(&kring->q_lock); selwakeuppri(&kring->si, PI_NET); } else { ring->cur = j; /* XXX update avail ? */ still_locked = 0; dst_na->nm_txsync(dst_ifp, dst_nr, 0); mtx_unlock(&kring->q_lock); /* retry to send more packets */ if (nma_is_hw(dst_na) && retry--) goto retry; } } } if (still_locked) mtx_unlock(&kring->q_lock); } cleanup: d->bq_head = d->bq_tail = NM_FT_NULL; /* cleanup */ d->bq_len = 0; } brddst->bq_head = brddst->bq_tail = NM_FT_NULL; /* cleanup */ brddst->bq_len = 0; return 0; } /* * Main dispatch routine for the bridge. * We already know that only one thread is running this, * so we must run nm_bdg_preflush without the lock. */ static int bdg_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, lim = kring->nkr_num_slots - 1; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); if (bridge_batch <= 0) { /* testing only */ j = k; // used all goto done; } if (bridge_batch > NM_BDG_BATCH) bridge_batch = NM_BDG_BATCH; j = nm_bdg_preflush(na, ring_nr, kring, k); if (j != k) D("early break at %d/ %d, avail %d", j, k, kring->nr_hwavail); /* k-j modulo ring size is the number of slots processed */ if (k < j) k += kring->nkr_num_slots; kring->nr_hwavail = lim - (k - j); done: kring->nr_hwcur = j; ring->avail = kring->nr_hwavail; if (netmap_verbose) D("%s ring %d flags %d", ifp->if_xname, ring_nr, flags); return 0; } /* * User process reading from a VALE switch. * Already protected against concurrent calls from userspace, * but we must acquire the queue's lock to protect against * writers on the same queue. */ static int bdg_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, lim = kring->nkr_num_slots - 1; u_int k = ring->cur, resvd = ring->reserved; int n; mtx_lock(&kring->q_lock); if (k > lim) { D("ouch dangerous reset!!!"); n = netmap_ring_reinit(kring); goto done; } /* skip past packets that userspace has released */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets.
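 * For example, with nkr_num_slots = 1024, j = 1020 and k = 4, the code
 * below gets n = k - j = -1016, adds nkr_num_slots to obtain 8, and then
 * walks the 8 released slots 1020..1023 and 0..3.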
*/ n = k - j; if (n < 0) n += kring->nkr_num_slots; ND("userspace releases %d packets", n); for (n = 0; likely(j != k); n++) { struct netmap_slot *slot = &ring->slot[j]; void *addr = BDG_NMB(na->nm_mem, slot); if (addr == netmap_buffer_base) { /* bad buf */ D("bad buffer index %d, ignore ?", slot->buf_idx); } slot->flags &= ~NS_BUF_CHANGED; j = nm_next(j, lim); } kring->nr_hwavail -= n; kring->nr_hwcur = k; } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; n = 0; done: mtx_unlock(&kring->q_lock); return n; } static void bdg_netmap_attach(struct netmap_adapter *arg) { struct netmap_adapter na; ND("attaching virtual bridge"); bzero(&na, sizeof(na)); na.ifp = arg->ifp; na.na_flags = NAF_BDG_MAYSLEEP | NAF_MEM_OWNER; na.num_tx_rings = arg->num_tx_rings; na.num_rx_rings = arg->num_rx_rings; na.num_tx_desc = arg->num_tx_desc; na.num_rx_desc = arg->num_rx_desc; na.nm_txsync = bdg_netmap_txsync; na.nm_rxsync = bdg_netmap_rxsync; na.nm_register = bdg_netmap_reg; na.nm_mem = netmap_mem_private_new(arg->ifp->if_xname, na.num_tx_rings, na.num_tx_desc, na.num_rx_rings, na.num_rx_desc); netmap_attach(&na, na.num_tx_rings); } static struct cdev *netmap_dev; /* /dev/netmap character device. */ /* * Module loader. * * Create the /dev/netmap device and initialize all global * variables. * * Return 0 on success, errno on failure. */ static int netmap_init(void) { int i, error; NMG_LOCK_INIT(); error = netmap_mem_init(); if (error != 0) { printf("netmap: unable to initialize the memory allocator.\n"); return (error); } printf("netmap: loaded module\n"); netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, "netmap"); bzero(nm_bridges, sizeof(struct nm_bridge) * NM_BRIDGES); /* safety */ for (i = 0; i < NM_BRIDGES; i++) BDG_RWINIT(&nm_bridges[i]); return (error); } /* * Module unloader. * * Free all the memory, and destroy the ``/dev/netmap`` device. */ static void netmap_fini(void) { destroy_dev(netmap_dev); netmap_mem_fini(); NMG_LOCK_DESTROY(); printf("netmap: unloaded module.\n"); } #ifdef __FreeBSD__ /* * Kernel entry point. * * Initialize/finalize the module and return. * * Return 0 on success, errno on failure. */ static int netmap_loader(__unused struct module *module, int event, __unused void *arg) { int error = 0; switch (event) { case MOD_LOAD: error = netmap_init(); break; case MOD_UNLOAD: netmap_fini(); break; default: error = EOPNOTSUPP; break; } return (error); } DEV_MODULE(netmap, netmap_loader, NULL); #endif /* __FreeBSD__ */ netmap-release/sys/dev/netmap/netmap_kern.h000644 000765 000024 00000054375 12230530510 021575 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/netmap_kern.h 238985 2012-08-02 11:59:43Z luigi $ * * The header contains the definitions of constants and function * prototypes used only in kernelspace. */ #ifndef _NET_NETMAP_KERN_H_ #define _NET_NETMAP_KERN_H_ #if defined(__FreeBSD__) #define likely(x) __builtin_expect((long)!!(x), 1L) #define unlikely(x) __builtin_expect((long)!!(x), 0L) #define NM_LOCK_T struct mtx #define NM_SELINFO_T struct selinfo #define MBUF_LEN(m) ((m)->m_pkthdr.len) #define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m) #define NM_ATOMIC_T volatile int #elif defined (linux) #define NM_LOCK_T safe_spinlock_t // see bsd_glue.h #define NM_SELINFO_T wait_queue_head_t #define MBUF_LEN(m) ((m)->len) #define NM_SEND_UP(ifp, m) netif_rx(m) #define NM_ATOMIC_T volatile long unsigned int #ifndef DEV_NETMAP #define DEV_NETMAP #endif /* DEV_NETMAP */ /* * IFCAP_NETMAP goes into net_device's priv_flags (if_capenable). * This was 16 bits up to linux 2.6.36, so we need a 16 bit value on older * platforms and tolerate the clash with IFF_DYNAMIC and IFF_BRIDGE_PORT. * For the 32-bit value, 0x100000 has no clashes until at least 3.5.1 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) #define IFCAP_NETMAP 0x8000 #else #define IFCAP_NETMAP 0x200000 #endif #elif defined (__APPLE__) #warning apple support is incomplete. #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define NM_LOCK_T IOLock * #define NM_SELINFO_T struct selinfo #define MBUF_LEN(m) ((m)->m_pkthdr.len) #define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m) #else #error unsupported platform #endif /* end - platform-specific code */ #define ND(format, ...) #define D(format, ...) \ do { \ struct timeval __xxts; \ microtime(&__xxts); \ printf("%03d.%06d %s [%d] " format "\n", \ (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \ __FUNCTION__, __LINE__, ##__VA_ARGS__); \ } while (0) /* rate limited, lps indicates how many per second */ #define RD(lps, format, ...) \ do { \ static int t0, __cnt; \ if (t0 != time_second) { \ t0 = time_second; \ __cnt = 0; \ } \ if (__cnt++ < lps) \ D(format, ##__VA_ARGS__); \ } while (0) struct netmap_adapter; struct nm_bdg_fwd; struct nm_bridge; struct netmap_priv_d; const char *nm_dump_buf(char *p, int len, int lim, char *dst); /* * private, kernel view of a ring. Keeps track of the status of * a ring across system calls. * * nr_hwcur index of the next buffer to refill. * It corresponds to ring->cur - ring->reserved * * nr_hwavail the number of slots "owned" by userspace. * nr_hwavail =:= ring->avail + ring->reserved * * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots. * This is so that, on a reset, buffers owned by userspace are not * modified by the kernel. In particular: * RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with * the next empty buffer as known by the hardware (next_to_check or so). * TX rings: hwcur + hwofs coincides with next_to_send * * Clients cannot issue concurrent syscall on a ring. 
The system * detects this and reports an error using two flags, * NKR_WBUSY and NKR_RBUSY * For received packets, slot->flags is set to nkr_slot_flags * so we can provide a proper initial value (e.g. set NS_FORWARD * when operating in 'transparent' mode). * * The following fields are used to implement lock-free copy of packets * from input to output ports in VALE switch: * nkr_hwlease buffer after the last one being copied. * A writer in nm_bdg_flush reserves N buffers * from nr_hwlease, advances it, then does the * copy outside the lock. * In RX rings (used for VALE ports), * nkr_hwcur + nkr_hwavail <= nkr_hwlease < nkr_hwcur+N-1 * In TX rings (used for NIC or host stack ports) * nkr_hwcur <= nkr_hwlease < nkr_hwcur+ nkr_hwavail * nkr_leases array of nkr_num_slots where writers can report * completion of their block. NR_NOSLOT (~0) indicates * that the writer has not finished yet * nkr_lease_idx index of next free slot in nr_leases, to be assigned * * The kring is manipulated by txsync/rxsync and generic netmap function. * q_lock is used to arbitrate access to the kring from within the netmap * code, and this and other protections guarantee that there is never * more than 1 concurrent call to txsync or rxsync. So we are free * to manipulate the kring from within txsync/rxsync without any extra * locks. */ struct netmap_kring { struct netmap_ring *ring; uint32_t nr_hwcur; uint32_t nr_hwavail; uint32_t nr_kflags; /* private driver flags */ #define NKR_PENDINTR 0x1 // Pending interrupt. uint32_t nkr_num_slots; int32_t nkr_hwofs; /* offset between NIC and netmap ring */ uint16_t nkr_slot_flags; /* initial value for flags */ struct netmap_adapter *na; struct nm_bdg_fwd *nkr_ft; uint32_t *nkr_leases; #define NR_NOSLOT ((uint32_t)~0) uint32_t nkr_hwlease; uint32_t nkr_lease_idx; NM_SELINFO_T si; /* poll/select wait queue */ NM_LOCK_T q_lock; /* protects kring and ring. */ NM_ATOMIC_T nr_busy; /* prevent concurrent syscalls */ volatile int nkr_stopped; } __attribute__((__aligned__(64))); /* return the next index, with wraparound */ static inline uint32_t nm_next(uint32_t i, uint32_t lim) { return unlikely (i == lim) ? 0 : i + 1; } /* * * Here is the layout for the Rx and Tx rings. RxRING TxRING +-----------------+ +-----------------+ | | | | |XXX free slot XXX| |XXX free slot XXX| +-----------------+ +-----------------+ | |<-hwcur | |<-hwcur | reserved h | | (ready | +----------- w -+ | to be | cur->| a | | sent) h | | v | +---------- w | | a | cur->| (being a | | i | | prepared) v | | avail l | | a | +-----------------+ + a ------ i + | | ... | v l |<-hwlease | (being | ... | a | ... | prepared) | ... | i | ... +-----------------+ ... | l | ... | |<-hwlease +-----------------+ | | | | | | | | | | | | | | | | +-----------------+ +-----------------+ * The cur/avail (user view) and hwcur/hwavail (kernel view) * are used in the normal operation of the card. * * When a ring is the output of a switch port (Rx ring for * a VALE port, Tx ring for the host stack or NIC), slots * are reserved in blocks through 'hwlease' which points * to the next unused slot. * On an Rx ring, hwlease is always after hwavail, * and completions cause avail to advance. * On a Tx ring, hwlease is always between cur and hwavail, * and completions cause cur to advance. * * nm_kr_space() returns the maximum number of slots that * can be assigned. * nm_kr_lease() reserves the required number of buffers, * advances nkr_hwlease and also returns an entry in * a circular array where completions should be reported. 
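 *
 * A writer (see nm_bdg_flush() for the actual code) roughly follows this
 * pattern; the sketch below omits error handling and leftover recovery:
 *
 *	mtx_lock(&kring->q_lock);
 *	j = kring->nkr_hwlease;
 *	howmany = nm_kr_space(kring, is_rx);	// max slots we may take
 *	lease_idx = nm_kr_lease(kring, howmany, is_rx);
 *	mtx_unlock(&kring->q_lock);
 *	... copy packets into slots j, j+1, ... without holding the lock ...
 *	mtx_lock(&kring->q_lock);
 *	kring->nkr_leases[lease_idx] = j;	// report the final position
 *	mtx_unlock(&kring->q_lock);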
*/ /* * This struct extends the 'struct adapter' (or * equivalent) device descriptor. It contains all fields needed to * support netmap operation. */ struct netmap_adapter { /* * On linux we do not have a good way to tell if an interface * is netmap-capable. So we use the following trick: * NA(ifp) points here, and the first entry (which hopefully * always exists and is at least 32 bits) contains a magic * value which we can use to detect that the interface is good. */ uint32_t magic; uint32_t na_flags; /* future place for IFCAP_NETMAP */ #define NAF_SKIP_INTR 1 /* use the regular interrupt handler. * useful during initialization */ #define NAF_SW_ONLY 2 /* forward packets only to sw adapter */ #define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when * forwarding packets coming from this * interface */ #define NAF_MEM_OWNER 8 /* the adapter is responsible for the * deallocation of the memory allocator */ int refcount; /* number of user-space descriptors using this interface, which is equal to the number of struct netmap_if objs in the mapped region. */ /* * The selwakeup in the interrupt thread can use per-ring * and/or global wait queues. We track how many clients * of each type we have so we can optimize the drivers, * and especially avoid huge contention on the locks. */ int na_single; /* threads attached to a single hw queue */ int na_multi; /* threads attached to multiple hw queues */ u_int num_rx_rings; /* number of adapter receive rings */ u_int num_tx_rings; /* number of adapter transmit rings */ u_int num_tx_desc; /* number of descriptor in each queue */ u_int num_rx_desc; /* tx_rings and rx_rings are private but allocated * as a contiguous chunk of memory. Each array has * N+1 entries, for the adapter queues and for the host queue. */ struct netmap_kring *tx_rings; /* array of TX rings. */ struct netmap_kring *rx_rings; /* array of RX rings. */ NM_SELINFO_T tx_si, rx_si; /* global wait queues */ /* copy of if_qflush and if_transmit pointers, to intercept * packets from the network stack when netmap is active. */ int (*if_transmit)(struct ifnet *, struct mbuf *); /* references to the ifnet and device routines, used by * the generic netmap functions. */ struct ifnet *ifp; /* adapter is ifp->if_softc */ NM_LOCK_T core_lock; /* used if no device lock available */ int (*nm_register)(struct ifnet *, int onoff); int (*nm_txsync)(struct ifnet *, u_int ring, int flags); int (*nm_rxsync)(struct ifnet *, u_int ring, int flags); #define NAF_FORCE_READ 1 #define NAF_FORCE_RECLAIM 2 /* return configuration information */ int (*nm_config)(struct ifnet *, u_int *txr, u_int *txd, u_int *rxr, u_int *rxd); /* * Bridge support: * * bdg_port is the port number used in the bridge; * na_bdg_refcount is a refcount used for bridge ports, * when it goes to 0 we can detach+free this port * (a bridge port is always attached if it exists; * it is not always registered) * na_bdg points to the bridge this NA is attached to. */ int bdg_port; int na_bdg_refcount; struct nm_bridge *na_bdg; /* When we attach a physical interface to the bridge, we * allow the controlling process to terminate, so we need * a place to store the netmap_priv_d data structure. * This is only done when physical interfaces are attached to a bridge. */ struct netmap_priv_d *na_kpriv; /* memory allocator */ struct netmap_mem_d *nm_mem; #ifdef linux struct net_device_ops nm_ndo; #endif /* linux */ }; /* * Available space in the ring. 
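 * For example, on a TX ring with nkr_num_slots = 1024, nr_hwcur = 100,
 * nr_hwavail = 50 and nkr_hwlease = 120, the function below returns
 * 100 + 50 - 120 = 30 slots still available for new leases.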
*/ static inline uint32_t nm_kr_space(struct netmap_kring *k, int is_rx) { int space; if (is_rx) { int busy = k->nkr_hwlease - k->nr_hwcur; if (busy < 0) busy += k->nkr_num_slots; space = k->nkr_num_slots - 1 - busy; } else { space = k->nr_hwcur + k->nr_hwavail - k->nkr_hwlease; if (space < 0) space += k->nkr_num_slots; } #if 0 // sanity check if (k->nkr_hwlease >= k->nkr_num_slots || k->nr_hwcur >= k->nkr_num_slots || k->nr_hwavail >= k->nkr_num_slots || busy < 0 || busy >= k->nkr_num_slots) { D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease, k->nkr_lease_idx, k->nkr_num_slots); } #endif return space; } /* return update position */ static inline uint32_t nm_kr_rxpos(struct netmap_kring *k) { uint32_t pos = k->nr_hwcur + k->nr_hwavail; if (pos >= k->nkr_num_slots) pos -= k->nkr_num_slots; #if 0 if (pos >= k->nkr_num_slots || k->nkr_hwlease >= k->nkr_num_slots || k->nr_hwcur >= k->nkr_num_slots || k->nr_hwavail >= k->nkr_num_slots || k->nkr_lease_idx >= k->nkr_num_slots) { D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease, k->nkr_lease_idx, k->nkr_num_slots); } #endif return pos; } /* make a lease on the kring for N positions. return the * lease index */ static inline uint32_t nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx) { uint32_t lim = k->nkr_num_slots - 1; uint32_t lease_idx = k->nkr_lease_idx; k->nkr_leases[lease_idx] = NR_NOSLOT; k->nkr_lease_idx = nm_next(lease_idx, lim); if (n > nm_kr_space(k, is_rx)) { D("invalid request for %d slots", n); panic("x"); } /* XXX verify that there are n slots */ k->nkr_hwlease += n; if (k->nkr_hwlease > lim) k->nkr_hwlease -= lim + 1; if (k->nkr_hwlease >= k->nkr_num_slots || k->nr_hwcur >= k->nkr_num_slots || k->nr_hwavail >= k->nkr_num_slots || k->nkr_lease_idx >= k->nkr_num_slots) { D("invalid kring %s, cur %d avail %d lease %d lease_idx %d lim %d", k->na->ifp->if_xname, k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease, k->nkr_lease_idx, k->nkr_num_slots); } return lease_idx; } /* * XXX NETMAP_DELETING() is unused * * The combination of "enable" (ifp->if_capenable & IFCAP_NETMAP) * and refcount gives the status of the interface, namely: * * enable refcount Status * * FALSE 0 normal operation * FALSE != 0 -- (impossible) * TRUE 1 netmap mode * TRUE 0 being deleted. */ #define NETMAP_DELETING(_na) ( ((_na)->refcount == 0) && \ ( (_na)->ifp->if_capenable & IFCAP_NETMAP) ) /* * The following are support routines used by individual drivers to * support netmap operation. * * netmap_attach() initializes a struct netmap_adapter, allocating the * struct netmap_ring's and the struct selinfo. * * netmap_detach() frees the memory allocated by netmap_attach(). * * netmap_transmit() replaces the if_transmit routine of the interface, * and is used to intercept packets coming from the stack. * * netmap_load_map/netmap_reload_map are helper routines to set/reset * the dmamap for a packet buffer * * netmap_reset() is a helper routine to be called in the driver * when reinitializing a ring. 
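 *
 * As a sketch (all names other than the netmap_* ones are hypothetical),
 * a driver 'foo' typically calls netmap_attach() at the end of its own
 * attach routine, after filling a netmap_adapter on the stack:
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = ifp;
 *	na.num_tx_desc = adapter->num_tx_desc;
 *	na.num_rx_desc = adapter->num_rx_desc;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	netmap_attach(&na, adapter->num_queues);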
*/ int netmap_attach(struct netmap_adapter *, u_int); void netmap_detach(struct ifnet *); int netmap_transmit(struct ifnet *, struct mbuf *); enum txrx { NR_RX = 0, NR_TX = 1 }; struct netmap_slot *netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n, u_int new_cur); int netmap_ring_reinit(struct netmap_kring *); u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg); /* * The following bridge-related interfaces are used by other kernel modules * In the version that only supports unicast or broadcast, the lookup * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports, * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown. * XXX in practice "unknown" might be handled same as broadcast. */ typedef u_int (*bdg_lookup_fn_t)(char *buf, u_int len, uint8_t *ring_nr, struct netmap_adapter *); int netmap_bdg_ctl(struct nmreq *nmr, bdg_lookup_fn_t func); u_int netmap_bdg_learning(char *, u_int, uint8_t *, struct netmap_adapter *); #define NM_NAME "vale" /* prefix for the bridge port name */ #define NM_BDG_MAXPORTS 254 /* up to 32 for bitmap, 254 ok otherwise */ #define NM_BDG_BROADCAST NM_BDG_MAXPORTS #define NM_BDG_NOPORT (NM_BDG_MAXPORTS+1) extern u_int netmap_buf_size; #define NETMAP_BUF_SIZE netmap_buf_size // XXX remove extern int netmap_mitigate; extern int netmap_no_pendintr; extern u_int netmap_total_buffers; extern char *netmap_buffer_base; extern int netmap_verbose; // XXX debugging enum { /* verbose flags */ NM_VERB_ON = 1, /* generic verbose */ NM_VERB_HOST = 0x2, /* verbose host stack */ NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */ NM_VERB_TXSYNC = 0x20, NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */ NM_VERB_TXINTR = 0x200, NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on rx/tx intr (driver) */ NM_VERB_NIC_TXSYNC = 0x2000, }; /* * NA returns a pointer to the struct netmap adapter from the ifp, * WNA is used to write it. * SWNA() is used for the "host stack" endpoint associated * to an interface. It is allocated together with the main NA(), * as an array of two objects. */ #ifndef WNA #define WNA(_ifp) (_ifp)->if_pspare[0] #endif #define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp)) #define SWNA(_ifp) (NA(_ifp) + 1) /* * Macros to determine if an interface is netmap capable or netmap enabled. * See the magic field in struct netmap_adapter. */ #ifdef __FreeBSD__ /* * on FreeBSD just use if_capabilities and if_capenable. */ #define NETMAP_CAPABLE(ifp) (NA(ifp) && \ (ifp)->if_capabilities & IFCAP_NETMAP ) #define NETMAP_SET_CAPABLE(ifp) \ (ifp)->if_capabilities |= IFCAP_NETMAP #else /* linux */ /* * on linux: * we check if NA(ifp) is set and its first element has a related * magic value. The capenable is within the struct netmap_adapter. */ #define NETMAP_MAGIC 0x52697a7a #define NETMAP_CAPABLE(ifp) (NA(ifp) && \ ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC ) #define NETMAP_SET_CAPABLE(ifp) \ NA(ifp)->magic = ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC #endif /* linux */ #ifdef __FreeBSD__ /* Callback invoked by the dma machinery after a successfull dmamap_load */ static void netmap_dmamap_cb(__unused void *arg, __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error) { } /* bus_dmamap_load wrapper: call aforementioned function if map != NULL. * XXX can we do it without a callback ? 
*/ static inline void netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf) { if (map) bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE, netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); } /* update the map when a buffer changes. */ static inline void netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf) { if (map) { bus_dmamap_unload(tag, map); bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE, netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); } } #else /* linux */ /* * XXX How do we redefine these functions: * * on linux we need * dma_map_single(&pdev->dev, virt_addr, len, direction) * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction * The len can be implicit (on netmap it is NETMAP_BUF_SIZE) * unfortunately the direction is not, so we need to change * something to have a cross API */ #define netmap_load_map(_t, _m, _b) #define netmap_reload_map(_t, _m, _b) #if 0 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l]; /* set time_stamp *before* dma to help avoid a possible race */ buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = false; buffer_info->length = len; //buffer_info->next_to_watch = l; /* reload dma map */ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, NETMAP_BUF_SIZE, DMA_TO_DEVICE); buffer_info->dma = dma_map_single(&adapter->pdev->dev, addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { D("dma mapping error"); /* goto dma_error; See e1000_put_txbuf() */ /* XXX reset */ } tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX #endif /* * The bus_dmamap_sync() can be one of wmb() or rmb() depending on direction. */ #define bus_dmamap_sync(_a, _b, _c) #endif /* linux */ /* * functions to map NIC to KRING indexes (n2k) and vice versa (k2n) */ static inline int netmap_idx_n2k(struct netmap_kring *kr, int idx) { int n = kr->nkr_num_slots; idx += kr->nkr_hwofs; if (idx < 0) return idx + n; else if (idx < n) return idx; else return idx - n; } static inline int netmap_idx_k2n(struct netmap_kring *kr, int idx) { int n = kr->nkr_num_slots; idx -= kr->nkr_hwofs; if (idx < 0) return idx + n; else if (idx < n) return idx; else return idx - n; } /* Entries of the look-up table. */ struct lut_entry { void *vaddr; /* virtual address. */ vm_paddr_t paddr; /* physical address. */ }; struct netmap_obj_pool; extern struct lut_entry *netmap_buffer_lut; #define NMB_VA(i) (netmap_buffer_lut[i].vaddr) #define NMB_PA(i) (netmap_buffer_lut[i].paddr) /* * NMB return the virtual address of a buffer (buffer 0 on bad index) * PNMB also fills the physical address */ static inline void * NMB(struct netmap_slot *slot) { uint32_t i = slot->buf_idx; return (unlikely(i >= netmap_total_buffers)) ? NMB_VA(0) : NMB_VA(i); } static inline void * PNMB(struct netmap_slot *slot, uint64_t *pp) { uint32_t i = slot->buf_idx; void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i); *pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i); return ret; } /* default functions to handle rx/tx interrupts */ int netmap_rx_irq(struct ifnet *, u_int, u_int *); #define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL) #ifdef __FreeBSD__ MALLOC_DECLARE(M_NETMAP); #endif /* __FreeBSD__ */ void netmap_disable_all_rings(struct ifnet *); void netmap_enable_all_rings(struct ifnet *); #endif /* _NET_NETMAP_KERN_H_ */ netmap-release/sys/dev/netmap/netmap_mem1.c000644 000765 000024 00000037104 12220335545 021471 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/netmap_mem1.c 234174 2012-04-12 11:27:09Z luigi $ * * The original netmap memory allocator, using a single large * chunk of memory allocated with contigmalloc. */ /* * Default amount of memory pre-allocated by the module. * We start with a large size and then shrink our demand * according to what is avalable when the module is loaded. */ #define NETMAP_MEMORY_SIZE (64 * 1024 * PAGE_SIZE) static void * netmap_malloc(size_t size, const char *msg); static void netmap_free(void *addr, const char *msg); #define netmap_if_malloc(len) netmap_malloc(len, "nifp") #define netmap_if_free(v) netmap_free((v), "nifp") #define netmap_ring_malloc(len) netmap_malloc(len, "ring") #define netmap_free_rings(na) \ netmap_free((na)->tx_rings[0].ring, "shadow rings"); /* * Allocator for a pool of packet buffers. For each buffer we have * one entry in the bitmap to signal the state. Allocation scans * the bitmap, but since this is done only on attach, we are not * too worried about performance * XXX if we need to allocate small blocks, a translation * table is used both for kernel virtual address and physical * addresses. */ struct netmap_buf_pool { u_int total_buffers; /* total buffers. */ u_int free; u_int bufsize; char *base; /* buffer base address */ uint32_t *bitmap; /* one bit per buffer, 1 means free */ }; struct netmap_buf_pool nm_buf_pool; SYSCTL_INT(_dev_netmap, OID_AUTO, total_buffers, CTLFLAG_RD, &nm_buf_pool.total_buffers, 0, "total_buffers"); SYSCTL_INT(_dev_netmap, OID_AUTO, free_buffers, CTLFLAG_RD, &nm_buf_pool.free, 0, "free_buffers"); /* * Allocate n buffers from the ring, and fill the slot. * Buffer 0 is the 'junk' buffer. 
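 * (The bitmap maps buffer bi*32 + j to bit j of word bi, so e.g. bit 5
 * of bitmap word 3 corresponds to buffer index 101.)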
*/ static void netmap_new_bufs(struct netmap_if *nifp __unused, struct netmap_slot *slot, u_int n) { struct netmap_buf_pool *p = &nm_buf_pool; uint32_t bi = 0; /* index in the bitmap */ uint32_t mask, j, i = 0; /* slot counter */ if (n > p->free) { D("only %d out of %d buffers available", i, n); return; } /* termination is guaranteed by p->free */ while (i < n && p->free > 0) { uint32_t cur = p->bitmap[bi]; if (cur == 0) { /* bitmask is fully used */ bi++; continue; } /* locate a slot */ for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) ; p->bitmap[bi] &= ~mask; /* slot in use */ p->free--; slot[i].buf_idx = bi*32+j; slot[i].len = p->bufsize; slot[i].flags = NS_BUF_CHANGED; i++; } ND("allocated %d buffers, %d available", n, p->free); } static void netmap_free_buf(struct netmap_if *nifp __unused, uint32_t i) { struct netmap_buf_pool *p = &nm_buf_pool; uint32_t pos, mask; if (i >= p->total_buffers) { D("invalid free index %d", i); return; } pos = i / 32; mask = 1 << (i % 32); if (p->bitmap[pos] & mask) { D("slot %d already free", i); return; } p->bitmap[pos] |= mask; p->free++; } /* Descriptor of the memory objects handled by our memory allocator. */ struct netmap_mem_obj { TAILQ_ENTRY(netmap_mem_obj) nmo_next; /* next object in the chain. */ int nmo_used; /* flag set on used memory objects. */ size_t nmo_size; /* size of the memory area reserved for the object. */ void *nmo_data; /* pointer to the memory area. */ }; /* Wrap our memory objects to make them ``chainable``. */ TAILQ_HEAD(netmap_mem_obj_h, netmap_mem_obj); /* Descriptor of our custom memory allocator. */ struct netmap_mem_d { struct mtx nm_mtx; /* lock used to handle the chain of memory objects. */ struct netmap_mem_obj_h nm_molist; /* list of memory objects */ size_t nm_size; /* total amount of memory used for rings etc. */ size_t nm_totalsize; /* total amount of allocated memory (the difference is used for buffers) */ size_t nm_buf_start; /* offset of packet buffers. This is page-aligned. */ size_t nm_buf_len; /* total memory for buffers */ void *nm_buffer; /* pointer to the whole pre-allocated memory area. */ }; /* Shorthand to compute a netmap interface offset. */ #define netmap_if_offset(v) \ ((char *) (v) - (char *) nm_mem->nm_buffer) /* .. and get a physical address given a memory offset */ #define netmap_ofstophys(o) \ (vtophys(nm_mem->nm_buffer) + (o)) /*------ netmap memory allocator -------*/ /* * Request for a chunk of memory. * * Memory objects are arranged into a list, hence we need to walk this * list until we find an object with the needed amount of data free. * This sounds like a completely inefficient implementation, but given * the fact that data allocation is done once, we can handle it * flawlessly. * * Return NULL on failure. 
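 * For example, a request for 4096 bytes takes the first free object whose
 * nmo_size is at least 4096, carves a used 4096-byte object out of its
 * head, and shrinks (or removes, if nothing is left) the free object.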
*/ static void * netmap_malloc(size_t size, __unused const char *msg) { struct netmap_mem_obj *mem_obj, *new_mem_obj; void *ret = NULL; NMA_LOCK(); TAILQ_FOREACH(mem_obj, &nm_mem->nm_molist, nmo_next) { if (mem_obj->nmo_used != 0 || mem_obj->nmo_size < size) continue; new_mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP, M_WAITOK | M_ZERO); TAILQ_INSERT_BEFORE(mem_obj, new_mem_obj, nmo_next); new_mem_obj->nmo_used = 1; new_mem_obj->nmo_size = size; new_mem_obj->nmo_data = mem_obj->nmo_data; memset(new_mem_obj->nmo_data, 0, new_mem_obj->nmo_size); mem_obj->nmo_size -= size; mem_obj->nmo_data = (char *) mem_obj->nmo_data + size; if (mem_obj->nmo_size == 0) { TAILQ_REMOVE(&nm_mem->nm_molist, mem_obj, nmo_next); free(mem_obj, M_NETMAP); } ret = new_mem_obj->nmo_data; break; } NMA_UNLOCK(); ND("%s: %d bytes at %p", msg, size, ret); return (ret); } /* * Return the memory to the allocator. * * While freeing a memory object, we try to merge adjacent chunks in * order to reduce memory fragmentation. */ static void netmap_free(void *addr, const char *msg) { size_t size; struct netmap_mem_obj *cur, *prev, *next; if (addr == NULL) { D("NULL addr for %s", msg); return; } NMA_LOCK(); TAILQ_FOREACH(cur, &nm_mem->nm_molist, nmo_next) { if (cur->nmo_data == addr && cur->nmo_used) break; } if (cur == NULL) { NMA_UNLOCK(); D("invalid addr %s %p", msg, addr); return; } size = cur->nmo_size; cur->nmo_used = 0; /* merge the current chunk of memory with the previous one, if present. */ prev = TAILQ_PREV(cur, netmap_mem_obj_h, nmo_next); if (prev && prev->nmo_used == 0) { TAILQ_REMOVE(&nm_mem->nm_molist, cur, nmo_next); prev->nmo_size += cur->nmo_size; free(cur, M_NETMAP); cur = prev; } /* merge with the next one */ next = TAILQ_NEXT(cur, nmo_next); if (next && next->nmo_used == 0) { TAILQ_REMOVE(&nm_mem->nm_molist, next, nmo_next); cur->nmo_size += next->nmo_size; free(next, M_NETMAP); } NMA_UNLOCK(); ND("freed %s %d bytes at %p", msg, size, addr); } /* * Create and return a new ``netmap_if`` object, and possibly also * rings and packet buffers. * * Return NULL on failure. */ static void * netmap_if_new(const char *ifname, struct netmap_adapter *na) { struct netmap_if *nifp; struct netmap_ring *ring; struct netmap_kring *kring; char *buff; u_int i, len, ofs, numdesc; u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack queue */ u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack queue */ /* * the descriptor is followed inline by an array of offsets * to the tx and rx rings in the shared memory region. */ len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t); nifp = netmap_if_malloc(len); if (nifp == NULL) return (NULL); /* initialize base fields */ *(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; *(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; strncpy(nifp->ni_name, ifname, IFNAMSIZ); (na->refcount)++; /* XXX atomic ? we are under lock */ if (na->refcount > 1) goto final; /* * First instance. Allocate the netmap rings * (one for each hw queue, one pair for the host). * The rings are contiguous, but have variable size.
* The entire block is reachable at * na->tx_rings[0] */ len = (ntx + nrx) * sizeof(struct netmap_ring) + (ntx * na->num_tx_desc + nrx * na->num_rx_desc) * sizeof(struct netmap_slot); buff = netmap_ring_malloc(len); if (buff == NULL) { D("failed to allocate %d bytes for %s shadow ring", len, ifname); error: (na->refcount)--; netmap_if_free(nifp); return (NULL); } /* Check whether we have enough buffers */ len = ntx * na->num_tx_desc + nrx * na->num_rx_desc; NMA_LOCK(); if (nm_buf_pool.free < len) { NMA_UNLOCK(); netmap_free(buff, "not enough bufs"); goto error; } /* * in the kring, store the pointers to the shared rings * and initialize the rings. We are under NMA_LOCK(). */ ofs = 0; for (i = 0; i < ntx; i++) { /* Transmit rings */ kring = &na->tx_rings[i]; numdesc = na->num_tx_desc; bzero(kring, sizeof(*kring)); kring->na = na; ring = kring->ring = (struct netmap_ring *)(buff + ofs); *(ssize_t *)(uintptr_t)&ring->buf_ofs = nm_buf_pool.base - (char *)ring; ND("txring[%d] at %p ofs %d", i, ring, ring->buf_ofs); *(uint32_t *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = numdesc; /* * IMPORTANT: * Always keep one slot empty, so we can detect new * transmissions comparing cur and nr_hwcur (they are * the same only if there are no new transmissions). */ ring->avail = kring->nr_hwavail = numdesc - 1; ring->cur = kring->nr_hwcur = 0; *(uint16_t *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE; netmap_new_bufs(nifp, ring->slot, numdesc); ofs += sizeof(struct netmap_ring) + numdesc * sizeof(struct netmap_slot); } for (i = 0; i < nrx; i++) { /* Receive rings */ kring = &na->rx_rings[i]; numdesc = na->num_rx_desc; bzero(kring, sizeof(*kring)); kring->na = na; ring = kring->ring = (struct netmap_ring *)(buff + ofs); *(ssize_t *)(uintptr_t)&ring->buf_ofs = nm_buf_pool.base - (char *)ring; ND("rxring[%d] at %p offset %d", i, ring, ring->buf_ofs); *(uint32_t *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = numdesc; ring->cur = kring->nr_hwcur = 0; ring->avail = kring->nr_hwavail = 0; /* empty */ *(uint16_t *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE; netmap_new_bufs(nifp, ring->slot, numdesc); ofs += sizeof(struct netmap_ring) + numdesc * sizeof(struct netmap_slot); } NMA_UNLOCK(); // XXX initialize the selrecord structs. final: /* * fill the slots for the rx and tx queues. They contain the offset * between the ring and nifp, so the information is usable in * userspace to reach the ring from the nifp. */ for (i = 0; i < ntx; i++) { *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = (char *)na->tx_rings[i].ring - (char *)nifp; } for (i = 0; i < nrx; i++) { *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] = (char *)na->rx_rings[i].ring - (char *)nifp; } return (nifp); } /* * Initialize the memory allocator. * * Create the descriptor for the memory , allocate the pool of memory * and initialize the list of memory objects with a single chunk * containing the whole pre-allocated memory marked as free. * * Start with a large size, then halve as needed if we fail to * allocate the block. While halving, always add one extra page * because buffers 0 and 1 are used for special purposes. * Return 0 on success, errno otherwise. 
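 * For example, with 4 KB pages the first attempt asks contigmalloc() for
 * 64K pages (256 MB) plus the extra space; on failure the request is
 * halved to 128 MB, 64 MB, ... down to 1 MB before giving up with ENOMEM.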
*/ static int netmap_memory_init(void) { struct netmap_mem_obj *mem_obj; void *buf = NULL; int i, n, sz = NETMAP_MEMORY_SIZE; int extra_sz = 0; // space for rings and two spare buffers for (; sz >= 1<<20; sz >>=1) { extra_sz = sz/200; extra_sz = (extra_sz + 2*PAGE_SIZE - 1) & ~(PAGE_SIZE-1); buf = contigmalloc(sz + extra_sz, M_NETMAP, M_WAITOK | M_ZERO, 0, /* low address */ -1UL, /* high address */ PAGE_SIZE, /* alignment */ 0 /* boundary */ ); if (buf) break; } if (buf == NULL) return (ENOMEM); sz += extra_sz; nm_mem = malloc(sizeof(struct netmap_mem_d), M_NETMAP, M_WAITOK | M_ZERO); mtx_init(&nm_mem->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF); TAILQ_INIT(&nm_mem->nm_molist); nm_mem->nm_buffer = buf; nm_mem->nm_totalsize = sz; /* * A buffer takes 2k, a slot takes 8 bytes + ring overhead, * so the ratio is 200:1. In other words, we can use 1/200 of * the memory for the rings, and the rest for the buffers, * and be sure we never run out. */ nm_mem->nm_size = sz/200; nm_mem->nm_buf_start = (nm_mem->nm_size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1); nm_mem->nm_buf_len = sz - nm_mem->nm_buf_start; nm_buf_pool.base = nm_mem->nm_buffer; nm_buf_pool.base += nm_mem->nm_buf_start; netmap_buffer_base = nm_buf_pool.base; D("netmap_buffer_base %p (offset %d)", netmap_buffer_base, (int)nm_mem->nm_buf_start); /* number of buffers, they all start as free */ netmap_total_buffers = nm_buf_pool.total_buffers = nm_mem->nm_buf_len / NETMAP_BUF_SIZE; nm_buf_pool.bufsize = NETMAP_BUF_SIZE; D("Have %d MB, use %dKB for rings, %d buffers at %p", (sz >> 20), (int)(nm_mem->nm_size >> 10), nm_buf_pool.total_buffers, nm_buf_pool.base); /* allocate and initialize the bitmap. Entry 0 is considered * always busy (used as default when there are no buffers left). */ n = (nm_buf_pool.total_buffers + 31) / 32; nm_buf_pool.bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_WAITOK | M_ZERO); nm_buf_pool.bitmap[0] = ~3; /* slot 0 and 1 always busy */ for (i = 1; i < n; i++) nm_buf_pool.bitmap[i] = ~0; nm_buf_pool.free = nm_buf_pool.total_buffers - 2; mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP, M_WAITOK | M_ZERO); TAILQ_INSERT_HEAD(&nm_mem->nm_molist, mem_obj, nmo_next); mem_obj->nmo_used = 0; mem_obj->nmo_size = nm_mem->nm_size; mem_obj->nmo_data = nm_mem->nm_buffer; return (0); } /* * Finalize the memory allocator. * * Free all the memory objects contained inside the list, and deallocate * the pool of memory; finally free the memory allocator descriptor. */ static void netmap_memory_fini(void) { struct netmap_mem_obj *mem_obj; while (!TAILQ_EMPTY(&nm_mem->nm_molist)) { mem_obj = TAILQ_FIRST(&nm_mem->nm_molist); TAILQ_REMOVE(&nm_mem->nm_molist, mem_obj, nmo_next); if (mem_obj->nmo_used == 1) { printf("netmap: leaked %d bytes at %p\n", (int)mem_obj->nmo_size, mem_obj->nmo_data); } free(mem_obj, M_NETMAP); } contigfree(nm_mem->nm_buffer, nm_mem->nm_totalsize, M_NETMAP); // XXX mutex_destroy(nm_mtx); free(nm_mem, M_NETMAP); } /*------------- end of memory allocator -----------------*/ netmap-release/sys/dev/netmap/netmap_mem2.c000644 000765 000024 00000073327 12230530510 021467 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef linux #include "bsd_glue.h" #endif /* linux */ #ifdef __APPLE__ #include "osx_glue.h" #endif /* __APPLE__ */ #ifdef __FreeBSD__ #include /* prerequisite */ __FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 241723 2012-10-19 09:41:45Z glebius $"); #include #include #include #include /* vtophys */ #include /* vtophys */ #include /* sockaddrs */ #include #include #include #include #include /* bus_dmamap_* */ #endif /* __FreeBSD__ */ #include #include #include "netmap_mem2.h" #ifdef linux #define NMA_LOCK_INIT(n) sema_init(&(n)->nm_mtx, 1) #define NMA_LOCK_DESTROY(n) #define NMA_LOCK(n) down(&(n)->nm_mtx) #define NMA_UNLOCK(n) up(&(n)->nm_mtx) #else /* !linux */ #define NMA_LOCK_INIT(n) mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF) #define NMA_LOCK_DESTROY(n) mtx_destroy(&(n)->nm_mtx) #define NMA_LOCK(n) mtx_lock(&(n)->nm_mtx) #define NMA_UNLOCK(n) mtx_unlock(&(n)->nm_mtx) #endif /* linux */ struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = { [NETMAP_IF_POOL] = { .size = 1024, .num = 100, }, [NETMAP_RING_POOL] = { .size = 9*PAGE_SIZE, .num = 200, }, [NETMAP_BUF_POOL] = { .size = 2048, .num = NETMAP_BUF_MAX_NUM, }, }; /* * nm_mem is the memory allocator used for all physical interfaces * running in netmap mode. * Virtual (VALE) ports will have each its own allocator. */ static int netmap_mem_global_finalize(struct netmap_mem_d *nmd); static void netmap_mem_global_deref(struct netmap_mem_d *nmd); struct netmap_mem_d nm_mem = { /* Our memory allocator. */ .pools = { [NETMAP_IF_POOL] = { .name = "netmap_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 10, /* don't be stingy */ .nummax = 10000, /* XXX very large */ }, [NETMAP_RING_POOL] = { .name = "netmap_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "netmap_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! 
*/ }, }, .finalize = netmap_mem_global_finalize, .deref = netmap_mem_global_deref, }; // XXX logically belongs to nm_mem struct lut_entry *netmap_buffer_lut; /* exported */ /* blueprint for the private memory allocators */ static int netmap_mem_private_finalize(struct netmap_mem_d *nmd); static void netmap_mem_private_deref(struct netmap_mem_d *nmd); const struct netmap_mem_d nm_blueprint = { .pools = { [NETMAP_IF_POOL] = { .name = "%s_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 1, .nummax = 10, }, [NETMAP_RING_POOL] = { .name = "%s_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "%s_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! */ }, }, .finalize = netmap_mem_private_finalize, .deref = netmap_mem_private_deref, }; /* memory allocator related sysctls */ #define STRINGIFY(x) #x #define DECLARE_SYSCTLS(id, name) \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s") SYSCTL_DECL(_dev_netmap); DECLARE_SYSCTLS(NETMAP_IF_POOL, if); DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); /* * First, find the allocator that contains the requested offset, * then locate the cluster through a lookup table. */ vm_paddr_t netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) { int i; vm_ooffset_t o = offset; vm_paddr_t pa; struct netmap_obj_pool *p; NMA_LOCK(nmd); p = nmd->pools; for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { if (offset >= p[i].memtotal) continue; // now lookup the cluster's address pa = p[i].lut[offset / p[i]._objsize].paddr + offset % p[i]._objsize; NMA_UNLOCK(nmd); return pa; } /* this is only in case of errors */ D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, p[NETMAP_IF_POOL].memtotal, p[NETMAP_IF_POOL].memtotal + p[NETMAP_RING_POOL].memtotal, p[NETMAP_IF_POOL].memtotal + p[NETMAP_RING_POOL].memtotal + p[NETMAP_BUF_POOL].memtotal); NMA_UNLOCK(nmd); return 0; // XXX bad address } u_int netmap_mem_get_totalsize(struct netmap_mem_d* nmd) { u_int size; NMA_LOCK(nmd); size = nmd->nm_totalsize; NMA_UNLOCK(nmd); return size; } /* * we store objects by kernel address, need to find the offset * within the pool to export the value to userspace. 
* Algorithm: scan until we find the cluster, then add the * actual offset in the cluster */ static ssize_t netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) { int i, k = p->_clustentries, n = p->objtotal; ssize_t ofs = 0; for (i = 0; i < n; i += k, ofs += p->_clustsize) { const char *base = p->lut[i].vaddr; ssize_t relofs = (const char *) vaddr - base; if (relofs < 0 || relofs >= p->_clustsize) continue; ofs = ofs + relofs; ND("%s: return offset %d (cluster %d) for pointer %p", p->name, ofs, i, vaddr); return ofs; } D("address %p is not contained inside any cluster (%s)", vaddr, p->name); return 0; /* An error occurred */ } /* Helper functions which convert virtual addresses to offsets */ #define netmap_if_offset(n, v) \ netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) #define netmap_ring_offset(n, v) \ ((n)->pools[NETMAP_IF_POOL].memtotal + \ netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) #define netmap_buf_offset(n, v) \ ((n)->pools[NETMAP_IF_POOL].memtotal + \ (n)->pools[NETMAP_RING_POOL].memtotal + \ netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v))) ssize_t netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr) { ssize_t v; NMA_LOCK(nmd); v = netmap_if_offset(nmd, addr); NMA_UNLOCK(nmd); return v; } /* * report the index, and use start position as a hint, * otherwise buffer allocation becomes terribly expensive. */ static void * netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) { uint32_t i = 0; /* index in the bitmap */ uint32_t mask, j; /* slot counter */ void *vaddr = NULL; if (len > p->_objsize) { D("%s request size %d too large", p->name, len); // XXX cannot reduce the size return NULL; } if (p->objfree == 0) { D("%s allocator: run out of memory", p->name); return NULL; } if (start) i = *start; /* termination is guaranteed by p->free, but better check bounds on i */ while (vaddr == NULL && i < p->bitmap_slots) { uint32_t cur = p->bitmap[i]; if (cur == 0) { /* bitmask is fully used */ i++; continue; } /* locate a slot */ for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) ; p->bitmap[i] &= ~mask; /* mark object as in use */ p->objfree--; vaddr = p->lut[i * 32 + j].vaddr; if (index) *index = i * 32 + j; } ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr); if (start) *start = i; return vaddr; } /* * free by index, not by address. 
This is slow, but is only used * for a small number of objects (rings, nifp) */ static void netmap_obj_free(struct netmap_obj_pool *p, uint32_t j) { if (j >= p->objtotal) { D("invalid index %u, max %u", j, p->objtotal); return; } p->bitmap[j / 32] |= (1 << (j % 32)); p->objfree++; return; } static void netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) { u_int i, j, n = p->numclusters; for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { void *base = p->lut[i * p->_clustentries].vaddr; ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; /* Given address, is out of the scope of the current cluster.*/ if (vaddr < base || relofs >= p->_clustsize) continue; j = j + relofs / p->_objsize; /* KASSERT(j != 0, ("Cannot free object 0")); */ netmap_obj_free(p, j); return; } D("address %p is not contained inside any cluster (%s)", vaddr, p->name); } #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) #define netmap_buf_malloc(n, _pos, _index) \ netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index) /* Return the index associated to the given packet buffer */ #define netmap_buf_index(n, v) \ (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) /* Return nonzero on error */ static int netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_if *nifp, struct netmap_slot *slot, u_int n) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; u_int i = 0; /* slot counter */ uint32_t pos = 0; /* slot in p->bitmap */ uint32_t index = 0; /* buffer index */ (void)nifp; /* UNUSED */ for (i = 0; i < n; i++) { void *vaddr = netmap_buf_malloc(nmd, &pos, &index); if (vaddr == NULL) { D("unable to locate empty packet buffer"); goto cleanup; } slot[i].buf_idx = index; slot[i].len = p->_objsize; /* XXX setting flags=NS_BUF_CHANGED forces a pointer reload * in the NIC ring. This is a hack that hides missing * initializations in the drivers, and should go away. */ // slot[i].flags = NS_BUF_CHANGED; } ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos); return (0); cleanup: while (i > 0) { i--; netmap_obj_free(p, slot[i].buf_idx); } bzero(slot, n * sizeof(slot[0])); return (ENOMEM); } static void netmap_free_buf(struct netmap_mem_d *nmd, struct netmap_if *nifp, uint32_t i) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; (void)nifp; if (i < 2 || i >= p->objtotal) { D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); return; } netmap_obj_free(p, i); } static void netmap_reset_obj_allocator(struct netmap_obj_pool *p) { if (p == NULL) return; if (p->bitmap) free(p->bitmap, M_NETMAP); p->bitmap = NULL; if (p->lut) { u_int i; size_t sz = p->_clustsize; for (i = 0; i < p->objtotal; i += p->_clustentries) { if (p->lut[i].vaddr) contigfree(p->lut[i].vaddr, sz, M_NETMAP); } bzero(p->lut, sizeof(struct lut_entry) * p->objtotal); #ifdef linux vfree(p->lut); #else free(p->lut, M_NETMAP); #endif } p->lut = NULL; p->objtotal = 0; p->memtotal = 0; p->numclusters = 0; p->objfree = 0; } /* * Free all resources related to an allocator. 
*/ static void netmap_destroy_obj_allocator(struct netmap_obj_pool *p) { if (p == NULL) return; netmap_reset_obj_allocator(p); } /* * We receive a request for objtotal objects, of size objsize each. * Internally we may round up both numbers, as we allocate objects * in small clusters multiple of the page size. * We need to keep track of objtotal and clustentries, * as they are needed when freeing memory. * * XXX note -- userspace needs the buffers to be contiguous, * so we cannot afford gaps at the end of a cluster. */ /* call with NMA_LOCK held */ static int netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) { int i; u_int clustsize; /* the cluster size, multiple of page size */ u_int clustentries; /* how many objects per entry */ /* we store the current request, so we can * detect configuration changes later */ p->r_objtotal = objtotal; p->r_objsize = objsize; #define MAX_CLUSTSIZE (1<<17) #define LINE_ROUND 64 if (objsize >= MAX_CLUSTSIZE) { /* we could do it but there is no point */ D("unsupported allocation for %d bytes", objsize); return EINVAL; } /* make sure objsize is a multiple of LINE_ROUND */ i = (objsize & (LINE_ROUND - 1)); if (i) { D("XXX aligning object by %d bytes", LINE_ROUND - i); objsize += LINE_ROUND - i; } if (objsize < p->objminsize || objsize > p->objmaxsize) { D("requested objsize %d out of range [%d, %d]", objsize, p->objminsize, p->objmaxsize); return EINVAL; } if (objtotal < p->nummin || objtotal > p->nummax) { D("requested objtotal %d out of range [%d, %d]", objtotal, p->nummin, p->nummax); return EINVAL; } /* * Compute number of objects using a brute-force approach: * given a max cluster size, * we try to fill it with objects keeping track of the * wasted space to the next page boundary. 
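 * A worked example (illustrative, assuming PAGE_SIZE is 4096): with
 * 2048-byte objects the loop below stops at clustentries = 2 (exactly one
 * page, no waste); with 1536-byte objects it stops at clustentries = 8
 * (exactly three pages), since any smaller count leaves some slack before
 * the next page boundary.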
*/ for (clustentries = 0, i = 1;; i++) { u_int delta, used = i * objsize; if (used > MAX_CLUSTSIZE) break; delta = used % PAGE_SIZE; if (delta == 0) { // exact solution clustentries = i; break; } if (delta > ( (clustentries*objsize) % PAGE_SIZE) ) clustentries = i; } // D("XXX --- ouch, delta %d (bad for buffers)", delta); /* compute clustsize and round to the next page */ clustsize = clustentries * objsize; i = (clustsize & (PAGE_SIZE - 1)); if (i) clustsize += PAGE_SIZE - i; if (netmap_verbose) D("objsize %d clustsize %d objects %d", objsize, clustsize, clustentries); /* * The number of clusters is n = ceil(objtotal/clustentries) * objtotal' = n * clustentries */ p->_clustentries = clustentries; p->_clustsize = clustsize; p->_numclusters = (objtotal + clustentries - 1) / clustentries; /* actual values (may be larger than requested) */ p->_objsize = objsize; p->_objtotal = p->_numclusters * clustentries; return 0; } /* call with NMA_LOCK held */ static int netmap_finalize_obj_allocator(struct netmap_obj_pool *p) { int i; /* must be signed */ size_t n; /* optimistically assume we have enough memory */ p->numclusters = p->_numclusters; p->objtotal = p->_objtotal; n = sizeof(struct lut_entry) * p->objtotal; #ifdef linux p->lut = vmalloc(n); #else p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO); #endif if (p->lut == NULL) { D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name); goto clean; } /* Allocate the bitmap */ n = (p->objtotal + 31) / 32; p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO); if (p->bitmap == NULL) { D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, p->name); goto clean; } p->bitmap_slots = n; /* * Allocate clusters, init pointers and bitmap */ n = p->_clustsize; for (i = 0; i < (int)p->objtotal;) { int lim = i + p->_clustentries; char *clust; clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); if (clust == NULL) { /* * If we get here, there is a severe memory shortage, * so halve the allocated memory to reclaim some. 
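 * The recovery path below rolls back roughly half of the objects
 * initialized so far (clearing their bitmap bits and freeing the
 * corresponding clusters) and then shrinks objtotal and numclusters
 * to what actually survived.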
*/ D("Unable to create cluster at %d for '%s' allocator", i, p->name); if (i < 2) /* nothing to halve */ goto out; lim = i / 2; for (i--; i >= lim; i--) { p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) ); if (i % p->_clustentries == 0 && p->lut[i].vaddr) contigfree(p->lut[i].vaddr, n, M_NETMAP); } out: p->objtotal = i; /* we may have stopped in the middle of a cluster */ p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; break; } for (; i < lim; i++, clust += p->_objsize) { p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) ); p->lut[i].vaddr = clust; p->lut[i].paddr = vtophys(clust); } } p->objfree = p->objtotal; p->memtotal = p->numclusters * p->_clustsize; if (p->objfree == 0) goto clean; if (netmap_verbose) D("Pre-allocated %d clusters (%d/%dKB) for '%s'", p->numclusters, p->_clustsize >> 10, p->memtotal >> 10, p->name); return 0; clean: netmap_reset_obj_allocator(p); return ENOMEM; } /* call with lock held */ static int netmap_memory_config_changed(struct netmap_mem_d *nmd) { int i; for (i = 0; i < NETMAP_POOLS_NR; i++) { if (nmd->pools[i]._objsize != netmap_params[i].size || nmd->pools[i].objtotal != netmap_params[i].num) if (nmd->pools[i].r_objsize != netmap_params[i].size || nmd->pools[i].r_objtotal != netmap_params[i].num) return 1; } return 0; } static void netmap_mem_reset_all(struct netmap_mem_d *nmd) { int i; D("resetting %p", nmd); for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->finalized = 0; } static int netmap_mem_finalize_all(struct netmap_mem_d *nmd) { int i; if (nmd->finalized) return 0; nmd->lasterr = 0; nmd->nm_totalsize = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]); if (nmd->lasterr) goto error; nmd->nm_totalsize += nmd->pools[i].memtotal; } /* buffers 0 and 1 are reserved */ nmd->pools[NETMAP_BUF_POOL].objfree -= 2; nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3; nmd->finalized = 1; D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers", nmd->pools[NETMAP_IF_POOL].memtotal >> 10, nmd->pools[NETMAP_RING_POOL].memtotal >> 10, nmd->pools[NETMAP_BUF_POOL].memtotal >> 20); D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree); return 0; error: netmap_mem_reset_all(nmd); return nmd->lasterr; } void netmap_mem_private_delete(struct netmap_mem_d *nmd) { if (nmd == NULL) return; D("deleting %p", nmd); if (nmd->refcount > 0) D("bug: deleting mem allocator with refcount=%d!", nmd->refcount); D("done deleting %p", nmd); NMA_LOCK_DESTROY(nmd); free(nmd, M_DEVBUF); } static int netmap_mem_private_finalize(struct netmap_mem_d *nmd) { int err; NMA_LOCK(nmd); nmd->refcount++; err = netmap_mem_finalize_all(nmd); NMA_UNLOCK(nmd); return err; } static void netmap_mem_private_deref(struct netmap_mem_d *nmd) { NMA_LOCK(nmd); if (--nmd->refcount <= 0) netmap_mem_reset_all(nmd); NMA_UNLOCK(nmd); } struct netmap_mem_d * netmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd) { struct netmap_mem_d *d = NULL; struct netmap_obj_params p[NETMAP_POOLS_NR]; int i; u_int maxd; d = malloc(sizeof(struct netmap_mem_d), M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) return NULL; *d = nm_blueprint; /* XXX the rest of the code assumes the stack rings are alwasy present */ txr++; rxr++; p[NETMAP_IF_POOL].size = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr); p[NETMAP_IF_POOL].num = 2; maxd = (txd > rxd) ? 
txd : rxd; p[NETMAP_RING_POOL].size = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd; p[NETMAP_RING_POOL].num = txr + rxr; p[NETMAP_BUF_POOL].size = 2048; /* XXX find a way to let the user choose this */ p[NETMAP_BUF_POOL].num = rxr * (rxd + 2) + txr * (txd + 2); D("req if %d*%d ring %d*%d buf %d*%d", p[NETMAP_IF_POOL].num, p[NETMAP_IF_POOL].size, p[NETMAP_RING_POOL].num, p[NETMAP_RING_POOL].size, p[NETMAP_BUF_POOL].num, p[NETMAP_BUF_POOL].size); for (i = 0; i < NETMAP_POOLS_NR; i++) { snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, nm_blueprint.pools[i].name, name); if (netmap_config_obj_allocator(&d->pools[i], p[i].num, p[i].size)) goto error; } d->finalized = 0; NMA_LOCK_INIT(d); return d; error: netmap_mem_private_delete(d); return NULL; } /* call with lock held */ static int netmap_memory_config(struct netmap_mem_d *nmd) { int i; if (!netmap_memory_config_changed(nmd)) goto out; D("reconfiguring"); if (nmd->finalized) { /* reset previous allocation */ for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->finalized = 0; } for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], netmap_params[i].num, netmap_params[i].size); if (nmd->lasterr) goto out; } out: return nmd->lasterr; } static int netmap_mem_global_finalize(struct netmap_mem_d *nmd) { int err; NMA_LOCK(nmd); nmd->refcount++; if (nmd->refcount > 1) { ND("busy (refcount %d)", nmd->refcount); goto out; } /* update configuration if changed */ if (netmap_memory_config(nmd)) goto out; if (nmd->finalized) { /* may happen if config is not changed */ ND("nothing to do"); goto out; } if (netmap_mem_finalize_all(nmd)) goto out; /* backward compatibility */ netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize; netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal; netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut; netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr; nmd->lasterr = 0; out: if (nmd->lasterr) nmd->refcount--; err = nmd->lasterr; NMA_UNLOCK(nmd); return err; } int netmap_mem_init(void) { NMA_LOCK_INIT(&nm_mem); return (0); } void netmap_mem_fini(void) { int i; for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_destroy_obj_allocator(&nm_mem.pools[i]); } NMA_LOCK_DESTROY(&nm_mem); } static void netmap_free_rings(struct netmap_adapter *na) { u_int i; if (!na->tx_rings) return; for (i = 0; i < na->num_tx_rings + 1; i++) { if (na->tx_rings[i].ring) { netmap_ring_free(na->nm_mem, na->tx_rings[i].ring); na->tx_rings[i].ring = NULL; } } for (i = 0; i < na->num_rx_rings + 1; i++) { if (na->rx_rings[i].ring) { netmap_ring_free(na->nm_mem, na->rx_rings[i].ring); na->rx_rings[i].ring = NULL; } } free(na->tx_rings, M_DEVBUF); na->tx_rings = na->rx_rings = NULL; } /* call with NMA_LOCK held */ /* * Allocate the per-fd structure netmap_if. * If this is the first instance, also allocate the krings, rings etc. * * We assume that the configuration stored in na * (number of tx/rx rings and descs) does not change while * the interface is in netmap mode. 
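 * (this matters because the krings and rings allocated below are sized
 * from those values only once, on the first instance, and are then reused
 * unchanged by every later netmap_if for the same adapter)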
*/ extern int nma_is_vp(struct netmap_adapter *na); void * netmap_mem_if_new(const char *ifname, struct netmap_adapter *na) { struct netmap_if *nifp; struct netmap_ring *ring; ssize_t base; /* handy for relative offsets between rings and nifp */ u_int i, len, ndesc, ntx, nrx; struct netmap_kring *kring; uint32_t *tx_leases = NULL, *rx_leases = NULL; /* * verify whether virtual port need the stack ring */ ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */ nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */ /* * the descriptor is followed inline by an array of offsets * to the tx and rx rings in the shared memory region. * For virtual rx rings we also allocate an array of * pointers to assign to nkr_leases. */ NMA_LOCK(na->nm_mem); len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t); nifp = netmap_if_malloc(na->nm_mem, len); if (nifp == NULL) { NMA_UNLOCK(na->nm_mem); return NULL; } /* initialize base fields -- override const */ *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ); if (na->refcount) { /* already setup, we are done */ goto final; } len = (ntx + nrx) * sizeof(struct netmap_kring); /* * Leases are attached to TX rings on NIC/host ports, * and to RX rings on VALE ports. */ if (nma_is_vp(na)) { len += sizeof(uint32_t) * na->num_rx_desc * na->num_rx_rings; } else { len += sizeof(uint32_t) * na->num_tx_desc * ntx; } na->tx_rings = malloc((size_t)len, M_DEVBUF, M_NOWAIT | M_ZERO); if (na->tx_rings == NULL) { D("Cannot allocate krings for %s", ifname); goto cleanup; } na->rx_rings = na->tx_rings + ntx; if (nma_is_vp(na)) { rx_leases = (uint32_t *)(na->rx_rings + nrx); } else { tx_leases = (uint32_t *)(na->rx_rings + nrx); } /* * First instance, allocate netmap rings and buffers for this card * The rings are contiguous, but have variable size. */ for (i = 0; i < ntx; i++) { /* Transmit rings */ kring = &na->tx_rings[i]; ndesc = na->num_tx_desc; bzero(kring, sizeof(*kring)); len = sizeof(struct netmap_ring) + ndesc * sizeof(struct netmap_slot); ring = netmap_ring_malloc(na->nm_mem, len); if (ring == NULL) { D("Cannot allocate tx_ring[%d] for %s", i, ifname); goto cleanup; } ND("txring[%d] at %p ofs %d", i, ring); kring->na = na; kring->ring = ring; if (tx_leases) { kring->nkr_leases = tx_leases; tx_leases += ndesc; } *(uint32_t *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc; *(ssize_t *)(uintptr_t)&ring->buf_ofs = (na->nm_mem->pools[NETMAP_IF_POOL].memtotal + na->nm_mem->pools[NETMAP_RING_POOL].memtotal) - netmap_ring_offset(na->nm_mem, ring); /* * IMPORTANT: * Always keep one slot empty, so we can detect new * transmissions comparing cur and nr_hwcur (they are * the same only if there are no new transmissions). 
*/ ring->avail = kring->nr_hwavail = ndesc - 1; ring->cur = kring->nr_hwcur = 0; *(uint16_t *)(uintptr_t)&ring->nr_buf_size = NETMAP_BDG_BUF_SIZE(na->nm_mem); ND("initializing slots for txring[%d]", i); if (netmap_new_bufs(na->nm_mem, nifp, ring->slot, ndesc)) { D("Cannot allocate buffers for tx_ring[%d] for %s", i, ifname); goto cleanup; } } for (i = 0; i < nrx; i++) { /* Receive rings */ kring = &na->rx_rings[i]; ndesc = na->num_rx_desc; bzero(kring, sizeof(*kring)); len = sizeof(struct netmap_ring) + ndesc * sizeof(struct netmap_slot); ring = netmap_ring_malloc(na->nm_mem, len); if (ring == NULL) { D("Cannot allocate rx_ring[%d] for %s", i, ifname); goto cleanup; } ND("rxring[%d] at %p ofs %d", i, ring); kring->na = na; kring->ring = ring; if (rx_leases && i < na->num_rx_rings) { kring->nkr_leases = rx_leases; rx_leases += ndesc; } *(uint32_t *)(uintptr_t)&ring->num_slots = kring->nkr_num_slots = ndesc; *(ssize_t *)(uintptr_t)&ring->buf_ofs = (na->nm_mem->pools[NETMAP_IF_POOL].memtotal + na->nm_mem->pools[NETMAP_RING_POOL].memtotal) - netmap_ring_offset(na->nm_mem, ring); ring->cur = kring->nr_hwcur = 0; ring->avail = kring->nr_hwavail = 0; /* empty */ *(int *)(uintptr_t)&ring->nr_buf_size = NETMAP_BDG_BUF_SIZE(na->nm_mem); ND("initializing slots for rxring[%d]", i); if (netmap_new_bufs(na->nm_mem, nifp, ring->slot, ndesc)) { D("Cannot allocate buffers for rx_ring[%d] for %s", i, ifname); goto cleanup; } } #ifdef linux // XXX initialize the selrecord structs. for (i = 0; i < ntx; i++) init_waitqueue_head(&na->tx_rings[i].si); for (i = 0; i < nrx; i++) init_waitqueue_head(&na->rx_rings[i].si); init_waitqueue_head(&na->tx_si); init_waitqueue_head(&na->rx_si); #endif final: /* * fill the slots for the rx and tx rings. They contain the offset * between the ring and nifp, so the information is usable in * userspace to reach the ring from the nifp. 
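 * (userspace normally reaches the rings through the NETMAP_TXRING()/
 * NETMAP_RXRING() macros, which simply add the stored offset to the
 * nifp address)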
*/ base = netmap_if_offset(na->nm_mem, nifp); for (i = 0; i < ntx; i++) { *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base; } for (i = 0; i < nrx; i++) { *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] = netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base; } NMA_UNLOCK(na->nm_mem); return (nifp); cleanup: netmap_free_rings(na); netmap_if_free(na->nm_mem, nifp); NMA_UNLOCK(na->nm_mem); return NULL; } void netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) { if (nifp == NULL) /* nothing to do */ return; NMA_LOCK(na->nm_mem); if (na->refcount <= 0) { /* last instance, release bufs and rings */ u_int i, j, lim; struct netmap_ring *ring; for (i = 0; i < na->num_tx_rings + 1; i++) { ring = na->tx_rings[i].ring; lim = na->tx_rings[i].nkr_num_slots; for (j = 0; j < lim; j++) netmap_free_buf(na->nm_mem, nifp, ring->slot[j].buf_idx); } for (i = 0; i < na->num_rx_rings + 1; i++) { ring = na->rx_rings[i].ring; lim = na->rx_rings[i].nkr_num_slots; for (j = 0; j < lim; j++) netmap_free_buf(na->nm_mem, nifp, ring->slot[j].buf_idx); } netmap_free_rings(na); } netmap_if_free(na->nm_mem, nifp); NMA_UNLOCK(na->nm_mem); } static void netmap_mem_global_deref(struct netmap_mem_d *nmd) { NMA_LOCK(nmd); nmd->refcount--; if (netmap_verbose) D("refcount = %d", nmd->refcount); NMA_UNLOCK(nmd); } int netmap_mem_finalize(struct netmap_mem_d *nmd) { return nmd->finalize(nmd); } void netmap_mem_deref(struct netmap_mem_d *nmd) { return nmd->deref(nmd); } netmap-release/sys/dev/netmap/netmap_mem2.h000644 000765 000024 00000020150 12220335545 021470 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012-2013 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/netmap_mem2.c 234290 2012-04-14 16:44:18Z luigi $ * * (New) memory allocator for netmap */ /* * This allocator creates three memory pools: * nm_if_pool for the struct netmap_if * nm_ring_pool for the struct netmap_ring * nm_buf_pool for the packet buffers. * * that contain netmap objects. Each pool is made of a number of clusters, * multiple of a page size, each containing an integer number of objects. * The clusters are contiguous in user space but not in the kernel. 
* Only nm_buf_pool needs to be dma-able, * but for convenience use the same type of allocator for all. * * Once mapped, the three pools are exported to userspace * as a contiguous block, starting from nm_if_pool. Each * cluster (and pool) is an integral number of pages. * [ . . . ][ . . . . . .][ . . . . . . . . . .] * nm_if nm_ring nm_buf * * The userspace areas contain offsets of the objects in userspace. * When (at init time) we write these offsets, we find out the index * of the object, and from there locate the offset from the beginning * of the region. * * The invididual allocators manage a pool of memory for objects of * the same size. * The pool is split into smaller clusters, whose size is a * multiple of the page size. The cluster size is chosen * to minimize the waste for a given max cluster size * (we do it by brute force, as we have relatively few objects * per cluster). * * Objects are aligned to the cache line (64 bytes) rounding up object * sizes when needed. A bitmap contains the state of each object. * Allocation scans the bitmap; this is done only on attach, so we are not * too worried about performance * * For each allocator we can define (thorugh sysctl) the size and * number of each object. Memory is allocated at the first use of a * netmap file descriptor, and can be freed when all such descriptors * have been released (including unmapping the memory). * If memory is scarce, the system tries to get as much as possible * and the sysctl values reflect the actual allocation. * Together with desired values, the sysctl export also absolute * min and maximum values that cannot be overridden. * * struct netmap_if: * variable size, max 16 bytes per ring pair plus some fixed amount. * 1024 bytes should be large enough in practice. * * In the worst case we have one netmap_if per ring in the system. * * struct netmap_ring * variable size, 8 byte per slot plus some fixed amount. * Rings can be large (e.g. 4k slots, or >32Kbytes). * We default to 36 KB (9 pages), and a few hundred rings. * * struct netmap_buffer * The more the better, both because fast interfaces tend to have * many slots, and because we may want to use buffers to store * packets in userspace avoiding copies. * Must contain a full frame (eg 1518, or more for vlans, jumbo * frames etc.) plus be nicely aligned, plus some NICs restrict * the size to multiple of 1K or so. Default to 2K */ #ifndef _NET_NETMAP_MEM2_H_ #define _NET_NETMAP_MEM2_H_ #define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ #define NETMAP_POOL_MAX_NAMSZ 32 enum { NETMAP_IF_POOL = 0, NETMAP_RING_POOL, NETMAP_BUF_POOL, NETMAP_POOLS_NR }; struct netmap_obj_params { u_int size; u_int num; }; struct netmap_obj_pool { char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */ /* ---------------------------------------------------*/ /* these are only meaningful if the pool is finalized */ /* (see 'finalized' field in netmap_mem_d) */ u_int objtotal; /* actual total number of objects. */ u_int memtotal; /* actual total memory space */ u_int numclusters; /* actual number of clusters */ u_int objfree; /* number of free objects. 
*/ struct lut_entry *lut; /* virt,phys addresses, objtotal entries */ uint32_t *bitmap; /* one bit per buffer, 1 means free */ uint32_t bitmap_slots; /* number of uint32 entries in bitmap */ /* ---------------------------------------------------*/ /* limits */ u_int objminsize; /* minimum object size */ u_int objmaxsize; /* maximum object size */ u_int nummin; /* minimum number of objects */ u_int nummax; /* maximum number of objects */ /* these are changed only by config */ u_int _objtotal; /* total number of objects */ u_int _objsize; /* object size */ u_int _clustsize; /* cluster size */ u_int _clustentries; /* objects per cluster */ u_int _numclusters; /* number of clusters */ /* requested values */ u_int r_objtotal; u_int r_objsize; }; #ifdef linux // XXX a mtx would suffice here 20130415 lr #define NMA_LOCK_T struct semaphore #else /* !linux */ #define NMA_LOCK_T struct mtx #endif /* linux */ typedef int (*netmap_mem_finalize_t)(struct netmap_mem_d*); typedef void (*netmap_mem_deref_t)(struct netmap_mem_d*); /* We implement two kinds of netmap_mem_d structures: * * - global: used by hardware NICS; * * - private: used by VALE ports. * * In both cases, the netmap_mem_d structure has the same lifetime as the * netmap_adapter of the corresponding NIC or port. It is the responsibility of * the client code to delete the private allocator when the associated * netmap_adapter is freed (this is implemented by the NAF_MEM_OWNER flag in * netmap.c). The 'refcount' field counts the number of active users of the * structure. The global allocator uses this information to prevent/allow * reconfiguration. The private allocators release all their memory when there * are no active users. By 'active user' we mean an existing netmap_priv * structure holding a reference to the allocator. */ struct netmap_mem_d { NMA_LOCK_T nm_mtx; /* protect the allocator */ u_int nm_totalsize; /* shorthand */ int finalized; /* !=0 iff preallocation done */ int lasterr; /* last error for curr config */ int refcount; /* existing priv structures */ /* the three allocators */ struct netmap_obj_pool pools[NETMAP_POOLS_NR]; netmap_mem_finalize_t finalize; netmap_mem_deref_t deref; }; extern struct netmap_mem_d nm_mem; vm_paddr_t netmap_mem_ofstophys(struct netmap_mem_d *nm_mem, vm_ooffset_t offset); int netmap_mem_finalize(struct netmap_mem_d *nm_mem); int netmap_mem_init(void); void netmap_mem_fini(void); void* netmap_mem_if_new(const char *ifname, struct netmap_adapter *na); void netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp); void netmap_mem_deref(struct netmap_mem_d *nm_mem); u_int netmap_mem_get_totalsize(struct netmap_mem_d *nm_mem); ssize_t netmap_mem_if_offset(struct netmap_mem_d *nm_mem, const void *vaddr); struct netmap_mem_d* netmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd); void netmap_mem_private_delete(struct netmap_mem_d *nm_mem); #define NETMAP_BDG_BUF_SIZE(n) ((n)->pools[NETMAP_BUF_POOL]._objsize) #endif netmap-release/sys/dev/netmap/README000644 000765 000024 00000004147 12220335545 020003 0ustar00luigistaff000000 000000 NOTES on debugging netmap performance /* * debugging support to analyse syscall behaviour * netmap_drop is the point where to drop Path is: ./libthr/thread/thr_syscalls.c lib/libc/i386/SYS.h lib/libc/i386/sys/syscall.S head/sys/kern/syscall.master ; Processed to created init_sysent.c, syscalls.c and syscall.h. 
sys/kern/uipc_syscalls.c::sys_sendto() sendit() kern_sendit() sosend() sys/kern/uipc_socket.c::sosend() so->so_proto->pr_usrreqs->pru_sosend(...) sys/netinet/udp_usrreq.c::udp_usrreqs { } .pru_sosend = sosend_dgram, .pru_send = udp_send, .pru_soreceive = soreceive_dgram, sys/kern/uipc_socket.c::sosend_dgram() m_uiotombuf() (*so->so_proto->pr_usrreqs->pru_send) sys/netinet/udp_usrreq.c::udp_send() sotoinpcb(so); udp_output() INP_RLOCK(inp); INP_HASH_RLOCK(&V_udbinfo); fill udp and ip headers ip_output() 30 udp_send() before udp_output 31 udp_output before ip_output 32 udp_output beginning 33 before in_pcbbind_setup 34 after in_pcbbind_setup 35 before prison_remote_ip4 36 after prison_remote_ip4 37 before computing udp 20 beginning of sys_sendto 21 beginning of sendit 22 sendit after getsockaddr 23 before kern_sendit 24 kern_sendit before getsock_cap() 25 kern_sendit before sosend() 40 sosend_dgram beginning 41 sosend_dgram after sbspace 42 sosend_dgram after m_uiotombuf 43 sosend_dgram after SO_DONTROUTE 44 sosend_dgram after pru_send (useless) 50 ip_output beginning 51 ip_output after flowtable 52 ip_output at sendit 53 ip_output after pfil_hooked 54 ip_output at passout 55 ip_output before if_output 56 ip_output after rtalloc etc. 60 uiomove print 70 pfil.c:: pfil_run_hooks beginning 71 print number of pfil entries 80 ether_output start 81 ether_output after first switch 82 ether_output after M_PREPEND 83 ether_output after simloop 84 ether_output after carp and netgraph 85 ether_output_frame before if_transmit() 90 ixgbe_mq_start (if_transmit) beginning 91 ixgbe_mq_start_locked before ixgbe_xmit FLAGS: 1 disable ETHER_BPF_MTAP 2 disable drbr stats update 4 8 16 32 64 128 */ netmap-release/sys/dev/netmap/vale.4000644 000765 000024 00000016064 12220335545 020140 0ustar00luigistaff000000 000000 .\" Copyright (c) 2012-2013 Luigi Rizzo, Universita` di Pisa .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" This document is derived in part from the enet man page (enet.4) .\" distributed with 4.3BSD Unix. 
.\" .\" $FreeBSD: head/share/man/man4/vale.4 228017 2011-11-27 06:55:57Z gjb $ .\" .Dd July 27, 2012 .Dt VALE 4 .Os .Sh NAME .Nm vale .Nd a very fast Virtual Local Ethernet using the netmap API .Sh SYNOPSIS .Cd device netmap .Sh DESCRIPTION .Nm is a feature of the .Nm netmap module that implements multiple Virtual switches that can be used to interconnect netmap clients, including traffic sources and sinks, packet forwarders, userspace firewalls, and so on. .Pp .Nm is implemented completely in software, and is extremely fast. On a modern machine it can move almost 20 Million packets per second (Mpps) per core with small frames, and about 70 Gbit/s with 1500 byte frames. .Pp .Sh OPERATION .Nm dynamically creates switches and ports as client connect to it using the .Xr netmap 4 API. .Pp .Nm ports are named .Pa vale[bdg:][port] where .Pa vale is the prefix indicating a VALE switch rather than a standard interface, .Pa bdg indicates a specific switch (the colon is a separator), and .Pa port indicates a port within the switch. Bridge and ports names are arbitrary strings, the only constraint being that the full name must fit within 16 characters. .Pp .Nm ports can be physical network interfaces that support .Xr netmap 4 API by specifying the interface name for .Pa [port]. See .Nm OPERATION section in .Xr netmap 4 for details of the naming rule. .Pp Physical interfaces are attached using .Pa NIOCGREGIF command of .Pa ioctl(), and .Pa NETMAP_BDG_ATTACH at .Em nr_cmd field in .Em struct nmreq . The corresponding host stack can also be attached to the bridge, specifying .Pa NETMAP_BDG_HOST in .Em nr_arg1 . To detach the interface from the bridge, .Pa NETMAP_BDG_DETACH is used instead of NETMAP_BDG_ATTACH. The host stack is also detached from the bridge at the same time if it has been attached. .Pp Physical interfaces are treated as system configuration; they are kept being attached even after the configuring process dies, and detached by any process. .Pp Once a physical interface is attached, this interface is no longer available to be directly accessed by netmap clients (user processes) or to be attached by another bridge. On the other hand, when any netmap client holds the physical interface, this interface cannot be attached to a bridge. .Pp .Pa NETMAP_BDG_LIST subcommand in nr_cmd of .Em struct nmreq is used to obtain bridge and port information. There are two modes of how it works; If any .Em nr_name starting from non '\\0' is provided, .Pa ioctl() returning indicates the position of the named interface. This position is represented by an index of the bridge and the port, and put in .Em nr_arg1 and .Em nr_arg2 fields, respectively. If the named interface does not exist, .Pa ioctl() returns .Pa EINVAL . .Pp If .Em nr_name starting from '\\0' is provided, .Pa ioctl() returning indicates the first existing interface on and after the position specified in .Em nr_arg1 and .Em nr_arg2. If the caller specified a port index greater than the highest index of the ports, it is recognized as port index 0 of the next bridge ( .Em nr_arg1 + 1, .Em nr_arg2 = 0). .Pa ioctl() returns .Pa EINVAL if the given position is higher than that of any existing interface. On successful return of .Pa ioctl() , the interface name is also stored in .Em nr_name . .Pa NETMAP_BDG_LIST is always used with .Pa NIOCGINFO command of .Pa ioctl() .Pp Below is an example of printing all the existing ports walking through all the bridges. 
.Bd -literal -compact struct nmreq nmr; int fd = open("/dev/netmap", O_RDWR); bzero(&nmr, sizeof(nmr)); nmr.nr_version = NETMAP_API; nmr.nr_cmd = NETMAP_BDG_LIST; nmr.nr_arg1 = nmr.nr_arg2 = 0; /* start from bridge:0 port:0 */ for (; !ioctl(fd, NIOCGINFO, &nmr); nmr.nr_arg2++) { D("bridge:%d port:%d %s", nmr.nr_arg1, nmr.nr_arg2, nmr.nr_name); nmr.nr_name[0] = '\\0'; } .Ed .Pp See .Xr netmap 4 for details on the API. .Ss LIMITS .Nm currently supports up to 8 switches, 254 ports per switch, 1024 buffers per port. These hard limits will be changed to sysctl variables in future releases. .Pp Attaching the host stack to the bridge imposes significant performance degradation when many packets are forwarded to the host stack by either unicast or broadcast. This is because every single packet going to the host stack causes mbuf allocation in the same thread context as one forwarding packets. .Pp .Sh SYSCTL VARIABLES .Nm uses the following sysctl variables to control operation: .Bl -tag -width 12 .It dev.netmap.bridge The maximum number of packets processed internally in each iteration. Defaults to 1024, use lower values to trade latency with throughput. .Pp .It dev.netmap.verbose Set to non-zero values to enable in-kernel diagnostics. .El .Pp .Sh EXAMPLES Create one switch, with a traffic generator connected to one port, and a netmap-enabled tcpdump instance on another port: .Bd -literal -offset indent tcpdump -ni vale-a:1 & pkt-gen -i vale-a:0 -f tx & .Ed .Pp Create two switches, each connected to two qemu machines on different ports. .Bd -literal -offset indent qemu -net nic -net netmap,ifname=vale-1:a ... & qemu -net nic -net netmap,ifname=vale-1:b ... & qemu -net nic -net netmap,ifname=vale-2:c ... & qemu -net nic -net netmap,ifname=vale-2:d ... & .Ed .Sh SEE ALSO .Xr netmap 4 .Pp .Xr http://info.iet.unipi.it/~luigi/vale/ .Pp Luigi Rizzo, Giuseppe Lettieri: VALE, a switched ethernet for virtual machines, June 2012, http://info.iet.unipi.it/~luigi/vale/ .Sh AUTHORS .An -nosplit The .Nm switch has been designed and implemented in 2012 by .An Luigi Rizzo and .An Giuseppe Lettieri at the Universita` di Pisa. .Pp .Nm has been funded by the European Commission within FP7 Projects CHANGE (257422) and OPENLAB (287581). netmap-release/sys/modules/000755 000765 000024 00000000000 12220335545 016523 5ustar00luigistaff000000 000000 netmap-release/sys/modules/netmap/000755 000765 000024 00000000000 12220335545 020007 5ustar00luigistaff000000 000000 netmap-release/sys/modules/netmap/Makefile000644 000765 000024 00000000503 12220335545 021445 0ustar00luigistaff000000 000000 # $FreeBSD$ # # Compile netmap as a module, useful if you want a netmap bridge # or loadable drivers. .PATH: ${.CURDIR}/../../dev/netmap .PATH.h: ${.CURDIR}/../../net KMOD = netmap SRCS = device_if.h bus_if.h opt_netmap.h SRCS += netmap.c netmap.h netmap_kern.h SRCS += netmap_mem2.c netmap_mem2.h .include netmap-release/examples/000755 000765 000024 00000000000 12230345061 016046 5ustar00luigistaff000000 000000 netmap-release/examples/bridge.c000644 000765 000024 00000015650 12220335545 017462 0ustar00luigistaff000000 000000 /* * (C) 2011 Luigi Rizzo, Matteo Landi * * BSD license * * A netmap client to bridge two network interfaces * (or one interface and the host stack). 
* * $FreeBSD: head/tools/tools/netmap/bridge.c 228975 2011-12-30 00:04:11Z uqs $ */ #include "nm_util.h" int verbose = 0; char *version = "$Id$"; static int do_abort = 0; static void sigint_h(int sig) { (void)sig; /* UNUSED */ do_abort = 1; signal(SIGINT, SIG_DFL); } /* * move up to 'limit' pkts from rxring to txring swapping buffers. */ static int process_rings(struct netmap_ring *rxring, struct netmap_ring *txring, u_int limit, const char *msg) { u_int j, k, m = 0; /* print a warning if any of the ring flags is set (e.g. NM_REINIT) */ if (rxring->flags || txring->flags) D("%s rxflags %x txflags %x", msg, rxring->flags, txring->flags); j = rxring->cur; /* RX */ k = txring->cur; /* TX */ if (rxring->avail < limit) limit = rxring->avail; if (txring->avail < limit) limit = txring->avail; m = limit; while (limit-- > 0) { struct netmap_slot *rs = &rxring->slot[j]; struct netmap_slot *ts = &txring->slot[k]; #ifdef NO_SWAP char *rxbuf = NETMAP_BUF(rxring, rs->buf_idx); char *txbuf = NETMAP_BUF(txring, ts->buf_idx); #else uint32_t pkt; #endif /* swap packets */ if (ts->buf_idx < 2 || rs->buf_idx < 2) { D("wrong index rx[%d] = %d -> tx[%d] = %d", j, rs->buf_idx, k, ts->buf_idx); sleep(2); } #ifndef NO_SWAP pkt = ts->buf_idx; ts->buf_idx = rs->buf_idx; rs->buf_idx = pkt; #endif /* copy the packet length. */ if (rs->len < 14 || rs->len > 2048) D("wrong len %d rx[%d] -> tx[%d]", rs->len, j, k); else if (verbose > 1) D("%s send len %d rx[%d] -> tx[%d]", msg, rs->len, j, k); ts->len = rs->len; #ifdef NO_SWAP pkt_copy(rxbuf, txbuf, ts->len); #else /* report the buffer change. */ ts->flags |= NS_BUF_CHANGED; rs->flags |= NS_BUF_CHANGED; #endif /* NO_SWAP */ j = NETMAP_RING_NEXT(rxring, j); k = NETMAP_RING_NEXT(txring, k); } rxring->avail -= m; txring->avail -= m; rxring->cur = j; txring->cur = k; if (verbose && m > 0) D("%s sent %d packets to %p", msg, m, txring); return (m); } /* move packts from src to destination */ static int move(struct my_ring *src, struct my_ring *dst, u_int limit) { struct netmap_ring *txring, *rxring; u_int m = 0, si = src->begin, di = dst->begin; const char *msg = (src->queueid & NETMAP_SW_RING) ? "host->net" : "net->host"; while (si < src->end && di < dst->end) { rxring = NETMAP_RXRING(src->nifp, si); txring = NETMAP_TXRING(dst->nifp, di); ND("txring %p rxring %p", txring, rxring); if (rxring->avail == 0) { si++; continue; } if (txring->avail == 0) { di++; continue; } m += process_rings(rxring, txring, limit, msg); } return (m); } /* * how many packets on this set of queues ? */ static int pkt_queued(struct my_ring *me, int tx) { u_int i, tot = 0; ND("me %p begin %d end %d", me, me->begin, me->end); for (i = me->begin; i < me->end; i++) { struct netmap_ring *ring = tx ? NETMAP_TXRING(me->nifp, i) : NETMAP_RXRING(me->nifp, i); tot += ring->avail; } if (0 && verbose && tot && !tx) D("ring %s %s %s has %d avail at %d", me->ifname, tx ? "tx": "rx", me->end >= me->nifp->ni_tx_rings ? // XXX who comes first ? "host":"net", tot, NETMAP_TXRING(me->nifp, me->begin)->cur); return tot; } static void usage(void) { fprintf(stderr, "usage: bridge [-v] [-i ifa] [-i ifb] [-b burst] [-w wait_time] [iface]\n"); exit(1); } /* * bridge [-v] if1 [if2] * * If only one name, or the two interfaces are the same, * bridges userland and the adapter. Otherwise bridge * two intefaces. 
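 * Examples (illustrative):
 *	bridge -i ix0 -i ix1	# forward between two NICs
 *	bridge -i ix0		# forward between ix0 and the host stack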
*/ int main(int argc, char **argv) { struct pollfd pollfd[2]; int i, ch; u_int burst = 1024, wait_link = 4; struct my_ring me[2]; char *ifa = NULL, *ifb = NULL; fprintf(stderr, "%s %s built %s %s\n", argv[0], version, __DATE__, __TIME__); bzero(me, sizeof(me)); while ( (ch = getopt(argc, argv, "b:i:vw:")) != -1) { switch (ch) { default: D("bad option %c %s", ch, optarg); usage(); break; case 'b': /* burst */ burst = atoi(optarg); break; case 'i': /* interface */ if (ifa == NULL) ifa = optarg; else if (ifb == NULL) ifb = optarg; else D("%s ignored, already have 2 interfaces", optarg); break; case 'v': verbose++; break; case 'w': wait_link = atoi(optarg); break; } } argc -= optind; argv += optind; if (argc > 1) ifa = argv[1]; if (argc > 2) ifb = argv[2]; if (argc > 3) burst = atoi(argv[3]); if (!ifb) ifb = ifa; if (!ifa) { D("missing interface"); usage(); } if (burst < 1 || burst > 8192) { D("invalid burst %d, set to 1024", burst); burst = 1024; } if (wait_link > 100) { D("invalid wait_link %d, set to 4", wait_link); wait_link = 4; } /* setup netmap interface #1. */ me[0].ifname = ifa; me[1].ifname = ifb; if (!strcmp(ifa, ifb)) { D("same interface, endpoint 0 goes to host"); i = NETMAP_SW_RING; } else { /* two different interfaces. Take all rings on if1 */ i = 0; // all hw rings } if (netmap_open(me, i, 1)) return (1); me[1].mem = me[0].mem; /* copy the pointer, so only one mmap */ if (netmap_open(me+1, 0, 1)) return (1); /* setup poll(2) variables. */ memset(pollfd, 0, sizeof(pollfd)); for (i = 0; i < 2; i++) { pollfd[i].fd = me[i].fd; pollfd[i].events = (POLLIN); } D("Wait %d secs for link to come up...", wait_link); sleep(wait_link); D("Ready to go, %s 0x%x/%d <-> %s 0x%x/%d.", me[0].ifname, me[0].queueid, me[0].nifp->ni_rx_rings, me[1].ifname, me[1].queueid, me[1].nifp->ni_rx_rings); /* main loop */ signal(SIGINT, sigint_h); while (!do_abort) { int n0, n1, ret; pollfd[0].events = pollfd[1].events = 0; pollfd[0].revents = pollfd[1].revents = 0; n0 = pkt_queued(me, 0); n1 = pkt_queued(me + 1, 0); if (n0) pollfd[1].events |= POLLOUT; else pollfd[0].events |= POLLIN; if (n1) pollfd[0].events |= POLLOUT; else pollfd[1].events |= POLLIN; ret = poll(pollfd, 2, 2500); if (ret <= 0 || verbose) D("poll %s [0] ev %x %x rx %d@%d tx %d," " [1] ev %x %x rx %d@%d tx %d", ret <= 0 ? 
"timeout" : "ok", pollfd[0].events, pollfd[0].revents, pkt_queued(me, 0), me[0].rx->cur, pkt_queued(me, 1), pollfd[1].events, pollfd[1].revents, pkt_queued(me+1, 0), me[1].rx->cur, pkt_queued(me+1, 1) ); if (ret < 0) continue; if (pollfd[0].revents & POLLERR) { D("error on fd0, rxcur %d@%d", me[0].rx->avail, me[0].rx->cur); } if (pollfd[1].revents & POLLERR) { D("error on fd1, rxcur %d@%d", me[1].rx->avail, me[1].rx->cur); } if (pollfd[0].revents & POLLOUT) { move(me + 1, me, burst); // XXX we don't need the ioctl */ // ioctl(me[0].fd, NIOCTXSYNC, NULL); } if (pollfd[1].revents & POLLOUT) { move(me, me + 1, burst); // XXX we don't need the ioctl */ // ioctl(me[1].fd, NIOCTXSYNC, NULL); } } D("exiting"); netmap_close(me + 1); netmap_close(me + 0); return (0); } netmap-release/examples/click-test.cfg000644 000765 000024 00000000644 12220335545 020602 0ustar00luigistaff000000 000000 // // $FreeBSD: head/tools/tools/netmap/click-test.cfg 227614 2011-11-17 12:17:39Z luigi $ // // A sample test configuration for click // // // create a switch myswitch :: EtherSwitch; // two input devices c0 :: FromDevice(ix0, PROMISC true); c1 :: FromDevice(ix1, PROMISC true); // and now pass packets around c0[0] -> [0]sw[0] -> Queue(10000) -> ToDevice(ix0); c1[0] -> [1]sw[1] -> Queue(10000) -> ToDevice(ix1); netmap-release/examples/Makefile000644 000765 000024 00000002354 12220335545 017517 0ustar00luigistaff000000 000000 # For multiple programs using a single source file each, # we can just define 'progs' and create custom targets. PROGS = pkt-gen bridge testpcap libnetmap.so #PROGS += pingd PROGS += testlock testcsum test_select kern_test testmmap vale-ctl CLEANFILES = $(PROGS) pcap.o nm_util.o *.o NO_MAN= CFLAGS = -O2 -pipe CFLAGS += -Werror -Wall CFLAGS += -I ../sys # -I/home/luigi/FreeBSD/head/sys -I../sys CFLAGS += -Wextra CFLAGS += -DNO_PCAP LDFLAGS += -lpthread #LDFLAGS += -lpcap LDFLAGS += -lrt # needed on linux, does not harm on BSD #SRCS = pkt-gen.c all: $(PROGS) testpcap: pcap.c libnetmap.so nm_util.o $(CC) $(CFLAGS) -DTEST -o testpcap pcap.c nm_util.o $(LDFLAGS) -lpcap kern_test: testmod/kern_test.c nm_util.o pkt-gen.o bridge.o libnetmap.so pcap.o: nm_util.h pkt-gen: pkt-gen.o nm_util.o $(CC) $(CFLAGS) -o pkt-gen pkt-gen.o nm_util.o $(LDFLAGS) bridge: bridge.o nm_util.o $(CC) $(CFLAGS) -o bridge bridge.o nm_util.o vale-ctl: vale-ctl.o $(CC) $(CFLAGS) -o vale-ctl vale-ctl.o libnetmap.so: pcap.c nm_util.c $(CC) $(CFLAGS) -fpic -c pcap.c $(CC) $(CFLAGS) -fpic -c nm_util.c $(CC) -shared -o libnetmap.so pcap.o nm_util.o clean: -@rm -rf $(CLEANFILES) testlock: testlock.c $(CC) $(CFLAGS) -o testlock testlock.c -lpthread $(LDFLAGS) netmap-release/examples/nm_util.c000644 000765 000024 00000014375 12220335545 017700 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD$ * $Id$ * * utilities to use netmap devices. * This does the basic functions of opening a device and issuing * ioctls() */ #include "nm_util.h" extern int verbose; int nm_do_ioctl(struct my_ring *me, u_long what, int subcmd) { struct ifreq ifr; int error; #if defined( __FreeBSD__ ) || defined (__APPLE__) int fd = me->fd; #endif #ifdef linux struct ethtool_value eval; int fd; fd = socket(AF_INET, SOCK_DGRAM, 0); if (fd < 0) { printf("Error: cannot get device control socket.\n"); return -1; } #endif /* linux */ (void)subcmd; // unused bzero(&ifr, sizeof(ifr)); strncpy(ifr.ifr_name, me->ifname, sizeof(ifr.ifr_name)); switch (what) { case SIOCSIFFLAGS: #ifndef __APPLE__ ifr.ifr_flagshigh = me->if_flags >> 16; #endif ifr.ifr_flags = me->if_flags & 0xffff; break; #if defined( __FreeBSD__ ) case SIOCSIFCAP: ifr.ifr_reqcap = me->if_reqcap; ifr.ifr_curcap = me->if_curcap; break; #endif #ifdef linux case SIOCETHTOOL: eval.cmd = subcmd; eval.data = 0; ifr.ifr_data = (caddr_t)&eval; break; #endif /* linux */ } error = ioctl(fd, what, &ifr); if (error) goto done; switch (what) { case SIOCGIFFLAGS: #ifndef __APPLE__ me->if_flags = (ifr.ifr_flagshigh << 16) | (0xffff & ifr.ifr_flags); #endif if (verbose) D("flags are 0x%x", me->if_flags); break; #if defined( __FreeBSD__ ) case SIOCGIFCAP: me->if_reqcap = ifr.ifr_reqcap; me->if_curcap = ifr.ifr_curcap; if (verbose) D("curcap are 0x%x", me->if_curcap); break; #endif /* __FreeBSD__ */ } done: #ifdef linux close(fd); #endif if (error) D("ioctl error %d %lu", error, what); return error; } /* * open a device. if me->mem is null then do an mmap. * Returns the file descriptor. * The extra flag checks configures promisc mode. */ int netmap_open(struct my_ring *me, int ringid, int promisc) { int fd, err, l; struct nmreq req; me->fd = fd = open("/dev/netmap", O_RDWR); if (fd < 0) { D("Unable to open /dev/netmap"); return (-1); } bzero(&req, sizeof(req)); req.nr_version = NETMAP_API; strncpy(req.nr_name, me->ifname, sizeof(req.nr_name)); req.nr_ringid = ringid; err = ioctl(fd, NIOCGINFO, &req); if (err) { D("cannot get info on %s, errno %d ver %d", me->ifname, errno, req.nr_version); goto error; } me->memsize = l = req.nr_memsize; if (verbose) D("memsize is %d MB", l>>20); err = ioctl(fd, NIOCREGIF, &req); if (err) { D("Unable to register %s", me->ifname); goto error; } if (me->mem == NULL) { me->mem = mmap(0, l, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0); if (me->mem == MAP_FAILED) { D("Unable to mmap"); me->mem = NULL; goto error; } } /* Set the operating mode. 
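 * For hardware rings this marks the interface up if needed, optionally
 * enables promiscuous mode, and disables checksum/TSO and similar offloads
 * that would get in the way of the raw frames exchanged through netmap.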
*/ if (ringid != NETMAP_SW_RING) { nm_do_ioctl(me, SIOCGIFFLAGS, 0); if ((me[0].if_flags & IFF_UP) == 0) { D("%s is down, bringing up...", me[0].ifname); me[0].if_flags |= IFF_UP; } if (promisc) { me[0].if_flags |= IFF_PPROMISC; nm_do_ioctl(me, SIOCSIFFLAGS, 0); } #ifdef __FreeBSD__ /* also disable checksums etc. */ nm_do_ioctl(me, SIOCGIFCAP, 0); me[0].if_reqcap = me[0].if_curcap; me[0].if_reqcap &= ~(IFCAP_HWCSUM | IFCAP_TSO | IFCAP_TOE); nm_do_ioctl(me+0, SIOCSIFCAP, 0); #endif #ifdef linux /* disable: * - generic-segmentation-offload * - tcp-segmentation-offload * - rx-checksumming * - tx-checksumming * XXX check how to set back the caps. */ nm_do_ioctl(me, SIOCETHTOOL, ETHTOOL_SGSO); nm_do_ioctl(me, SIOCETHTOOL, ETHTOOL_STSO); nm_do_ioctl(me, SIOCETHTOOL, ETHTOOL_SRXCSUM); nm_do_ioctl(me, SIOCETHTOOL, ETHTOOL_STXCSUM); #endif /* linux */ } me->nifp = NETMAP_IF(me->mem, req.nr_offset); me->queueid = ringid; if (ringid & NETMAP_SW_RING) { me->begin = req.nr_rx_rings; me->end = me->begin + 1; me->tx = NETMAP_TXRING(me->nifp, req.nr_tx_rings); me->rx = NETMAP_RXRING(me->nifp, req.nr_rx_rings); } else if (ringid & NETMAP_HW_RING) { D("XXX check multiple threads"); me->begin = ringid & NETMAP_RING_MASK; me->end = me->begin + 1; me->tx = NETMAP_TXRING(me->nifp, me->begin); me->rx = NETMAP_RXRING(me->nifp, me->begin); } else { me->begin = 0; me->end = req.nr_rx_rings; // XXX max of the two me->tx = NETMAP_TXRING(me->nifp, 0); me->rx = NETMAP_RXRING(me->nifp, 0); } return (0); error: close(me->fd); return -1; } int netmap_close(struct my_ring *me) { D(""); if (me->mem) munmap(me->mem, me->memsize); close(me->fd); return (0); } /* * how many packets on this set of queues ? */ int pkt_queued(struct my_ring *me, int tx) { u_int i, tot = 0; ND("me %p begin %d end %d", me, me->begin, me->end); for (i = me->begin; i < me->end; i++) { struct netmap_ring *ring = tx ? NETMAP_TXRING(me->nifp, i) : NETMAP_RXRING(me->nifp, i); tot += ring->avail; } if (0 && verbose && tot && !tx) D("ring %s %s %s has %d avail at %d", me->ifname, tx ? "tx": "rx", me->end >= me->nifp->ni_tx_rings ? // XXX who comes first ? "host":"net", tot, NETMAP_TXRING(me->nifp, me->begin)->cur); return tot; } netmap-release/examples/nm_util.h000644 000765 000024 00000012127 12220335545 017676 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $Id$ * * Some utilities to build netmap-based programs. */ #ifndef _NM_UTIL_H #define _NM_UTIL_H #include #include /* signal */ #include #include #include /* PRI* macros */ #include /* strcmp */ #include /* open */ #include /* close */ #include /* getifaddrs */ #include /* PROT_* */ #include /* ioctl */ #include #include /* sockaddr.. */ #include /* ntohs */ #include #include /* sysctl */ #include /* timersub */ #include #include /* ifreq */ #include #include #include #include #include #ifndef MY_PCAP /* use the system's pcap if available */ #ifdef NO_PCAP #define PCAP_ERRBUF_SIZE 512 typedef void pcap_t; struct pcap_pkthdr; #define pcap_inject(a,b,c) ((void)a, (void)b, (void)c, -1) #define pcap_dispatch(a, b, c, d) (void)c #define pcap_open_live(a, b, c, d, e) ((void)e, NULL) #else /* !NO_PCAP */ #include // XXX do we need it ? #endif /* !NO_PCAP */ #endif // XXX hack #include /* pthread_* */ #ifdef linux #define ifr_flagshigh ifr_flags #define ifr_curcap ifr_flags #define ifr_reqcap ifr_flags #define IFF_PPROMISC IFF_PROMISC #include #include #define CLOCK_REALTIME_PRECISE CLOCK_REALTIME #include /* ether_aton */ #include /* sockaddr_ll */ #endif /* linux */ #ifdef __FreeBSD__ #include /* le64toh */ #include #include /* pthread w/ affinity */ #include /* cpu_set */ #include /* LLADDR */ #endif /* __FreeBSD__ */ #ifdef __APPLE__ #define ifr_flagshigh ifr_flags // XXX #define IFF_PPROMISC IFF_PROMISC #include /* LLADDR */ #define clock_gettime(a,b) \ do {struct timespec t0 = {0,0}; *(b) = t0; } while (0) #endif /* __APPLE__ */ static inline int min(int a, int b) { return a < b ? a : b; } extern int time_second; /* debug support */ #define ND(format, ...) do {} while(0) #define D(format, ...) \ fprintf(stderr, "%s [%d] " format "\n", \ __FUNCTION__, __LINE__, ##__VA_ARGS__) #define RD(lps, format, ...) \ do { \ static int t0, cnt; \ if (t0 != time_second) { \ t0 = time_second; \ cnt = 0; \ } \ if (cnt++ < lps) \ D(format, ##__VA_ARGS__); \ } while (0) // XXX does it work on 32-bit machines ? static inline void prefetch (const void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(const unsigned long *)x)); } // XXX only for multiples of 64 bytes, non overlapped. 
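// pkt_copy() copies in 64-byte chunks (eight 64-bit loads/stores per
// iteration), so it may write up to the next 64-byte boundary past 'l';
// lengths of 1KB or more fall back to bcopy().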
static inline void pkt_copy(const void *_src, void *_dst, int l) { const uint64_t *src = _src; uint64_t *dst = _dst; #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) if (unlikely(l >= 1024)) { bcopy(src, dst, l); return; } for (; l > 0; l-=64) { *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; } } /* * info on a ring we handle */ struct my_ring { const char *ifname; int fd; char *mem; /* userspace mmap address */ u_int memsize; u_int queueid; u_int begin, end; /* first..last+1 rings to check */ struct netmap_if *nifp; struct netmap_ring *tx, *rx; /* shortcuts */ uint32_t if_flags; uint32_t if_reqcap; uint32_t if_curcap; }; int netmap_open(struct my_ring *me, int ringid, int promisc); int netmap_close(struct my_ring *me); int nm_do_ioctl(struct my_ring *me, u_long what, int subcmd); #endif /* _NM_UTIL_H */ netmap-release/examples/pcap.c000644 000765 000024 00000032363 12220335545 017151 0ustar00luigistaff000000 000000 /* * (C) 2011-2012 Luigi Rizzo * * BSD license * * A simple library that maps some pcap functions onto netmap * This is not 100% complete but enough to let tcpdump, trafshow * and other apps work. * * $FreeBSD: head/tools/tools/netmap/pcap.c 227614 2011-11-17 12:17:39Z luigi $ */ #define MY_PCAP #include "nm_util.h" char *version = "$Id$"; int verbose = 0; /* * We redefine here a number of structures that are in pcap.h * so we can compile this file without the system header. */ #ifndef PCAP_ERRBUF_SIZE #define PCAP_ERRBUF_SIZE 128 /* * Each packet is accompanied by a header including the timestamp, * captured size and actual size. */ struct pcap_pkthdr { struct timeval ts; /* time stamp */ uint32_t caplen; /* length of portion present */ uint32_t len; /* length this packet (off wire) */ }; typedef struct pcap_if pcap_if_t; /* * Representation of an interface address. */ struct pcap_addr { struct pcap_addr *next; struct sockaddr *addr; /* address */ struct sockaddr *netmask; /* netmask for the above */ struct sockaddr *broadaddr; /* broadcast addr for the above */ struct sockaddr *dstaddr; /* P2P dest. address for the above */ }; struct pcap_if { struct pcap_if *next; char *name; /* name to hand to "pcap_open_live()" */ char *description; /* textual description of interface, or NULL */ struct pcap_addr *addresses; uint32_t flags; /* PCAP_IF_ interface flags */ }; /* * We do not support stats (yet) */ struct pcap_stat { u_int ps_recv; /* number of packets received */ u_int ps_drop; /* number of packets dropped */ u_int ps_ifdrop; /* drops by interface XXX not yet supported */ #ifdef WIN32 u_int bs_capt; /* number of packets that reach the app. 
*/ #endif /* WIN32 */ }; typedef void pcap_t; typedef enum { PCAP_D_INOUT = 0, PCAP_D_IN, PCAP_D_OUT } pcap_direction_t; typedef void (*pcap_handler)(u_char *user, const struct pcap_pkthdr *h, const u_char *bytes); char errbuf[PCAP_ERRBUF_SIZE]; pcap_t *pcap_open_live(const char *device, int snaplen, int promisc, int to_ms, char *errbuf); int pcap_findalldevs(pcap_if_t **alldevsp, char *errbuf); void pcap_close(pcap_t *p); int pcap_get_selectable_fd(pcap_t *p); int pcap_dispatch(pcap_t *p, int cnt, pcap_handler callback, u_char *user); int pcap_setnonblock(pcap_t *p, int nonblock, char *errbuf); int pcap_setdirection(pcap_t *p, pcap_direction_t d); char *pcap_lookupdev(char *errbuf); int pcap_inject(pcap_t *p, const void *buf, size_t size); int pcap_fileno(pcap_t *p); const char *pcap_lib_version(void); struct eproto { const char *s; u_short p; }; #endif /* !PCAP_ERRBUF_SIZE */ #ifndef TEST /* * build as a shared library */ char pcap_version[] = "libnetmap version 0.3"; /* * Our equivalent of pcap_t */ struct pcap_ring { struct my_ring me; #if 0 const char *ifname; //struct nmreq nmr; int fd; char *mem; /* userspace mmap address */ u_int memsize; u_int queueid; u_int begin, end; /* first..last+1 rings to check */ struct netmap_if *nifp; uint32_t if_flags; uint32_t if_reqcap; uint32_t if_curcap; #endif int snaplen; char *errbuf; int promisc; int to_ms; struct pcap_pkthdr hdr; struct pcap_stat st; char msg[PCAP_ERRBUF_SIZE]; }; /* * There is a set of functions that tcpdump expects even if probably * not used */ struct eproto eproto_db[] = { { "ip", ETHERTYPE_IP }, { "arp", ETHERTYPE_ARP }, { (char *)0, 0 } }; const char *pcap_lib_version(void) { return pcap_version; } int pcap_findalldevs(pcap_if_t **alldevsp, char *errbuf) { pcap_if_t *top = NULL; #ifndef linux struct ifaddrs *i_head, *i; pcap_if_t *cur; struct pcap_addr *tail = NULL; int l; D("listing all devs"); *alldevsp = NULL; i_head = NULL; if (getifaddrs(&i_head)) { D("cannot get if addresses"); return -1; } for (i = i_head; i; i = i->ifa_next) { //struct ifaddrs *ifa; struct pcap_addr *pca; //struct sockaddr *sa; D("got interface %s", i->ifa_name); if (!top || strcmp(top->name, i->ifa_name)) { /* new interface */ l = sizeof(*top) + strlen(i->ifa_name) + 1; cur = calloc(1, l); if (cur == NULL) { D("no space for if descriptor"); continue; } cur->name = (char *)(cur + 1); //cur->flags = i->ifa_flags; strcpy(cur->name, i->ifa_name); cur->description = NULL; cur->next = top; top = cur; tail = NULL; } /* now deal with addresses */ D("%s addr family %d len %d %s %s", top->name, i->ifa_addr->sa_family, i->ifa_addr->sa_len, i->ifa_netmask ? "Netmask" : "", i->ifa_broadaddr ? "Broadcast" : ""); l = sizeof(struct pcap_addr) + (i->ifa_addr ? i->ifa_addr->sa_len:0) + (i->ifa_netmask ? i->ifa_netmask->sa_len:0) + (i->ifa_broadaddr? 
i->ifa_broadaddr->sa_len:0); pca = calloc(1, l); if (pca == NULL) { D("no space for if addr"); continue; } #define SA_NEXT(x) ((struct sockaddr *)((char *)(x) + (x)->sa_len)) pca->addr = (struct sockaddr *)(pca + 1); pkt_copy(i->ifa_addr, pca->addr, i->ifa_addr->sa_len); if (i->ifa_netmask) { pca->netmask = SA_NEXT(pca->addr); bcopy(i->ifa_netmask, pca->netmask, i->ifa_netmask->sa_len); if (i->ifa_broadaddr) { pca->broadaddr = SA_NEXT(pca->netmask); bcopy(i->ifa_broadaddr, pca->broadaddr, i->ifa_broadaddr->sa_len); } } if (tail == NULL) { top->addresses = pca; } else { tail->next = pca; } tail = pca; } freeifaddrs(i_head); #endif /* !linux */ (void)errbuf; /* UNUSED */ *alldevsp = top; return 0; } void pcap_freealldevs(pcap_if_t *alldevs) { (void)alldevs; /* UNUSED */ D("unimplemented"); } char * pcap_lookupdev(char *buf) { D("%s", buf); strcpy(buf, "/dev/netmap"); return buf; } pcap_t * pcap_create(const char *source, char *errbuf) { D("src %s (call open liveted)", source); return pcap_open_live(source, 0, 1, 100, errbuf); } int pcap_activate(pcap_t *p) { D("pcap %p running", p); return 0; } int pcap_can_set_rfmon(pcap_t *p) { (void)p; /* UNUSED */ D(""); return 0; /* no we can't */ } int pcap_set_snaplen(pcap_t *p, int snaplen) { struct pcap_ring *me = p; D("len %d", snaplen); me->snaplen = snaplen; return 0; } int pcap_snapshot(pcap_t *p) { struct pcap_ring *me = p; D("len %d", me->snaplen); return me->snaplen; } int pcap_lookupnet(const char *device, uint32_t *netp, uint32_t *maskp, char *errbuf) { (void)errbuf; /* UNUSED */ D("device %s", device); inet_aton("10.0.0.255", (struct in_addr *)netp); inet_aton("255.255.255.0",(struct in_addr *) maskp); return 0; } int pcap_set_promisc(pcap_t *p, int promisc) { struct pcap_ring *me = p; D("promisc %d", promisc); if (nm_do_ioctl(&me->me, SIOCGIFFLAGS, 0)) D("SIOCGIFFLAGS failed"); if (promisc) { me->me.if_flags |= IFF_PPROMISC; } else { me->me.if_flags &= ~IFF_PPROMISC; } if (nm_do_ioctl(&me->me, SIOCSIFFLAGS, 0)) D("SIOCSIFFLAGS failed"); return 0; } int pcap_set_timeout(pcap_t *p, int to_ms) { struct pcap_ring *me = p; D("%d ms", to_ms); me->to_ms = to_ms; return 0; } struct bpf_program; int pcap_compile(pcap_t *p, struct bpf_program *fp, const char *str, int optimize, uint32_t netmask) { (void)p; /* UNUSED */ (void)fp; /* UNUSED */ (void)optimize; /* UNUSED */ (void)netmask; /* UNUSED */ D("%s", str); return 0; } int pcap_setfilter(pcap_t *p, struct bpf_program *fp) { (void)p; /* UNUSED */ (void)fp; /* UNUSED */ D(""); return 0; } int pcap_datalink(pcap_t *p) { (void)p; /* UNUSED */ D("returns 1"); return 1; // ethernet } const char * pcap_datalink_val_to_name(int dlt) { D("%d returns DLT_EN10MB", dlt); return "DLT_EN10MB"; } const char * pcap_datalink_val_to_description(int dlt) { D("%d returns Ethernet link", dlt); return "Ethernet link"; } struct pcap_stat; int pcap_stats(pcap_t *p, struct pcap_stat *ps) { struct pcap_ring *me = p; ND(""); *ps = me->st; return 0; /* accumulate from pcap_dispatch() */ }; char * pcap_geterr(pcap_t *p) { struct pcap_ring *me = p; D(""); return me->msg; } pcap_t * pcap_open_live(const char *device, int snaplen, int promisc, int to_ms, char *errbuf) { struct pcap_ring *me; int l; (void)snaplen; /* UNUSED */ (void)errbuf; /* UNUSED */ if (!device) { D("missing device name"); return NULL; } l = strlen(device) + 1; D("request to open %s snaplen %d promisc %d timeout %dms", device, snaplen, promisc, to_ms); me = calloc(1, sizeof(*me) + l); if (me == NULL) { D("failed to allocate struct for %s", device); return 
NULL; } me->me.ifname = (char *)(me + 1); strcpy((char *)me->me.ifname, device); if (netmap_open(&me->me, 0, promisc)) { D("error opening %s", device); free(me); return NULL; } me->to_ms = to_ms; return (pcap_t *)me; } void pcap_close(pcap_t *p) { struct my_ring *me = p; D(""); if (!me) return; if (me->mem) munmap(me->mem, me->memsize); /* restore original flags ? */ close(me->fd); bzero(me, sizeof(*me)); free(me); } int pcap_fileno(pcap_t *p) { struct my_ring *me = p; D("returns %d", me->fd); return me->fd; } int pcap_get_selectable_fd(pcap_t *p) { struct my_ring *me = p; ND(""); return me->fd; } int pcap_setnonblock(pcap_t *p, int nonblock, char *errbuf) { (void)p; /* UNUSED */ (void)errbuf; /* UNUSED */ D("mode is %d", nonblock); return 0; /* ignore */ } int pcap_setdirection(pcap_t *p, pcap_direction_t d) { (void)p; /* UNUSED */ (void)d; /* UNUSED */ D(""); return 0; /* ignore */ }; int pcap_dispatch(pcap_t *p, int cnt, pcap_handler callback, u_char *user) { struct pcap_ring *pme = p; struct my_ring *me = &pme->me; int got = 0; u_int si; ND("cnt %d", cnt); if (cnt == 0) cnt = -1; /* scan all rings */ for (si = me->begin; si < me->end; si++) { struct netmap_ring *ring = NETMAP_RXRING(me->nifp, si); ND("ring has %d pkts", ring->avail); if (ring->avail == 0) continue; pme->hdr.ts = ring->ts; /* * XXX a proper prefetch should be done as * prefetch(i); callback(i-1); ... */ while ((cnt == -1 || cnt != got) && ring->avail > 0) { u_int i = ring->cur; u_int idx = ring->slot[i].buf_idx; if (idx < 2) { D("%s bogus RX index %d at offset %d", me->nifp->ni_name, idx, i); sleep(2); } u_char *buf = (u_char *)NETMAP_BUF(ring, idx); prefetch(buf); pme->hdr.len = pme->hdr.caplen = ring->slot[i].len; // D("call %p len %d", p, me->hdr.len); callback(user, &pme->hdr, buf); ring->cur = NETMAP_RING_NEXT(ring, i); ring->avail--; got++; } } pme->st.ps_recv += got; return got; } int pcap_inject(pcap_t *p, const void *buf, size_t size) { struct my_ring *me = p; u_int si; ND("cnt %d", cnt); /* scan all rings */ for (si = me->begin; si < me->end; si++) { struct netmap_ring *ring = NETMAP_TXRING(me->nifp, si); ND("ring has %d pkts", ring->avail); if (ring->avail == 0) continue; u_int i = ring->cur; u_int idx = ring->slot[i].buf_idx; if (idx < 2) { D("%s bogus TX index %d at offset %d", me->nifp->ni_name, idx, i); sleep(2); } u_char *dst = (u_char *)NETMAP_BUF(ring, idx); ring->slot[i].len = size; pkt_copy(buf, dst, size); ring->cur = NETMAP_RING_NEXT(ring, i); ring->avail--; // if (ring->avail == 0) ioctl(me->fd, NIOCTXSYNC, NULL); return size; } errno = ENOBUFS; return -1; } int pcap_loop(pcap_t *p, int cnt, pcap_handler callback, u_char *user) { struct pcap_ring *me = p; struct pollfd fds[1]; int i; ND("cnt %d", cnt); memset(fds, 0, sizeof(fds)); fds[0].fd = me->me.fd; fds[0].events = (POLLIN); while (cnt == -1 || cnt > 0) { if (poll(fds, 1, me->to_ms) <= 0) { D("poll error/timeout"); continue; } i = pcap_dispatch(p, cnt, callback, user); if (cnt > 0) cnt -= i; } return 0; } #endif /* !TEST */ #ifdef TEST /* build test code */ void do_send(u_char *user, const struct pcap_pkthdr *h, const u_char *buf) { pcap_inject((pcap_t *)user, buf, h->caplen); } /* * a simple pcap test program, bridge between two interfaces. 
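 * Illustrative invocation (the binary name depends on how it is built):
 *	pcap-test [-v] IFNAME1 IFNAME2 [BURST]
 * Packets received on one interface via pcap_dispatch() are re-injected
 * on the other one with pcap_inject().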
*/ int main(int argc, char **argv) { pcap_t *p0, *p1; int burst = 1024; struct pollfd pollfd[2]; fprintf(stderr, "%s %s built %s %s\n", argv[0], version, __DATE__, __TIME__); while (argc > 1 && !strcmp(argv[1], "-v")) { verbose++; argv++; argc--; } if (argc < 3 || argc > 4 || !strcmp(argv[1], argv[2])) { D("Usage: %s IFNAME1 IFNAME2 [BURST]", argv[0]); return (1); } if (argc > 3) burst = atoi(argv[3]); p0 = pcap_open_live(argv[1], 0, 1, 100, NULL); p1 = pcap_open_live(argv[2], 0, 1, 100, NULL); D("%s", version); D("open returns %p %p", p0, p1); if (!p0 || !p1) return(1); bzero(pollfd, sizeof(pollfd)); pollfd[0].fd = pcap_fileno(p0); pollfd[1].fd = pcap_fileno(p1); pollfd[0].events = pollfd[1].events = POLLIN; for (;;) { /* do i need to reset ? */ pollfd[0].revents = pollfd[1].revents = 0; int ret = poll(pollfd, 2, 1000); if (ret <= 0 || verbose) D("poll %s [0] ev %x %x [1] ev %x %x", ret <= 0 ? "timeout" : "ok", pollfd[0].events, pollfd[0].revents, pollfd[1].events, pollfd[1].revents); if (ret < 0) continue; if (pollfd[0].revents & POLLIN) pcap_dispatch(p0, burst, do_send, p1); if (pollfd[1].revents & POLLIN) pcap_dispatch(p1, burst, do_send, p0); } return (0); } #endif /* TEST */ netmap-release/examples/pingd.c000644 000765 000024 00000031704 12220335545 017325 0ustar00luigistaff000000 000000 /* * (C) 2011 Luigi Rizzo, Matteo Landi, Davide Barelli * * BSD license * * A simple program to bridge two network interfaces */ #include #include /* signal */ #include #include #include /* strcmp */ #include /* open */ #include /* close */ #include /* le64toh */ #include /* PROT_* */ #include /* ioctl */ #include #include #include /* sockaddr.. */ #include /* ntohs */ #include /* ifreq */ #include #include #include #include /* sockaddr_in */ #include #include #define MIN(a, b) ((a) < (b) ? (a) : (b)) int verbose = 0; int report = 0; /* debug support */ #define ND(format, ...) {} #define D(format, ...) do { \ if (!verbose) break; \ struct timeval _xxts; \ gettimeofday(&_xxts, NULL); \ fprintf(stderr, "%03d.%06d %s [%d] " format "\n", \ (int)_xxts.tv_sec %1000, (int)_xxts.tv_usec, \ __FUNCTION__, __LINE__, ##__VA_ARGS__); \ } while (0) char *version = "$Id$"; static int ABORT = 0; /* * info on a ring we handle */ struct my_ring { const char *ifname; int fd; char *mem; /* userspace mmap address */ u_int memsize; u_int queueid; u_int begin, end; /* first..last+1 rings to check */ struct netmap_if *nifp; struct netmap_ring *tx, *rx; /* shortcuts */ uint32_t if_flags; uint32_t if_reqcap; uint32_t if_curcap; }; static void sigint_h(__unused int sig) { ABORT = 1; signal(SIGINT, SIG_DFL); } static int do_ioctl(struct my_ring *me, int what) { struct ifreq ifr; int error; bzero(&ifr, sizeof(ifr)); strncpy(ifr.ifr_name, me->ifname, sizeof(ifr.ifr_name)); switch (what) { case SIOCSIFFLAGS: ifr.ifr_flagshigh = me->if_flags >> 16; ifr.ifr_flags = me->if_flags & 0xffff; break; case SIOCSIFCAP: ifr.ifr_reqcap = me->if_reqcap; ifr.ifr_curcap = me->if_curcap; break; } error = ioctl(me->fd, what, &ifr); if (error) { D("ioctl error %d", what); return error; } switch (what) { case SIOCGIFFLAGS: me->if_flags = (ifr.ifr_flagshigh << 16) | (0xffff & ifr.ifr_flags); if (verbose) D("flags are 0x%x", me->if_flags); break; case SIOCGIFCAP: me->if_reqcap = ifr.ifr_reqcap; me->if_curcap = ifr.ifr_curcap; if (verbose) D("curcap are 0x%x", me->if_curcap); break; } return 0; } /* * open a device. if me->mem is null then do an mmap. 
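 * The ringid argument selects which rings are scanned afterwards:
 * NETMAP_SW_RING binds to the host (software) ring, NETMAP_HW_RING|n
 * binds to hardware ring n only, and 0 binds to all hardware rings.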
*/ static int netmap_open(struct my_ring *me, int ringid) { int fd, err, l; struct nmreq req; me->fd = fd = open("/dev/netmap", O_RDWR); if (fd < 0) { D("Unable to open /dev/netmap"); return (-1); } bzero(&req, sizeof(req)); strncpy(req.nr_name, me->ifname, sizeof(req.nr_name)); req.nr_ringid = ringid; err = ioctl(fd, NIOCGINFO, &req); if (err) { D("cannot get info on %s", me->ifname); goto error; } me->memsize = l = req.nr_memsize; if (verbose) D("memsize is %d MB", l>>20); err = ioctl(fd, NIOCREGIF, &req); if (err) { D("Unable to register %s", me->ifname); goto error; } if (me->mem == NULL) { me->mem = mmap(0, l, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0); if (me->mem == MAP_FAILED) { D("Unable to mmap"); me->mem = NULL; goto error; } } me->nifp = NETMAP_IF(me->mem, req.nr_offset); me->queueid = ringid; if (ringid & NETMAP_SW_RING) { me->begin = req.nr_numrings; me->end = me->begin + 1; } else if (ringid & NETMAP_HW_RING) { me->begin = ringid & NETMAP_RING_MASK; me->end = me->begin + 1; } else { me->begin = 0; me->end = req.nr_numrings; } me->tx = NETMAP_TXRING(me->nifp, me->begin); me->rx = NETMAP_RXRING(me->nifp, me->begin); return (0); error: close(me->fd); return -1; } static int netmap_close(struct my_ring *me) { D(""); if (me->mem) munmap(me->mem, me->memsize); close(me->fd); return (0); } /* Compute the checksum of the given ip header. */ /* len = number of byte. */ static uint16_t checksum(const void *data, uint16_t len) { const uint8_t *addr = data; uint32_t sum = 0; while (len > 1) { sum += addr[0] * 256 + addr[1]; addr += 2; len -= 2; } if (len == 1) sum += *addr * 256; sum = (sum >> 16) + (sum & 0xffff); sum += (sum >> 16); sum = htons(sum); return ~sum; } /* * Checksum routine for Internet Protocol family headers (C Version) */ u_short icmp_cksum(u_short *addr, int len) { int nleft, sum; u_short *w; union { u_short us; u_char uc[2]; } last; u_short answer; nleft = len; sum = 0; w = addr; /* * Our algorithm is simple, using a 32 bit accumulator (sum), we add * sequential 16 bit words to it, and at the end, fold back all the * carry bits from the top 16 bits into the lower 16 bits. 
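 * Worked example (illustrative numbers): if the 32 bit sum ends up as
 * 0x2abcd, folding gives 0x2 + 0xabcd = 0xabcf; the one's complement,
 * 0x5430, is the checksum that goes on the wire.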
*/ while (nleft > 1) { sum += *w++; nleft -= 2; } /* mop up an odd byte, if necessary */ if (nleft == 1) { last.uc[0] = *(u_char *)w; last.uc[1] = 0; sum += last.us; } /* add back carry outs from top 16 bits to low 16 bits */ sum = (sum >> 16) + (sum & 0xffff); /* add hi 16 to low 16 */ sum += (sum >> 16); /* add carry */ answer = ~sum; /* truncate to 16 bits */ return(answer); } /* * - Switch addresses (both MAC and IP) * - Change echo request into echo reply * - Recompute checksums (IP and ICMP) */ void icmp_packet_process(char *pkt, uint16_t slot_len) { struct ether_header *eh; struct ip *ip; struct icmp *icmp; int len; u_char MAC_src[6]; struct in_addr IP_src; bzero(MAC_src, sizeof(MAC_src)); bzero(&IP_src, sizeof(IP_src)); eh = (struct ether_header *) pkt; ip = (struct ip *) &eh[1]; icmp = (struct icmp *) &ip[1]; // Copy MAC address memcpy(MAC_src, &eh->ether_shost, ETHER_ADDR_LEN); // Switch them memcpy(&eh->ether_shost, &eh->ether_dhost, ETHER_ADDR_LEN); memcpy(&eh->ether_dhost, MAC_src, ETHER_ADDR_LEN); // Copy IP adress memcpy(&IP_src, &ip->ip_src, sizeof(ip->ip_src)); // Switch them memcpy(&ip->ip_src, &ip->ip_dst, sizeof(ip->ip_src)); memcpy(&ip->ip_dst, &IP_src , sizeof(ip->ip_src)); // Setting ICMP Type and Code to 0 (ICMP echo reply) icmp->icmp_type = 0; icmp->icmp_code = 0; // Update IP checksum ip->ip_sum = 0; ip->ip_sum = checksum(ip, sizeof(*ip)); // Update ICMP checksum len = slot_len - 14 - 20; icmp->icmp_cksum = 0; icmp->icmp_cksum = icmp_cksum((u_short *) icmp, len); } int is_icmp(char *pkt) { struct ether_header *eh; struct ip *ip; struct icmp *icmp; eh = (struct ether_header *) pkt; ip = (struct ip *) &eh[1]; icmp = (struct icmp *) &ip[1]; if (ntohs(eh->ether_type) != ETHERTYPE_IP || ip->ip_p != 1 || icmp->icmp_type != ICMP_ECHO) return 0; return 1; } /* * move up to 'limit' pkts from rxring to txring swapping buffers. * * If txring2 is NULL the function acts like a bridge between the stack and the * NIC card; otherwise ICMP packets will be routed back to the NIC card. */ static int process_rings(struct netmap_ring *rxring, struct netmap_ring *txring, u_int limit, const char *msg, int modify_icmp) { u_int j, k, m = 0; /* print a warning if any of the ring flags is set (e.g. NM_REINIT) */ if (rxring->flags || txring->flags) D("%s rxflags %x stack txflags %x", msg, rxring->flags, txring->flags); j = rxring->cur; /* RX */ k = txring->cur; /* TX */ if (rxring->avail < limit) limit = rxring->avail; if (txring->avail < limit) limit = txring->avail; while (m < limit) { struct netmap_slot *rs = &rxring->slot[j]; struct netmap_slot *ts = &txring->slot[k]; char *buf = NETMAP_BUF(rxring, rxring->slot[j].buf_idx); uint32_t pkt; if (modify_icmp) { if (!is_icmp(buf)) { D("rx[%d] is not ICMP", j); break; /* best effort! */ } /*Swap addresses*/ icmp_packet_process(buf, rxring->slot[j].len); } else if (is_icmp(buf)) { D("rx[%d] is ICMP", j); break; /* best effort! */ } if (ts->buf_idx < 2 || rs->buf_idx < 2) { D("wrong index rx[%d] = %d -> tx[%d] = %d", j, rs->buf_idx, k, ts->buf_idx); sleep(2); } pkt = ts->buf_idx; ts->buf_idx = rs->buf_idx; rs->buf_idx = pkt; /* copy the packet lenght. */ if (rs->len < 14 || rs->len > 2048) D("wrong len %d rx[%d] -> tx[%d]", rs->len, j, k); else if (verbose > 1) D("send len %d rx[%d] -> tx[%d]", rs->len, j, k); ts->len = rs->len; /* report the buffer change. 
*/ ts->flags |= NS_BUF_CHANGED; rs->flags |= NS_BUF_CHANGED; /* report status */ if (report) ts->flags |= NS_REPORT; j = NETMAP_RING_NEXT(rxring, j); k = NETMAP_RING_NEXT(txring, k); m++; } rxring->avail -= m; txring->avail -= m; rxring->cur = j; txring->cur = k; if (verbose && m > 0) D("sent %d packets to %p", m, txring); return (m); } /* move packts from src to destination */ static int move(struct my_ring *src, struct my_ring *dst, u_int limit, int modify_icmp) { struct netmap_ring *txring, *rxring; u_int m = 0, si = src->begin, di = dst->begin; const char *msg = (src->queueid & NETMAP_SW_RING) ? "host->net" : "net->host"; while (si < src->end && di < dst->end) { rxring = NETMAP_RXRING(src->nifp, si); txring = NETMAP_TXRING(dst->nifp, di); ND("txring %p rxring %p", txring, rxring); if (rxring->avail == 0) { si++; continue; } if (txring->avail == 0) { di++; continue; } m += process_rings(rxring, txring, limit, msg, modify_icmp); if (rxring->avail != 0 && txring->avail != 0) si++; } return (m); } /* * how many packets on this set of queues ? */ static int howmany(struct my_ring *me, int tx) { u_int i, tot = 0; ND("me %p begin %d end %d", me, me->begin, me->end); for (i = me->begin; i < me->end; i++) { struct netmap_ring *ring = tx ? NETMAP_TXRING(me->nifp, i) : NETMAP_RXRING(me->nifp, i); tot += ring->avail; } if (0 && verbose && tot && !tx) D("ring %s %s %s has %d avail at %d", me->ifname, tx ? "tx": "rx", me->end > me->nifp->ni_num_queues ? "host":"net", tot, NETMAP_TXRING(me->nifp, me->begin)->cur); return tot; } /* * bridge [-v] if1 if2 * * If only one name, or the two interfaces are the same, * bridges userland and the adapter. */ int main(int argc, char **argv) { struct pollfd pollfd[2]; int i, single_fd = 0; u_int burst = 1024; struct my_ring me[2]; fprintf(stderr, "%s %s built %s %s\n", argv[0], version, __DATE__, __TIME__); bzero(me, sizeof(me)); while (argc > 1) { if (!strcmp(argv[1], "-v")) { verbose++; } else if (!strcmp(argv[1], "-r")) { report++; } else if (!strcmp(argv[1], "-1")) { single_fd = 1; } else break; argv++; argc--; } if (argc < 2 || argc > 3) { D("Usage: %s [-vr1] IFNAME1 [BURST]", argv[0]); return (1); } me[0].ifname = me[1].ifname = argv[1]; if (!single_fd && netmap_open(me, NETMAP_SW_RING)) return (1); me[1].mem = me[0].mem; if (netmap_open(me+1, 0)) return (1); do_ioctl(me+1, SIOCGIFFLAGS); if ((me[1].if_flags & IFF_UP) == 0) { D("%s is down, bringing up...", me[1].ifname); me[1].if_flags |= IFF_UP; } do_ioctl(me+1, SIOCSIFFLAGS); do_ioctl(me+1, SIOCGIFCAP); me[1].if_reqcap = me[1].if_curcap; me[1].if_reqcap &= ~(IFCAP_HWCSUM | IFCAP_TSO | IFCAP_TOE); do_ioctl(me+1, SIOCSIFCAP); if (argc > 2) burst = atoi(argv[3]); /* packets burst size. */ /* setup poll(2) variables. 
*/ memset(pollfd, 0, sizeof(pollfd)); for (i = 0; i < 2; i++) pollfd[i].fd = me[i].fd; D("Wait 2 secs for link to come up..."); sleep(2); D("Ready to go, %s 0x%x/%d <-> %s 0x%x/%d.", me[0].ifname, me[0].queueid, me[0].nifp->ni_num_queues, me[1].ifname, me[1].queueid, me[1].nifp->ni_num_queues); /* main loop */ signal(SIGINT, sigint_h); while (!ABORT) { int n0 = 0, n1 = 0, ret; if (!single_fd) { pollfd[0].events = pollfd[0].revents = 0; n0 = howmany(me, 0); if (n0) pollfd[1].events |= POLLOUT; else pollfd[0].events |= POLLIN; } pollfd[1].events = pollfd[1].revents = 0; n1 = howmany(me + 1, 0); if (n1) { pollfd[0].events |= POLLOUT; pollfd[1].events |= POLLOUT; } else pollfd[1].events |= POLLIN; ret = poll(pollfd + single_fd, 2 - single_fd, 2500); if (ret <= 0 || verbose) D("poll %s [0] ev %x %x rx %d@%d tx %d," " [1] ev %x %x rx %d@%d tx %d", ret <= 0 ? "timeout" : "ok", pollfd[0].events, pollfd[0].revents, howmany(me, 0), me[0].rx->cur, howmany(me, 1), pollfd[1].events, pollfd[1].revents, howmany(me+1, 0), me[1].rx->cur, howmany(me+1, 1) ); if (ret < 0) continue; if (!single_fd && pollfd[0].revents & POLLERR) { D("error on fd0, rxcur %d@%d", me[0].rx->avail, me[0].rx->cur); } if (pollfd[1].revents & POLLERR) { D("error on fd1, rxcur %d@%d", me[1].rx->avail, me[1].rx->cur); } if (pollfd[1].revents & POLLOUT) { if (n1) move(me + 1, me + 1, burst, 1 /* change ICMP content */); if (!single_fd && n0) move(me, me + 1, burst, 0 /* swap packets */); } if (!single_fd && pollfd[0].revents & POLLOUT) { move(me + 1, me, burst, 0 /* swap packets */); } } D("exiting"); netmap_close(me + 1); netmap_close(me + 0); return (0); } netmap-release/examples/pkt-gen.c000644 000765 000024 00000124657 12230345061 017576 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/tools/tools/netmap/pkt-gen.c 231198 2012-02-08 11:43:29Z luigi $ * $Id: pkt-gen.c 12346 2013-06-12 17:36:25Z luigi $ * * Example program to show how to build a multithreaded packet * source/sink using the netmap device. * * In this example we create a programmable number of threads * to take care of all the queues of the interface used to * send or receive traffic. 
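 * Illustrative invocations (interface name is only an example; see the
 * usage() function below for the full option list):
 *	pkt-gen -i ix0 -f tx -l 60	# transmit 60-byte packets
 *	pkt-gen -i ix0 -f rx		# receive and count packets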
* */ #include "nm_util.h" #include // isprint() const char *default_payload="netmap pkt-gen DIRECT payload\n" "http://info.iet.unipi.it/~luigi/netmap/ "; const char *indirect_payload="netmap pkt-gen indirect payload\n" "http://info.iet.unipi.it/~luigi/netmap/ "; int time_second; // support for RD() debugging macro int verbose = 0; #define SKIP_PAYLOAD 1 /* do not check payload. */ struct pkt { struct ether_header eh; struct ip ip; struct udphdr udp; uint8_t body[2048]; // XXX hardwired } __attribute__((__packed__)); struct ip_range { char *name; uint32_t start, end; /* same as struct in_addr */ uint16_t port0, port1; }; struct mac_range { char *name; struct ether_addr start, end; }; /* * global arguments for all threads */ struct glob_arg { struct ip_range src_ip; struct ip_range dst_ip; struct mac_range dst_mac; struct mac_range src_mac; int pkt_size; int burst; int forever; int npackets; /* total packets to send */ int frags; /* fragments per packet */ int nthreads; int cpus; int options; /* testing */ #define OPT_PREFETCH 1 #define OPT_ACCESS 2 #define OPT_COPY 4 #define OPT_MEMCPY 8 #define OPT_TS 16 /* add a timestamp */ #define OPT_INDIRECT 32 /* use indirect buffers, tx only */ #define OPT_DUMP 64 /* dump rx/tx traffic */ int dev_type; pcap_t *p; int tx_rate; struct timespec tx_period; int affinity; int main_fd; int report_interval; void *(*td_body)(void *); void *mmap_addr; int mmap_size; char *ifname; char *nmr_config; int dummy_send; }; enum dev_type { DEV_NONE, DEV_NETMAP, DEV_PCAP, DEV_TAP }; /* * Arguments for a new thread. The same structure is used by * the source and the sink */ struct targ { struct glob_arg *g; int used; int completed; int cancel; int fd; struct nmreq nmr; struct netmap_if *nifp; uint16_t qfirst, qlast; /* range of queues to scan */ volatile uint64_t count; struct timespec tic, toc; int me; pthread_t thread; int affinity; struct pkt pkt; }; /* * extract the extremes from a range of ipv4 addresses. * addr_lo[-addr_hi][:port_lo[-port_hi]] */ static void extract_ip_range(struct ip_range *r) { char *ap, *pp; struct in_addr a; D("extract IP range from %s", r->name); r->port0 = r->port1 = 0; r->start = r->end = 0; /* the first - splits start/end of range */ ap = index(r->name, '-'); /* do we have ports ? 
*/ if (ap) { *ap++ = '\0'; } /* grab the initial values (mandatory) */ pp = index(r->name, ':'); if (pp) { *pp++ = '\0'; r->port0 = r->port1 = strtol(pp, NULL, 0); }; inet_aton(r->name, &a); r->start = r->end = ntohl(a.s_addr); if (ap) { pp = index(ap, ':'); if (pp) { *pp++ = '\0'; if (*pp) r->port1 = strtol(pp, NULL, 0); } if (*ap) { inet_aton(ap, &a); r->end = ntohl(a.s_addr); } } if (r->port0 > r->port1) { uint16_t tmp = r->port0; r->port0 = r->port1; r->port1 = tmp; } if (r->start > r->end) { uint32_t tmp = r->start; r->start = r->end; r->end = tmp; } { struct in_addr a; char buf1[16]; // one ip address a.s_addr = htonl(r->end); strncpy(buf1, inet_ntoa(a), sizeof(buf1)); a.s_addr = htonl(r->start); D("range is %s:%d to %s:%d", inet_ntoa(a), r->port0, buf1, r->port1); } } static void extract_mac_range(struct mac_range *r) { D("extract MAC range from %s", r->name); bcopy(ether_aton(r->name), &r->start, 6); bcopy(ether_aton(r->name), &r->end, 6); #if 0 bcopy(targ->src_mac, eh->ether_shost, 6); p = index(targ->g->src_mac, '-'); if (p) targ->src_mac_range = atoi(p+1); bcopy(ether_aton(targ->g->dst_mac), targ->dst_mac, 6); bcopy(targ->dst_mac, eh->ether_dhost, 6); p = index(targ->g->dst_mac, '-'); if (p) targ->dst_mac_range = atoi(p+1); #endif D("%s starts at %s", r->name, ether_ntoa(&r->start)); } static struct targ *targs; static int global_nthreads; /* control-C handler */ static void sigint_h(int sig) { int i; (void)sig; /* UNUSED */ for (i = 0; i < global_nthreads; i++) { targs[i].cancel = 1; } signal(SIGINT, SIG_DFL); } /* sysctl wrapper to return the number of active CPUs */ static int system_ncpus(void) { #ifdef __FreeBSD__ int mib[2], ncpus; size_t len; mib[0] = CTL_HW; mib[1] = HW_NCPU; len = sizeof(mib); sysctl(mib, 2, &ncpus, &len, NULL, 0); return (ncpus); #else return 1; #endif /* !__FreeBSD__ */ } #ifdef __linux__ #define sockaddr_dl sockaddr_ll #define sdl_family sll_family #define AF_LINK AF_PACKET #define LLADDR(s) s->sll_addr; #include #define TAP_CLONEDEV "/dev/net/tun" #endif /* __linux__ */ #ifdef __FreeBSD__ #include #define TAP_CLONEDEV "/dev/tap" #endif /* __FreeBSD */ #ifdef __APPLE__ // #warning TAP not supported on apple ? #include #define TAP_CLONEDEV "/dev/tap" #endif /* __APPLE__ */ /* * parse the vale configuration in conf and put it in nmr. * The configuration may consist of 0 to 4 numbers separated * by commas: #tx-slots,#rx-slots,#tx-rinzgs,#rx-rings. * Missing numbers or zeroes stand for default values. * As an additional convenience, if exactly one number * is specified, then this is assigned to bot #tx-slots and #rx-slots. * If there is no 4th number, then the 3rd is assigned to bot #tx-rings * and #rx-rings. */ void parse_nmr_config(const char* conf, struct nmreq *nmr) { char *w, *tok; int i, v; nmr->nr_tx_rings = nmr->nr_rx_rings = 0; nmr->nr_tx_slots = nmr->nr_rx_slots = 0; if (conf == NULL || ! *conf) return; w = strdup(conf); for (i = 0, tok = strtok(w, ","); tok; i++, tok = strtok(NULL, ",")) { v = atoi(tok); switch (i) { case 0: nmr->nr_tx_slots = nmr->nr_rx_slots = v; break; case 1: nmr->nr_rx_slots = v; break; case 2: nmr->nr_tx_rings = nmr->nr_rx_rings = v; break; case 3: nmr->nr_rx_rings = v; break; default: D("ignored config: %s", tok); break; } } D("txr %d txd %d rxr %d rxd %d", nmr->nr_tx_rings, nmr->nr_tx_slots, nmr->nr_rx_rings, nmr->nr_rx_slots); free(w); } /* * locate the src mac address for our interface, put it * into the user-supplied buffer. return 0 if ok, -1 on error. 
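 * The buffer must hold at least 18 bytes ("xx:xx:xx:xx:xx:xx" plus the
 * terminating NUL).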
*/ static int source_hwaddr(const char *ifname, char *buf) { struct ifaddrs *ifaphead, *ifap; int l = sizeof(ifap->ifa_name); if (getifaddrs(&ifaphead) != 0) { D("getifaddrs %s failed", ifname); return (-1); } for (ifap = ifaphead; ifap; ifap = ifap->ifa_next) { struct sockaddr_dl *sdl = (struct sockaddr_dl *)ifap->ifa_addr; uint8_t *mac; if (!sdl || sdl->sdl_family != AF_LINK) continue; if (strncmp(ifap->ifa_name, ifname, l) != 0) continue; mac = (uint8_t *)LLADDR(sdl); sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); if (verbose) D("source hwaddr %s", buf); break; } freeifaddrs(ifaphead); return ifap ? 0 : 1; } /* set the thread affinity. */ static int setaffinity(pthread_t me, int i) { #ifdef __FreeBSD__ cpuset_t cpumask; if (i == -1) return 0; /* Set thread affinity affinity.*/ CPU_ZERO(&cpumask); CPU_SET(i, &cpumask); if (pthread_setaffinity_np(me, sizeof(cpuset_t), &cpumask) != 0) { D("Unable to set affinity"); return 1; } #else (void)me; /* suppress 'unused' warnings */ (void)i; #endif /* __FreeBSD__ */ return 0; } /* Compute the checksum of the given ip header. */ static uint16_t checksum(const void *data, uint16_t len, uint32_t sum) { const uint8_t *addr = data; uint32_t i; /* Checksum all the pairs of bytes first... */ for (i = 0; i < (len & ~1U); i += 2) { sum += (u_int16_t)ntohs(*((u_int16_t *)(addr + i))); if (sum > 0xFFFF) sum -= 0xFFFF; } /* * If there's a single byte left over, checksum it, too. * Network byte order is big-endian, so the remaining byte is * the high byte. */ if (i < len) { sum += addr[i] << 8; if (sum > 0xFFFF) sum -= 0xFFFF; } return sum; } static u_int16_t wrapsum(u_int32_t sum) { sum = ~sum & 0xFFFF; return (htons(sum)); } /* Check the payload of the packet for errors (use it for debug). * Look for consecutive ascii representations of the size of the packet. */ static void dump_payload(char *p, int len, struct netmap_ring *ring, int cur) { char buf[128]; int i, j, i0; /* get the length in ASCII of the length of the packet. */ printf("ring %p cur %5d [buf %6d flags 0x%04x len %5d]\n", ring, cur, ring->slot[cur].buf_idx, ring->slot[cur].flags, len); /* hexdump routine */ for (i = 0; i < len; ) { memset(buf, sizeof(buf), ' '); sprintf(buf, "%5d: ", i); i0 = i; for (j=0; j < 16 && i < len; i++, j++) sprintf(buf+7+j*3, "%02x ", (uint8_t)(p[i])); i = i0; for (j=0; j < 16 && i < len; i++, j++) sprintf(buf+7+j + 48, "%c", isprint(p[i]) ? p[i] : '.'); printf("%s\n", buf); } } /* * Fill a packet with some payload. * We create a UDP packet so the payload starts at * 14+20+8 = 42 bytes. */ #ifdef __linux__ #define uh_sport source #define uh_dport dest #define uh_ulen len #define uh_sum check #endif /* linux */ /* * increment the addressed in the packet, * starting from the least significant field. 
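 * (odometer style: the source port is bumped first, and when it passes the
 * end of its range it wraps and the next field up is incremented); from
 * most to least significant the fields are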
* DST_IP DST_PORT SRC_IP SRC_PORT */ static void update_addresses(struct pkt *pkt, struct glob_arg *g) { uint32_t a; uint16_t p; struct ip *ip = &pkt->ip; struct udphdr *udp = &pkt->udp; p = ntohs(udp->uh_sport); if (p < g->src_ip.port1) { /* just inc, no wrap */ udp->uh_sport = htons(p + 1); return; } udp->uh_sport = htons(g->src_ip.port0); a = ntohl(ip->ip_src.s_addr); if (a < g->src_ip.end) { /* just inc, no wrap */ ip->ip_src.s_addr = htonl(a + 1); return; } ip->ip_src.s_addr = htonl(g->src_ip.start); udp->uh_sport = htons(g->src_ip.port0); p = ntohs(udp->uh_dport); if (p < g->dst_ip.port1) { /* just inc, no wrap */ udp->uh_dport = htons(p + 1); return; } udp->uh_dport = htons(g->dst_ip.port0); a = ntohl(ip->ip_dst.s_addr); if (a < g->dst_ip.end) { /* just inc, no wrap */ ip->ip_dst.s_addr = htonl(a + 1); return; } ip->ip_dst.s_addr = htonl(g->dst_ip.start); } /* * initialize one packet and prepare for the next one. * The copy could be done better instead of repeating it each time. */ static void initialize_packet(struct targ *targ) { struct pkt *pkt = &targ->pkt; struct ether_header *eh; struct ip *ip; struct udphdr *udp; uint16_t paylen = targ->g->pkt_size - sizeof(*eh) - sizeof(struct ip); const char *payload = targ->g->options & OPT_INDIRECT ? indirect_payload : default_payload; int i, l, l0 = strlen(payload); /* create a nice NUL-terminated string */ for (i = 0; i < paylen;) { l = min(l0, paylen - i); bcopy(payload, pkt->body + i, l); i += l; } pkt->body[i-1] = '\0'; ip = &pkt->ip; /* prepare the headers */ ip->ip_v = IPVERSION; ip->ip_hl = 5; ip->ip_id = 0; ip->ip_tos = IPTOS_LOWDELAY; ip->ip_len = ntohs(targ->g->pkt_size - sizeof(*eh)); ip->ip_id = 0; ip->ip_off = htons(IP_DF); /* Don't fragment */ ip->ip_ttl = IPDEFTTL; ip->ip_p = IPPROTO_UDP; ip->ip_dst.s_addr = htonl(targ->g->dst_ip.start); ip->ip_src.s_addr = htonl(targ->g->src_ip.start); ip->ip_sum = wrapsum(checksum(ip, sizeof(*ip), 0)); udp = &pkt->udp; udp->uh_sport = htons(targ->g->src_ip.port0); udp->uh_dport = htons(targ->g->dst_ip.port0); udp->uh_ulen = htons(paylen); /* Magic: taken from sbin/dhclient/packet.c */ udp->uh_sum = wrapsum(checksum(udp, sizeof(*udp), checksum(pkt->body, paylen - sizeof(*udp), checksum(&ip->ip_src, 2 * sizeof(ip->ip_src), IPPROTO_UDP + (u_int32_t)ntohs(udp->uh_ulen) ) ) )); eh = &pkt->eh; bcopy(&targ->g->src_mac.start, eh->ether_shost, 6); bcopy(&targ->g->dst_mac.start, eh->ether_dhost, 6); eh->ether_type = htons(ETHERTYPE_IP); // dump_payload((void *)pkt, targ->g->pkt_size, NULL, 0); } /* * create and enqueue a batch of packets on a ring. * On the last one set NS_REPORT to tell the driver to generate * an interrupt when done. 
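 * When nfrags > 1 the slots of each frame are chained with NS_MOREFRAG,
 * which is left clear on the last (or only) fragment.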
*/ static int send_packets(struct netmap_ring *ring, struct pkt *pkt, struct glob_arg *g, u_int count, int options, u_int nfrags) { u_int sent, cur = ring->cur; int fcnt; int size = g->pkt_size; if (ring->avail < count) count = ring->avail; if (count < nfrags) { D("truncating packet, no room for frags %d %d", count, nfrags); } #if 0 if (options & (OPT_COPY | OPT_PREFETCH) ) { for (sent = 0; sent < count; sent++) { struct netmap_slot *slot = &ring->slot[cur]; char *p = NETMAP_BUF(ring, slot->buf_idx); prefetch(p); cur = NETMAP_RING_NEXT(ring, cur); } cur = ring->cur; } #endif for (fcnt = nfrags, sent = 0; sent < count; sent++) { struct netmap_slot *slot = &ring->slot[cur]; char *p = NETMAP_BUF(ring, slot->buf_idx); slot->flags = 0; if (options & OPT_INDIRECT) { slot->flags |= NS_INDIRECT; slot->ptr = (uint64_t)pkt; } else if (options & OPT_COPY) { pkt_copy(pkt, p, size); if (fcnt == 1) update_addresses(pkt, g); } else if (options & OPT_MEMCPY) { memcpy(p, pkt, size); if (fcnt == 1) update_addresses(pkt, g); } else if (options & OPT_PREFETCH) { prefetch(p); } if (options & OPT_DUMP) dump_payload(p, size, ring, cur); slot->len = size; if (--fcnt > 0) slot->flags |= NS_MOREFRAG; else fcnt = nfrags; if (sent == count - 1) { slot->flags &= ~NS_MOREFRAG; slot->flags |= NS_REPORT; } cur = NETMAP_RING_NEXT(ring, cur); } ring->avail -= sent; ring->cur = cur; return (sent); } /* * Send a packet, and wait for a response. * The payload (after UDP header, ofs 42) has a 4-byte sequence * followed by a struct timeval (or bintime?) */ #define PAY_OFS 42 /* where in the pkt... */ static void * pinger_body(void *data) { struct targ *targ = (struct targ *) data; struct pollfd fds[1]; struct netmap_if *nifp = targ->nifp; int i, rx = 0, n = targ->g->npackets; fds[0].fd = targ->fd; fds[0].events = (POLLIN); static uint32_t sent; struct timespec ts, now, last_print; uint32_t count = 0, min = 1000000000, av = 0; if (targ->g->nthreads > 1) { D("can only ping with 1 thread"); return NULL; } clock_gettime(CLOCK_REALTIME_PRECISE, &last_print); while (n == 0 || (int)sent < n) { struct netmap_ring *ring = NETMAP_TXRING(nifp, 0); struct netmap_slot *slot; char *p; for (i = 0; i < 1; i++) { slot = &ring->slot[ring->cur]; slot->len = targ->g->pkt_size; p = NETMAP_BUF(ring, slot->buf_idx); if (ring->avail == 0) { D("-- ouch, cannot send"); } else { pkt_copy(&targ->pkt, p, targ->g->pkt_size); clock_gettime(CLOCK_REALTIME_PRECISE, &ts); bcopy(&sent, p+42, sizeof(sent)); bcopy(&ts, p+46, sizeof(ts)); sent++; ring->cur = NETMAP_RING_NEXT(ring, ring->cur); ring->avail--; } } /* should use a parameter to decide how often to send */ if (poll(fds, 1, 3000) <= 0) { D("poll error/timeout on queue %d", targ->me); continue; } /* see what we got back */ for (i = targ->qfirst; i < targ->qlast; i++) { ring = NETMAP_RXRING(nifp, i); while (ring->avail > 0) { uint32_t seq; slot = &ring->slot[ring->cur]; p = NETMAP_BUF(ring, slot->buf_idx); clock_gettime(CLOCK_REALTIME_PRECISE, &now); bcopy(p+42, &seq, sizeof(seq)); bcopy(p+46, &ts, sizeof(ts)); ts.tv_sec = now.tv_sec - ts.tv_sec; ts.tv_nsec = now.tv_nsec - ts.tv_nsec; if (ts.tv_nsec < 0) { ts.tv_nsec += 1000000000; ts.tv_sec--; } if (1) D("seq %d/%d delta %d.%09d", seq, sent, (int)ts.tv_sec, (int)ts.tv_nsec); if (ts.tv_nsec < (int)min) min = ts.tv_nsec; count ++; av += ts.tv_nsec; ring->avail--; ring->cur = NETMAP_RING_NEXT(ring, ring->cur); rx++; } } //D("tx %d rx %d", sent, rx); //usleep(100000); ts.tv_sec = now.tv_sec - last_print.tv_sec; ts.tv_nsec = now.tv_nsec - last_print.tv_nsec; if 
(ts.tv_nsec < 0) { ts.tv_nsec += 1000000000; ts.tv_sec--; } if (ts.tv_sec >= 1) { D("count %d min %d av %d", count, min, av/count); count = 0; av = 0; min = 100000000; last_print = now; } } return NULL; } /* * reply to ping requests */ static void * ponger_body(void *data) { struct targ *targ = (struct targ *) data; struct pollfd fds[1]; struct netmap_if *nifp = targ->nifp; struct netmap_ring *txring, *rxring; int i, rx = 0, sent = 0, n = targ->g->npackets; fds[0].fd = targ->fd; fds[0].events = (POLLIN); if (targ->g->nthreads > 1) { D("can only reply ping with 1 thread"); return NULL; } D("understood ponger %d but don't know how to do it", n); while (n == 0 || sent < n) { uint32_t txcur, txavail; //#define BUSYWAIT #ifdef BUSYWAIT ioctl(fds[0].fd, NIOCRXSYNC, NULL); #else if (poll(fds, 1, 1000) <= 0) { D("poll error/timeout on queue %d", targ->me); continue; } #endif txring = NETMAP_TXRING(nifp, 0); txcur = txring->cur; txavail = txring->avail; /* see what we got back */ for (i = targ->qfirst; i < targ->qlast; i++) { rxring = NETMAP_RXRING(nifp, i); while (rxring->avail > 0) { uint16_t *spkt, *dpkt; uint32_t cur = rxring->cur; struct netmap_slot *slot = &rxring->slot[cur]; char *src, *dst; src = NETMAP_BUF(rxring, slot->buf_idx); //D("got pkt %p of size %d", src, slot->len); rxring->avail--; rxring->cur = NETMAP_RING_NEXT(rxring, cur); rx++; if (txavail == 0) continue; dst = NETMAP_BUF(txring, txring->slot[txcur].buf_idx); /* copy... */ dpkt = (uint16_t *)dst; spkt = (uint16_t *)src; pkt_copy(src, dst, slot->len); dpkt[0] = spkt[3]; dpkt[1] = spkt[4]; dpkt[2] = spkt[5]; dpkt[3] = spkt[0]; dpkt[4] = spkt[1]; dpkt[5] = spkt[2]; txring->slot[txcur].len = slot->len; /* XXX swap src dst mac */ txcur = NETMAP_RING_NEXT(txring, txcur); txavail--; sent++; } } txring->cur = txcur; txring->avail = txavail; targ->count = sent; #ifdef BUSYWAIT ioctl(fds[0].fd, NIOCTXSYNC, NULL); #endif //D("tx %d rx %d", sent, rx); } return NULL; } static __inline int timespec_ge(const struct timespec *a, const struct timespec *b) { if (a->tv_sec > b->tv_sec) return (1); if (a->tv_sec < b->tv_sec) return (0); if (a->tv_nsec >= b->tv_nsec) return (1); return (0); } static __inline struct timespec timeval2spec(const struct timeval *a) { struct timespec ts = { .tv_sec = a->tv_sec, .tv_nsec = a->tv_usec * 1000 }; return ts; } static __inline struct timeval timespec2val(const struct timespec *a) { struct timeval tv = { .tv_sec = a->tv_sec, .tv_usec = a->tv_nsec / 1000 }; return tv; } static int wait_time(struct timespec ts, struct timespec *wakeup_ts, long long *waited) { struct timespec curtime; curtime.tv_sec = 0; curtime.tv_nsec = 0; if (clock_gettime(CLOCK_REALTIME_PRECISE, &curtime) == -1) { D("clock_gettime: %s", strerror(errno)); return (-1); } while (timespec_ge(&ts, &curtime)) { if (waited != NULL) (*waited)++; if (clock_gettime(CLOCK_REALTIME_PRECISE, &curtime) == -1) { D("clock_gettime"); return (-1); } } if (wakeup_ts != NULL) *wakeup_ts = curtime; return (0); } static __inline void timespec_add(struct timespec *tsa, struct timespec *tsb) { tsa->tv_sec += tsb->tv_sec; tsa->tv_nsec += tsb->tv_nsec; if (tsa->tv_nsec >= 1000000000) { tsa->tv_sec++; tsa->tv_nsec -= 1000000000; } } static void * sender_body(void *data) { struct targ *targ = (struct targ *) data; struct pollfd fds[1]; struct netmap_if *nifp = targ->nifp; struct netmap_ring *txring; int i, n = targ->g->npackets / targ->g->nthreads, sent = 0; int options = targ->g->options | OPT_COPY; struct timespec tmptime, nexttime = { 0, 0}; // XXX silence compiler 
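	/*
	 * Rate limiting below works on a per-burst budget: every tx_period
	 * another 'burst' packets are allowed (tosend), and wait_time()
	 * spins until the next deadline before topping the budget up again.
	 */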
int rate_limit = targ->g->tx_rate; long long waited = 0; D("start"); if (setaffinity(targ->thread, targ->affinity)) goto quit; /* setup poll(2) mechanism. */ memset(fds, 0, sizeof(fds)); fds[0].fd = targ->fd; fds[0].events = (POLLOUT); /* main loop.*/ clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic); if (rate_limit) { tmptime.tv_sec = 2; tmptime.tv_nsec = 0; timespec_add(&targ->tic, &tmptime); targ->tic.tv_nsec = 0; if (wait_time(targ->tic, NULL, NULL) == -1) { D("wait_time: %s", strerror(errno)); goto quit; } nexttime = targ->tic; } if (targ->g->dev_type == DEV_PCAP) { int size = targ->g->pkt_size; void *pkt = &targ->pkt; pcap_t *p = targ->g->p; for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) { if (pcap_inject(p, pkt, size) != -1) sent++; update_addresses(pkt, targ->g); if (i > 10000) { targ->count = sent; i = 0; } } } else if (targ->g->dev_type == DEV_TAP) { /* tap */ int size = targ->g->pkt_size; void *pkt = &targ->pkt; D("writing to file desc %d", targ->g->main_fd); for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) { if (write(targ->g->main_fd, pkt, size) != -1) sent++; update_addresses(pkt, targ->g); if (i > 10000) { targ->count = sent; i = 0; } } } else { int tosend = 0; int frags = targ->g->frags; while (!targ->cancel && (n == 0 || sent < n)) { if (rate_limit && tosend <= 0) { tosend = targ->g->burst; timespec_add(&nexttime, &targ->g->tx_period); if (wait_time(nexttime, &tmptime, &waited) == -1) { D("wait_time"); goto quit; } } /* * wait for available room in the send queue(s) */ if (poll(fds, 1, 2000) <= 0) { if (targ->cancel) break; D("poll error/timeout on queue %d", targ->me); goto quit; } /* * scan our queues and send on those with room */ if (options & OPT_COPY && sent > 100000 && !(targ->g->options & OPT_COPY) ) { D("drop copy"); options &= ~OPT_COPY; } for (i = targ->qfirst; i < targ->qlast; i++) { int m, limit = rate_limit ? tosend : targ->g->burst; if (n > 0 && n - sent < limit) limit = n - sent; txring = NETMAP_TXRING(nifp, i); if (txring->avail == 0) continue; if (frags > 1) limit = ((limit + frags - 1) / frags) * frags; m = send_packets(txring, &targ->pkt, targ->g, limit, options, frags); ND("limit %d avail %d frags %d m %d", limit, txring->avail, frags, m); sent += m; targ->count = sent; if (rate_limit) { tosend -= m; if (tosend <= 0) break; } } } /* flush any remaining packets */ ioctl(fds[0].fd, NIOCTXSYNC, NULL); /* final part: wait all the TX queues to be empty. */ for (i = targ->qfirst; i < targ->qlast; i++) { txring = NETMAP_TXRING(nifp, i); while (!NETMAP_TX_RING_EMPTY(txring)) { ioctl(fds[0].fd, NIOCTXSYNC, NULL); usleep(1); /* wait 1 tick */ } } } clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc); targ->completed = 1; targ->count = sent; quit: /* reset the ``used`` flag. 
*/ targ->used = 0; return (NULL); } static void receive_pcap(u_char *user, const struct pcap_pkthdr * h, const u_char * bytes) { int *count = (int *)user; (void)h; /* UNUSED */ (void)bytes; /* UNUSED */ (*count)++; } static int receive_packets(struct netmap_ring *ring, u_int limit, int dump) { u_int cur, rx; cur = ring->cur; if (ring->avail < limit) limit = ring->avail; for (rx = 0; rx < limit; rx++) { struct netmap_slot *slot = &ring->slot[cur]; char *p = NETMAP_BUF(ring, slot->buf_idx); if (dump) dump_payload(p, slot->len, ring, cur); cur = NETMAP_RING_NEXT(ring, cur); } ring->avail -= rx; ring->cur = cur; return (rx); } static void * receiver_body(void *data) { struct targ *targ = (struct targ *) data; struct pollfd fds[1]; struct netmap_if *nifp = targ->nifp; struct netmap_ring *rxring; int i; uint64_t received = 0; if (setaffinity(targ->thread, targ->affinity)) goto quit; /* setup poll(2) mechanism. */ memset(fds, 0, sizeof(fds)); fds[0].fd = targ->fd; fds[0].events = (POLLIN); /* unbounded wait for the first packet. */ for (;;) { i = poll(fds, 1, 1000); if (i > 0 && !(fds[0].revents & POLLERR)) break; D("waiting for initial packets, poll returns %d %d", i, fds[0].revents); } /* main loop, exit after 1s silence */ clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic); if (targ->g->dev_type == DEV_PCAP) { while (!targ->cancel) { /* XXX should we poll ? */ pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap, NULL); } } else if (targ->g->dev_type == DEV_TAP) { D("reading from %s fd %d", targ->g->ifname, targ->g->main_fd); while (!targ->cancel) { char buf[2048]; /* XXX should we poll ? */ if (read(targ->g->main_fd, buf, sizeof(buf)) > 0) targ->count++; } } else { int dump = targ->g->options & OPT_DUMP; while (!targ->cancel) { /* Once we started to receive packets, wait at most 1 seconds before quitting. */ if (poll(fds, 1, 1 * 1000) <= 0 && !targ->g->forever) { clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc); targ->toc.tv_sec -= 1; /* Subtract timeout time. */ break; } for (i = targ->qfirst; i < targ->qlast; i++) { int m; rxring = NETMAP_RXRING(nifp, i); if (rxring->avail == 0) continue; m = receive_packets(rxring, targ->g->burst, dump); received += m; } targ->count = received; // tell the card we have read the data //ioctl(fds[0].fd, NIOCRXSYNC, NULL); } } targ->completed = 1; targ->count = received; quit: /* reset the ``used`` flag. */ targ->used = 0; return (NULL); } /* very crude code to print a number in normalized form. * Caller has to make sure that the buffer is large enough. 
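 * For example norm(buf, 12345678.0) produces "12.35 M", while values
 * below 1000 keep an empty unit suffix, e.g. "950.00 ".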
*/ static const char * norm(char *buf, double val) { char *units[] = { "", "K", "M", "G" }; u_int i; for (i = 0; val >=1000 && i < sizeof(units)/sizeof(char *); i++) val /= 1000; sprintf(buf, "%.2f %s", val, units[i]); return buf; } static void tx_output(uint64_t sent, int size, double delta) { double bw, raw_bw, pps; char b1[40], b2[80], b3[80]; printf("Sent %" PRIu64 " packets, %d bytes each, in %.2f seconds.\n", sent, size, delta); if (delta == 0) delta = 1e-6; if (size < 60) /* correct for min packet size */ size = 60; pps = sent / delta; bw = (8.0 * size * sent) / delta; /* raw packets have4 bytes crc + 20 bytes framing */ raw_bw = (8.0 * (size + 24) * sent) / delta; printf("Speed: %spps Bandwidth: %sbps (raw %sbps)\n", norm(b1, pps), norm(b2, bw), norm(b3, raw_bw) ); } static void rx_output(uint64_t received, double delta) { double pps; char b1[40]; printf("Received %" PRIu64 " packets, in %.2f seconds.\n", received, delta); if (delta == 0) delta = 1e-6; pps = received / delta; printf("Speed: %spps\n", norm(b1, pps)); } static void usage(void) { const char *cmd = "pkt-gen"; fprintf(stderr, "Usage:\n" "%s arguments\n" "\t-i interface interface name\n" "\t-f function tx rx ping pong\n" "\t-n count number of iterations (can be 0)\n" "\t-t pkts_to_send also forces tx mode\n" "\t-r pkts_to_receive also forces rx mode\n" "\t-l pkt_size in bytes excluding CRC\n" "\t-d dst_ip[:port[-dst_ip:port]] single or range\n" "\t-s src_ip[:port[-src_ip:port]] single or range\n" "\t-D dst-mac\n" "\t-S src-mac\n" "\t-a cpu_id use setaffinity\n" "\t-b burst size testing, mostly\n" "\t-c cores cores to use\n" "\t-p threads processes/threads to use\n" "\t-T report_ms milliseconds between reports\n" "\t-P use libpcap instead of netmap\n" "\t-w wait_for_link_time in seconds\n" "\t-R rate in packets per second\n" "\t-X dump payload\n" "", cmd); exit(0); } static void start_threads(struct glob_arg *g) { int i; targs = calloc(g->nthreads, sizeof(*targs)); /* * Now create the desired number of threads, each one * using a single descriptor. */ for (i = 0; i < g->nthreads; i++) { bzero(&targs[i], sizeof(targs[i])); targs[i].fd = -1; /* default, with pcap */ targs[i].g = g; if (g->dev_type == DEV_NETMAP) { struct nmreq tifreq; int tfd; /* register interface. */ tfd = open("/dev/netmap", O_RDWR); if (tfd == -1) { D("Unable to open /dev/netmap"); continue; } targs[i].fd = tfd; bzero(&tifreq, sizeof(tifreq)); strncpy(tifreq.nr_name, g->ifname, sizeof(tifreq.nr_name)); tifreq.nr_version = NETMAP_API; tifreq.nr_ringid = (g->nthreads > 1) ? (i | NETMAP_HW_RING) : 0; parse_nmr_config(g->nmr_config, &tifreq); /* * if we are acting as a receiver only, do not touch the transmit ring. * This is not the default because many apps may use the interface * in both directions, but a pure receiver does not. */ if (g->td_body == receiver_body) { tifreq.nr_ringid |= NETMAP_NO_TX_POLL; } if ((ioctl(tfd, NIOCREGIF, &tifreq)) == -1) { D("Unable to register %s", g->ifname); continue; } targs[i].nmr = tifreq; targs[i].nifp = NETMAP_IF(g->mmap_addr, tifreq.nr_offset); /* start threads. */ targs[i].qfirst = (g->nthreads > 1) ? i : 0; targs[i].qlast = (g->nthreads > 1) ? i+1 : (g->td_body == receiver_body ? 
tifreq.nr_rx_rings : tifreq.nr_tx_rings); } else { targs[i].fd = g->main_fd; } targs[i].used = 1; targs[i].me = i; if (g->affinity >= 0) { if (g->affinity < g->cpus) targs[i].affinity = g->affinity; else targs[i].affinity = i % g->cpus; } else targs[i].affinity = -1; /* default, init packets */ initialize_packet(&targs[i]); if (pthread_create(&targs[i].thread, NULL, g->td_body, &targs[i]) == -1) { D("Unable to create thread %d", i); targs[i].used = 0; } } } static void main_thread(struct glob_arg *g) { int i; uint64_t prev = 0; uint64_t count = 0; double delta_t; struct timeval tic, toc; gettimeofday(&toc, NULL); for (;;) { struct timeval now, delta; uint64_t pps, usec, my_count, npkts; int done = 0; delta.tv_sec = g->report_interval/1000; delta.tv_usec = (g->report_interval%1000)*1000; select(0, NULL, NULL, NULL, &delta); gettimeofday(&now, NULL); time_second = now.tv_sec; timersub(&now, &toc, &toc); my_count = 0; for (i = 0; i < g->nthreads; i++) { my_count += targs[i].count; if (targs[i].used == 0) done++; } usec = toc.tv_sec* 1000000 + toc.tv_usec; if (usec < 10000) continue; npkts = my_count - prev; pps = (npkts*1000000 + usec/2) / usec; D("%" PRIu64 " pps (%" PRIu64 " pkts in %" PRIu64 " usec)", pps, npkts, usec); prev = my_count; toc = now; if (done == g->nthreads) break; } timerclear(&tic); timerclear(&toc); for (i = 0; i < g->nthreads; i++) { struct timespec t_tic, t_toc; /* * Join active threads, unregister interfaces and close * file descriptors. */ if (targs[i].used) pthread_join(targs[i].thread, NULL); close(targs[i].fd); if (targs[i].completed == 0) D("ouch, thread %d exited with error", i); /* * Collect threads output and extract information about * how long it took to send all the packets. */ count += targs[i].count; t_tic = timeval2spec(&tic); t_toc = timeval2spec(&toc); if (!timerisset(&tic) || timespec_ge(&targs[i].tic, &t_tic)) tic = timespec2val(&targs[i].tic); if (!timerisset(&toc) || timespec_ge(&targs[i].toc, &t_toc)) toc = timespec2val(&targs[i].toc); } /* print output. */ timersub(&toc, &tic, &toc); delta_t = toc.tv_sec + 1e-6* toc.tv_usec; if (g->td_body == sender_body) tx_output(count, g->pkt_size, delta_t); else rx_output(count, delta_t); if (g->dev_type == DEV_NETMAP) { munmap(g->mmap_addr, g->mmap_size); close(g->main_fd); } } struct sf { char *key; void *f; }; static struct sf func[] = { { "tx", sender_body }, { "rx", receiver_body }, { "ping", pinger_body }, { "pong", ponger_body }, { NULL, NULL } }; static int tap_alloc(char *dev) { struct ifreq ifr; int fd, err; char *clonedev = TAP_CLONEDEV; (void)err; (void)dev; /* Arguments taken by the function: * * char *dev: the name of an interface (or '\0'). MUST have enough * space to hold the interface name if '\0' is passed * int flags: interface flags (eg, IFF_TUN etc.) 
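 * In this program the Linux branch always requests IFF_TAP | IFF_NO_PI
 * (a tap device without the extra packet-info header); on FreeBSD the
 * device node /dev/tap (or /dev/tapN) is opened directly.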
*/ #ifdef __FreeBSD__ if (dev[3]) { /* tapSomething */ static char buf[128]; snprintf(buf, sizeof(buf), "/dev/%s", dev); clonedev = buf; } #endif /* open the device */ if( (fd = open(clonedev, O_RDWR)) < 0 ) { return fd; } D("%s open successful", clonedev); /* preparation of the struct ifr, of type "struct ifreq" */ memset(&ifr, 0, sizeof(ifr)); #ifdef linux ifr.ifr_flags = IFF_TAP | IFF_NO_PI; if (*dev) { /* if a device name was specified, put it in the structure; otherwise, * the kernel will try to allocate the "next" device of the * specified type */ strncpy(ifr.ifr_name, dev, IFNAMSIZ); } /* try to create the device */ if( (err = ioctl(fd, TUNSETIFF, (void *) &ifr)) < 0 ) { D("failed to to a TUNSETIFF"); close(fd); return err; } /* if the operation was successful, write back the name of the * interface to the variable "dev", so the caller can know * it. Note that the caller MUST reserve space in *dev (see calling * code below) */ strcpy(dev, ifr.ifr_name); D("new name is %s", dev); #endif /* linux */ /* this is the special file descriptor that the caller will use to talk * with the virtual interface */ return fd; } int main(int arc, char **argv) { int i; struct glob_arg g; struct nmreq nmr; int ch; int wait_link = 2; int devqueues = 1; /* how many device queues */ bzero(&g, sizeof(g)); g.main_fd = -1; g.td_body = receiver_body; g.report_interval = 1000; /* report interval */ g.affinity = -1; /* ip addresses can also be a range x.x.x.x-x.x.x.y */ g.src_ip.name = "10.0.0.1"; g.dst_ip.name = "10.1.0.1"; g.dst_mac.name = "ff:ff:ff:ff:ff:ff"; g.src_mac.name = NULL; g.pkt_size = 60; g.burst = 512; // default g.nthreads = 1; g.cpus = 1; g.forever = 1; g.tx_rate = 0; g.frags = 1; g.nmr_config = ""; while ( (ch = getopt(arc, argv, "a:f:F:n:i:It:r:l:d:s:D:S:b:c:o:p:PT:w:WvR:XC:")) != -1) { struct sf *fn; switch(ch) { default: D("bad option %c %s", ch, optarg); usage(); break; case 'n': g.npackets = atoi(optarg); break; case 'F': i = atoi(optarg); if (i < 1 || i > 63) { D("invalid frags %d [1..63], ignore", i); break; } g.frags = i; break; case 'f': for (fn = func; fn->key; fn++) { if (!strcmp(fn->key, optarg)) break; } if (fn->key) g.td_body = fn->f; else D("unrecognised function %s", optarg); break; case 'o': /* data generation options */ g.options = atoi(optarg); break; case 'a': /* force affinity */ g.affinity = atoi(optarg); break; case 'i': /* interface */ g.ifname = optarg; if (!strncmp(optarg, "tap", 3)) g.dev_type = DEV_TAP; else g.dev_type = DEV_NETMAP; if (!strcmp(g.ifname, "null")) g.dummy_send = 1; break; case 'I': g.options |= OPT_INDIRECT; /* XXX use indirect buffer */ break; case 't': /* send, deprecated */ D("-t deprecated, please use -f tx -n %s", optarg); g.td_body = sender_body; g.npackets = atoi(optarg); break; case 'r': /* receive */ D("-r deprecated, please use -f rx -n %s", optarg); g.td_body = receiver_body; g.npackets = atoi(optarg); break; case 'l': /* pkt_size */ g.pkt_size = atoi(optarg); break; case 'd': g.dst_ip.name = optarg; break; case 's': g.src_ip.name = optarg; break; case 'T': /* report interval */ g.report_interval = atoi(optarg); break; case 'w': wait_link = atoi(optarg); break; case 'W': /* XXX changed default */ g.forever = 0; /* do not exit rx even with no traffic */ break; case 'b': /* burst */ g.burst = atoi(optarg); break; case 'c': g.cpus = atoi(optarg); break; case 'p': g.nthreads = atoi(optarg); break; case 'P': g.dev_type = DEV_PCAP; break; case 'D': /* destination mac */ g.dst_mac.name = optarg; break; case 'S': /* source mac */ g.src_mac.name = 
optarg; break; case 'v': verbose++; break; case 'R': g.tx_rate = atoi(optarg); break; case 'X': g.options |= OPT_DUMP; break; case 'C': g.nmr_config = strdup(optarg); } } if (g.ifname == NULL) { D("missing ifname"); usage(); } i = system_ncpus(); if (g.cpus < 0 || g.cpus > i) { D("%d cpus is too high, have only %d cpus", g.cpus, i); usage(); } if (g.cpus == 0) g.cpus = i; if (g.pkt_size < 16 || g.pkt_size > 1536) { D("bad pktsize %d\n", g.pkt_size); usage(); } if (g.src_mac.name == NULL) { static char mybuf[20] = "00:00:00:00:00:00"; /* retrieve source mac address. */ if (source_hwaddr(g.ifname, mybuf) == -1) { D("Unable to retrieve source mac"); // continue, fail later } g.src_mac.name = mybuf; } /* extract address ranges */ extract_ip_range(&g.src_ip); extract_ip_range(&g.dst_ip); extract_mac_range(&g.src_mac); extract_mac_range(&g.dst_mac); if (g.dev_type == DEV_TAP) { D("want to use tap %s", g.ifname); g.main_fd = tap_alloc(g.ifname); if (g.main_fd < 0) { D("cannot open tap %s", g.ifname); usage(); } } else if (g.dev_type > DEV_NETMAP) { char pcap_errbuf[PCAP_ERRBUF_SIZE]; D("using pcap on %s", g.ifname); pcap_errbuf[0] = '\0'; // init the buffer g.p = pcap_open_live(g.ifname, 0, 1, 100, pcap_errbuf); if (g.p == NULL) { D("cannot open pcap on %s", g.ifname); usage(); } } else if (g.dummy_send) { D("using a dummy send routine"); } else { bzero(&nmr, sizeof(nmr)); nmr.nr_version = NETMAP_API; /* * Open the netmap device to fetch the number of queues of our * interface. * * The first NIOCREGIF also detaches the card from the * protocol stack and may cause a reset of the card, * which in turn may take some time for the PHY to * reconfigure. */ g.main_fd = open("/dev/netmap", O_RDWR); if (g.main_fd == -1) { D("Unable to open /dev/netmap"); // fail later } /* * Register the interface on the netmap device: from now on, * we can operate on the network interface without any * interference from the legacy network stack. * * We decide to put the first interface registration here to * give time to cards that take a long time to reset the PHY. */ bzero(&nmr, sizeof(nmr)); nmr.nr_version = NETMAP_API; strncpy(nmr.nr_name, g.ifname, sizeof(nmr.nr_name)); nmr.nr_version = NETMAP_API; parse_nmr_config(g.nmr_config, &nmr); if (ioctl(g.main_fd, NIOCREGIF, &nmr) == -1) { D("Unable to register interface %s", g.ifname); //continue, fail later } ND("%s: txr %d txd %d rxr %d rxd %d", g.ifname, nmr.nr_tx_rings, nmr.nr_tx_slots, nmr.nr_rx_rings, nmr.nr_rx_slots); //if ((ioctl(g.main_fd, NIOCGINFO, &nmr)) == -1) { // D("Unable to get if info without name"); //} else { // D("map size is %d Kb", nmr.nr_memsize >> 10); //} if ((ioctl(g.main_fd, NIOCGINFO, &nmr)) == -1) { D("Unable to get if info for %s", g.ifname); } devqueues = nmr.nr_rx_rings; /* validate provided nthreads. */ if (g.nthreads < 1 || g.nthreads > devqueues) { D("bad nthreads %d, have %d queues", g.nthreads, devqueues); // continue, fail later } /* * Map the netmap shared memory: instead of issuing mmap() * inside the body of the threads, we prefer to keep this * operation here to simplify the thread logic. */ D("mapping %d Kbytes", nmr.nr_memsize>>10); g.mmap_size = nmr.nr_memsize; g.mmap_addr = (struct netmap_d *) mmap(0, nmr.nr_memsize, PROT_WRITE | PROT_READ, MAP_SHARED, g.main_fd, 0); if (g.mmap_addr == MAP_FAILED) { D("Unable to mmap %d KB", nmr.nr_memsize >> 10); // continue, fail later } /* Print some debug information. */ fprintf(stdout, "%s %s: %d queues, %d threads and %d cpus.\n", (g.td_body == sender_body) ? 
"Sending on" : "Receiving from", g.ifname, devqueues, g.nthreads, g.cpus); if (g.td_body == sender_body) { fprintf(stdout, "%s -> %s (%s -> %s)\n", g.src_ip.name, g.dst_ip.name, g.src_mac.name, g.dst_mac.name); } /* Exit if something went wrong. */ if (g.main_fd < 0) { D("aborting"); usage(); } } if (g.options) { D("--- SPECIAL OPTIONS:%s%s%s%s%s\n", g.options & OPT_PREFETCH ? " prefetch" : "", g.options & OPT_ACCESS ? " access" : "", g.options & OPT_MEMCPY ? " memcpy" : "", g.options & OPT_INDIRECT ? " indirect" : "", g.options & OPT_COPY ? " copy" : ""); } g.tx_period.tv_sec = g.tx_period.tv_nsec = 0; if (g.tx_rate > 0) { /* try to have at least something every second, * reducing the burst size to 0.5s worth of data * (but no less than one full set of fragments) */ if (g.burst > g.tx_rate/2) g.burst = g.tx_rate/2; if (g.burst < g.frags) g.burst = g.frags; g.tx_period.tv_nsec = (1e9 / g.tx_rate) * g.burst; g.tx_period.tv_sec = g.tx_period.tv_nsec / 1000000000; g.tx_period.tv_nsec = g.tx_period.tv_nsec % 1000000000; } if (g.td_body == sender_body) D("Sending %d packets every %ld.%09ld s", g.burst, g.tx_period.tv_sec, g.tx_period.tv_nsec); /* Wait for PHY reset. */ D("Wait %d secs for phy reset", wait_link); sleep(wait_link); D("Ready..."); /* Install ^C handler. */ global_nthreads = g.nthreads; signal(SIGINT, sigint_h); #if 0 // XXX this is not needed, i believe if (g.dev_type > DEV_NETMAP) { g.p = pcap_open_live(g.ifname, 0, 1, 100, NULL); if (g.p == NULL) { D("cannot open pcap on %s", g.ifname); usage(); } else D("using pcap %p on %s", g.p, g.ifname); } #endif // XXX start_threads(&g); main_thread(&g); return 0; } /* end of file */ netmap-release/examples/README000644 000765 000024 00000000507 12220335545 016735 0ustar00luigistaff000000 000000 $FreeBSD: head/tools/tools/netmap/README 227614 2011-11-17 12:17:39Z luigi $ This directory contains examples that use netmap pkt-gen a packet sink/source using the netmap API bridge a two-port jumper wire, also using the native API testpcap a jumper wire using libnetmap (or libpcap) click* various click examples netmap-release/examples/test_select.c000644 000765 000024 00000003102 12220335545 020531 0ustar00luigistaff000000 000000 /* * test minimum select time * * ./prog usec [method [duration]] */ #include #include #include #include #include #include #include enum { M_SELECT =0 , M_POLL, M_USLEEP }; static const char *names[] = { "select", "poll", "usleep" }; int main(int argc, char *argv[]) { struct timeval ta, tb, prev; int usec = 1, total = 0, method = M_SELECT; uint32_t *vals = NULL; uint32_t i, count = 0; #define LIM 1000000 if (argc > 1) usec = atoi(argv[1]); if (usec <= 0) usec = 1; else if (usec > 500000) usec = 500000; if (argc > 2) { if (!strcmp(argv[2], "poll")) method = M_POLL; else if (!strcmp(argv[2], "usleep")) method = M_USLEEP; } if (argc > 3) total = atoi(argv[3]); if (total < 1) total = 1; else if (total > 10) total = 10; fprintf(stderr, "testing %s for %dus over %ds\n", names[method], usec, total); gettimeofday(&ta, NULL); prev = ta; vals = calloc(LIM, sizeof(uint32_t)); for (;;) { if (method == M_SELECT) { struct timeval to = { 0, usec }; select(0, NULL, NULL, NULL, &to); } else if (method == M_POLL) { poll(NULL, 0, usec/1000); } else { usleep(usec); } gettimeofday(&tb, NULL); timersub(&tb, &prev, &prev); if (count < LIM) vals[count] = prev.tv_usec; count++; prev = tb; timersub(&tb, &ta, &tb); if (tb.tv_sec > total) break; } fprintf(stderr, "%dus actually took %dus\n", usec, (int)(tb.tv_sec * 1000000 + tb.tv_usec) / count ); 
for (i = 0; i < count && i < LIM; i++) fprintf(stdout, "%d\n", vals[i]); return 0; } netmap-release/examples/testcsum.c000644 000765 000024 00000012337 12220335545 020074 0ustar00luigistaff000000 000000 /* * test checksum * * General * - on new cpus (AMD X2, i5, i7) alignment is not very important. * - on old P4, the unrolling is not very useful * - the assembly version is uniformly slower * * In summary the 32-bit version with unrolling is quite fast. Data on i7-2600 checksums for 1518 bytes on i7-2600 at 3400 bufs ns/cycle 1 80 128 85 1024 90 2048 91 3000 90 3500 92 3800 95 3900 100 4096 119 8192 141 freq bufs ns/cy 200 1 1658 200 2048 1923 200 8192 2331 3400 1 78 3400 8192 141 For short packets bufs size ns/cy 1 64 7 3900 64 16 8192 64 33 */ #include #include #include #include #include inline void prefetch (const void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(const unsigned long *)x)); } volatile uint16_t res; #define REDUCE16(_x) ({ uint32_t x = _x; \ x = (x & 0xffff) + (x >> 16); \ x = (x & 0xffff) + (x >> 16); \ x; } ) #define REDUCE32(_x) ({ uint64_t x = _x; \ x = (x & 0xffffffff) + (x >> 32); \ x = (x & 0xffffffff) + (x >> 32); \ x; } ) uint32_t dummy(const unsigned char *addr, int count) { (void)addr; (void)count; return 0; } /* * Base mechanism, 16 bit at a time, not unrolled */ uint32_t sum16(const unsigned char *addr, int count) { uint32_t sum = 0; uint16_t *d = (uint16_t *)addr; for (;count >= 2; count -= 2) sum += *d++; /* Add left-over byte, if any */ if (count & 1) sum += *(uint8_t *)d; return REDUCE16(sum); } /* * Better mechanism, 32 bit at a time, not unrolled */ uint32_t sum32(const unsigned char *addr, int count) { uint64_t sum = 0; const uint32_t *d = (const uint32_t *)addr; for (; count >= 4; count -= 4) sum += *d++; addr = (const uint8_t *)d; if (count >= 2) { sum += *(const uint16_t *)addr; addr += 2; } /* Add left-over byte, if any */ if (count & 1) sum += *addr; sum = REDUCE32(sum); return REDUCE16(sum); } uint32_t sum32u(const unsigned char *addr, int count) { uint64_t sum = 0; const uint32_t *p = (uint32_t *)addr; for (; count >= 32; count -= 32) { sum += (uint64_t)p[0] + p[1] + p[2] + p[3] + p[4] + p[5] + p[6] + p[7]; p += 8; } if (count & 0x10) { sum += (uint64_t)p[0] + p[1] + p[2] + p[3]; p += 4; } if (count & 8) { sum += (uint64_t)p[0] + p[1]; p += 2; } if (count & 4) sum += *p++; addr = (const unsigned char *)p; if (count & 2) { sum += *(uint16_t *)addr; addr += 2; } if (count & 1) sum += *addr; sum = REDUCE32(sum); return REDUCE16(sum); } uint32_t sum32a(const unsigned char *addr, int count) { uint32_t sum32 = 0; uint64_t sum; const uint32_t *p = (const uint32_t *)addr; for (;count >= 32; count -= 32) { __asm( "add %1, %0\n" "adc %2, %0\n" "adc %3, %0\n" "adc %4, %0\n" "adc %5, %0\n" "adc %6, %0\n" "adc %7, %0\n" "adc %8, %0\n" "adc $0, %0" : "+r" (sum32) : "g" (p[0]), "g" (p[1]), "g" (p[2]), "g" (p[3]), "g" (p[4]), "g" (p[5]), "g" (p[6]), "g" (p[7]) : "cc" ); p += 8; } sum = sum32; for (;1 && count >= 16; count -= 16) { sum += (uint64_t)p[0] + p[1] + p[2] + p[3]; p += 4; } for (; count >= 4; count -= 4) { sum += *p++; } addr = (unsigned char *)p; if (count > 1) { sum += *(uint16_t *)addr; addr += 2; } if (count & 1) sum += *addr; sum = REDUCE32(sum); return REDUCE16(sum); } struct ftab { char *name; uint32_t (*fn)(const unsigned char *, int); }; struct ftab f[] = { { "dummy", dummy }, { "sum16", sum16 }, { "sum32", sum32 }, { "sum32u", sum32u }, { "sum32a", sum32a }, { NULL, NULL } }; int main(int argc, char *argv[]) { int i, j, n; int lim = argc > 
1 ? atoi(argv[1]) : 100; int len = argc > 2 ? atoi(argv[2]) : 1024; char *fn = argc > 3 ? argv[3] : "sum16"; int ring_size = argc > 4 ? atoi(argv[4]) : 0; unsigned char *buf0, *buf; #define MAXLEN 2048 #define NBUFS 65536 /* 128MB */ uint32_t (*fnp)(const unsigned char *, int) = NULL; struct timeval ta, tb; if (ring_size < 1 || ring_size > NBUFS) ring_size = 1; buf0 = calloc(1, MAXLEN * NBUFS); if (!buf0) return 1; for (i = 0; f[i].name; i++) { if (!strcmp(f[i].name, fn)) { fnp = f[i].fn; break; } } if (fnp == NULL) { fnp = sum16; fn = "sum16-default"; } if (len > MAXLEN) len = MAXLEN; for (n = 0; n < NBUFS; n++) { buf = buf0 + n*MAXLEN; for (i = 0; i < len; i++) buf[i] = i *i - i + 5; } fprintf(stderr, "function %s len %d count %dM ring_size %d\n", fn, len, lim, ring_size); gettimeofday(&ta, NULL); for (n = 0; n < lim; n++) { for (i = j = 0; i < 1000000; i++) { const unsigned char *x = buf0 + j*MAXLEN; prefetch(x + MAXLEN); prefetch(x + MAXLEN + 64); res = fnp(x, len); if (++j == ring_size) j = 0; } } gettimeofday(&tb, NULL); tb.tv_sec -= ta.tv_sec; tb.tv_usec -= ta.tv_usec; if (tb.tv_usec < 0) { tb.tv_sec--; tb.tv_usec += 1000000; } n = tb.tv_sec * 1000000 + tb.tv_usec; fprintf(stderr, "%dM cycles in %d.%06ds, %dns/cycle\n", lim, (int)tb.tv_sec, (int)tb.tv_usec, n/(lim*1000) ); fprintf(stderr, "%s %u sum16 %u sum32 %d sum32u %u\n", fn, res, sum16((unsigned char *)buf0, len), sum32((unsigned char *)buf0, len), sum32u((unsigned char *)buf0, len)); return 0; } netmap-release/examples/testlock.c000644 000765 000024 00000044212 12220335545 020052 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $Id$ * * Test program to study various ops and concurrency issues. * Create multiple threads, possibly bind to cpus, and run a workload. 
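 * Workloads (see the tests[] table below) include select/poll/usleep, time/gettimeofday/getpid,
 * several memory-copy variants, plain and atomic adds, rdtsc, cli, and a netmap NIOCGINFO ioctl.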
* * cc -O2 -Werror -Wall testlock.c -o testlock -lpthread * you might need -lrt */ #include #include #include /* pthread_* */ #if defined(__APPLE__) #include #define atomic_add_int(p, n) OSAtomicAdd32(n, (int *)p) #define atomic_cmpset_32(p, o, n) OSAtomicCompareAndSwap32(o, n, (int *)p) #elif defined(linux) int atomic_cmpset_32(volatile uint32_t *p, uint32_t old, uint32_t new) { int ret = *p == old; *p = new; return ret; } #if defined(HAVE_GCC_ATOMICS) int atomic_add_int(volatile int *p, int v) { return __sync_fetch_and_add(p, v); } #else inline uint32_t atomic_add_int(uint32_t *p, int v) { __asm __volatile ( " lock xaddl %0, %1 ; " : "+r" (v), /* 0 (result) */ "=m" (*p) /* 1 */ : "m" (*p)); /* 2 */ return (v); } #endif #else /* FreeBSD */ #include #include #include /* pthread w/ affinity */ #if __FreeBSD_version > 500000 #include /* cpu_set */ #if __FreeBSD_version > 800000 #define HAVE_AFFINITY #endif inline void prefetch (const void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(const unsigned long *)x)); } #else /* FreeBSD 4.x */ int atomic_cmpset_32(volatile uint32_t *p, uint32_t old, uint32_t new) { int ret = *p == old; *p = new; return ret; } #define PRIu64 "llu" #endif /* FreeBSD 4.x */ #endif /* FreeBSD */ #include /* signal */ #include #include #include #include /* PRI* macros */ #include /* strcmp */ #include /* open */ #include /* getopt */ #include /* sysctl */ #include /* timersub */ static inline int min(int a, int b) { return a < b ? a : b; } #define ONE_MILLION 1000000 /* debug support */ #define ND(format, ...) #define D(format, ...) \ fprintf(stderr, "%s [%d] " format "\n", \ __FUNCTION__, __LINE__, ##__VA_ARGS__) int verbose = 0; #if 1//def MY_RDTSC /* Wrapper around `rdtsc' to take reliable timestamps flushing the pipeline */ #define my_rdtsc(t) \ do { \ u_int __regs[4]; \ \ do_cpuid(0, __regs); \ (t) = rdtsc(); \ } while (0) static __inline void do_cpuid(u_int ax, u_int *p) { __asm __volatile("cpuid" : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3]) : "0" (ax) ); } static __inline uint64_t rdtsc(void) { uint64_t rv; // XXX does not work on linux-64 bit __asm __volatile("rdtscp" : "=A" (rv) : : "%rax"); return (rv); } #endif /* 1 */ struct targ; /*** global arguments for all threads ***/ struct glob_arg { struct { uint32_t ctr[1024]; } v __attribute__ ((aligned(256) )); int64_t m_cycles; /* total cycles */ int nthreads; int cpus; int privs; // 1 if has IO privileges int arg; // microseconds in usleep char *test_name; void (*fn)(struct targ *); uint64_t scale; // scaling factor char *scale_name; // scaling factor }; /* * Arguments for a new thread. */ struct targ { struct glob_arg *g; int completed; u_int *glob_ctr; uint64_t volatile count; struct timeval tic, toc; int me; pthread_t thread; int affinity; }; static struct targ *ta; static int global_nthreads; /* control-C handler */ static void sigint_h(int sig) { int i; (void)sig; /* UNUSED */ for (i = 0; i < global_nthreads; i++) { /* cancel active threads. */ if (ta[i].completed) continue; D("Cancelling thread #%d\n", i); pthread_cancel(ta[i].thread); ta[i].completed = 0; } signal(SIGINT, SIG_DFL); } /* sysctl wrapper to return the number of active CPUs */ static int system_ncpus(void) { #ifdef linux return 1; #else int mib[2] = { CTL_HW, HW_NCPU}, ncpus; size_t len = sizeof(mib); sysctl(mib, len / sizeof(mib[0]), &ncpus, &len, NULL, 0); D("system had %d cpus", ncpus); return (ncpus); #endif } /* * try to get I/O privileges so we can execute cli/sti etc. 
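 * (opens /dev/io; when that fails g.privs stays 0 and the 'cli' test only reports
 * that privileged instructions are not available)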
*/ int getprivs(void) { int fd = open("/dev/io", O_RDWR); if (fd < 0) { D("cannot open /dev/io, fd %d", fd); return 0; } return 1; } /* set the thread affinity. */ /* ARGSUSED */ #ifdef HAVE_AFFINITY static int setaffinity(pthread_t me, int i) { cpuset_t cpumask; if (i == -1) return 0; /* Set thread affinity affinity.*/ CPU_ZERO(&cpumask); CPU_SET(i, &cpumask); if (pthread_setaffinity_np(me, sizeof(cpuset_t), &cpumask) != 0) { D("Unable to set affinity"); return 1; } return 0; } #endif static void * td_body(void *data) { struct targ *t = (struct targ *) data; #ifdef HAVE_AFFINITY if (0 == setaffinity(t->thread, t->affinity)) #endif { /* main loop.*/ D("testing %"PRIu64" cycles", t->g->m_cycles); gettimeofday(&t->tic, NULL); t->g->fn(t); gettimeofday(&t->toc, NULL); } t->completed = 1; return (NULL); } void test_sel(struct targ *t) { int64_t m; for (m = 0; m < t->g->m_cycles; m++) { fd_set r; struct timeval to = { 0, t->g->arg}; FD_ZERO(&r); FD_SET(0,&r); // FD_SET(1,&r); select(1, &r, NULL, NULL, &to); t->count++; } } void test_poll(struct targ *t) { int64_t m, ms = t->g->arg/1000; for (m = 0; m < t->g->m_cycles; m++) { struct pollfd x; x.fd = 0; x.events = POLLIN; poll(&x, 1, ms); t->count++; } } void test_usleep(struct targ *t) { int64_t m; for (m = 0; m < t->g->m_cycles; m++) { usleep(t->g->arg); t->count++; } } void test_cli(struct targ *t) { int64_t m, i; if (!t->g->privs) { D("%s", "privileged instructions not available"); return; } for (m = 0; m < t->g->m_cycles; m++) { for (i = 0; i < ONE_MILLION; i++) { __asm __volatile("cli;"); __asm __volatile("and %eax, %eax;"); __asm __volatile("sti;"); t->count++; } } } void test_nop(struct targ *t) { int64_t m, i; for (m = 0; m < t->g->m_cycles; m++) { for (i = 0; i < ONE_MILLION; i++) { __asm __volatile("nop;"); __asm __volatile("nop; nop; nop; nop; nop;"); //__asm __volatile("nop; nop; nop; nop; nop;"); t->count++; } } } void test_rdtsc1(struct targ *t) { int64_t m, i; uint64_t v; (void)v; for (m = 0; m < t->g->m_cycles; m++) { for (i = 0; i < ONE_MILLION; i++) { my_rdtsc(v); t->count++; } } } void test_rdtsc(struct targ *t) { int64_t m, i; volatile uint64_t v; for (m = 0; m < t->g->m_cycles; m++) { for (i = 0; i < ONE_MILLION; i++) { v = rdtsc(); t->count++; } } (void)v; } void test_add(struct targ *t) { int64_t m, i; for (m = 0; m < t->g->m_cycles; m++) { for (i = 0; i < ONE_MILLION; i++) { t->glob_ctr[0] ++; t->count++; } } } void test_atomic_add(struct targ *t) { int64_t m, i; for (m = 0; m < t->g->m_cycles; m++) { for (i = 0; i < ONE_MILLION; i++) { atomic_add_int(t->glob_ctr, 1); t->count++; } } } void test_atomic_cmpset(struct targ *t) { int64_t m, i; for (m = 0; m < t->g->m_cycles; m++) { for (i = 0; i < ONE_MILLION; i++) { atomic_cmpset_32(t->glob_ctr, m, i); t->count++; } } } void test_time(struct targ *t) { int64_t m; for (m = 0; m < t->g->m_cycles; m++) { #ifndef __APPLE__ struct timespec ts; clock_gettime(t->g->arg, &ts); #endif t->count++; } } void test_gettimeofday(struct targ *t) { int64_t m; struct timeval ts; for (m = 0; m < t->g->m_cycles; m++) { gettimeofday(&ts, NULL); t->count++; } } /* * getppid is the simplest system call (getpid is cached by glibc * so it would not be a good test) */ void test_getpid(struct targ *t) { int64_t m; for (m = 0; m < t->g->m_cycles; m++) { getppid(); t->count++; } } #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) static void fast_bcopy(void *_src, void *_dst, int l) { uint64_t *src = _src; uint64_t *dst = _dst; if (unlikely(l >= 1024)) { 
bcopy(src, dst, l); return; } for (; likely(l > 0); l-=64) { *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; } } static inline void asmcopy(void *dst, void *src, uint64_t l) { (void)dst; (void)src; asm( "\n\t" "movq %0, %%rcx\n\t" "addq $7, %%rcx\n\t" "shrq $03, %%rcx\n\t" "cld\n\t" "movq %1, %%rdi\n\t" "movq %2, %%rsi\n\t" "repe movsq\n\t" /* "movq %0, %%rcx\n\t" "andq $0x7, %%rcx\n\t" "repe movsb\n\t" */ : /* out */ : "r" (l), "r" (dst), "r" (src) /* in */ : "%rcx", "%rsi", "%rdi" /* clobbered */ ); } // XXX if you want to make sure there is no inlining... // static void (*fp)(void *_src, void *_dst, int l) = fast_bcopy; #define HU 0x3ffff static struct glob_arg huge[HU+1]; void test_fastcopy(struct targ *t) { int64_t m; int len = t->g->arg; if (len > (int)sizeof(struct glob_arg)) len = sizeof(struct glob_arg); D("fast copying %d bytes", len); for (m = 0; m < t->g->m_cycles; m++) { fast_bcopy(t->g, (void *)&huge[m & HU], len); t->count+=1; } } void test_asmcopy(struct targ *t) { int64_t m; int len = t->g->arg; if (len > (int)sizeof(struct glob_arg)) len = sizeof(struct glob_arg); D("fast copying %d bytes", len); for (m = 0; m < t->g->m_cycles; m++) { asmcopy((void *)&huge[m & HU], t->g, len); t->count+=1; } } void test_bcopy(struct targ *t) { int64_t m; int len = t->g->arg; if (len > (int)sizeof(struct glob_arg)) len = sizeof(struct glob_arg); D("bcopying %d bytes", len); for (m = 0; m < t->g->m_cycles; m++) { bcopy(t->g, (void *)&huge[m & HU], len); t->count+=1; } } void test_builtin_memcpy(struct targ *t) { int64_t m; int len = t->g->arg; if (len > (int)sizeof(struct glob_arg)) len = sizeof(struct glob_arg); D("bcopying %d bytes", len); for (m = 0; m < t->g->m_cycles; m++) { __builtin_memcpy((void *)&huge[m & HU], t->g, len); t->count+=1; } } void test_memcpy(struct targ *t) { int64_t m; int len = t->g->arg; if (len > (int)sizeof(struct glob_arg)) len = sizeof(struct glob_arg); D("memcopying %d bytes", len); for (m = 0; m < t->g->m_cycles; m++) { memcpy((void *)&huge[m & HU], t->g, len); t->count+=1; } } #include #include #include void test_netmap(struct targ *t) { struct nmreq nmr; int fd; int64_t m, scale; scale = t->g->m_cycles / 100; fd = open("/dev/netmap", O_RDWR); if (fd < 0) { D("fail to open netmap, exit"); return; } bzero(&nmr, sizeof(nmr)); for (m = 0; m < t->g->m_cycles; m += scale) { nmr.nr_version = 666; nmr.nr_cmd = t->g->arg; nmr.nr_offset = (uint32_t)scale; ioctl(fd, NIOCGINFO, &nmr); t->count += scale; } return; } struct entry { void (*fn)(struct targ *); char *name; uint64_t scale; uint64_t m_cycles; }; struct entry tests[] = { { test_sel, "select", 1, 1000 }, { test_poll, "poll", 1, 1000 }, { test_usleep, "usleep", 1, 1000 }, { test_time, "time", 1, 1000 }, { test_gettimeofday, "gettimeofday", 1, 1000000 }, { test_getpid, "getpid", 1, 1000000 }, { test_bcopy, "bcopy", 1000, 100000000 }, { test_builtin_memcpy, "__builtin_memcpy", 1000, 100000000 }, { test_memcpy, "memcpy", 1000, 100000000 }, { test_fastcopy, "fastcopy", 1000, 100000000 }, { test_asmcopy, "asmcopy", 1000, 100000000 }, { test_add, "add", ONE_MILLION, 100000000 }, { test_nop, "nop", ONE_MILLION, 100000000 }, { test_atomic_add, "atomic-add", ONE_MILLION, 100000000 }, { test_cli, "cli", ONE_MILLION, 100000000 }, { test_rdtsc, "rdtsc", ONE_MILLION, 100000000 }, // unserialized { test_rdtsc1, "rdtsc1", ONE_MILLION, 100000000 }, // serialized { test_atomic_cmpset, "cmpset", ONE_MILLION, 100000000 }, { test_netmap, 
"netmap", 1000, 100000000 }, { NULL, NULL, 0, 0 } }; static void usage(void) { const char *cmd = "test"; int i; fprintf(stderr, "Usage:\n" "%s arguments\n" "\t-m name test name\n" "\t-n cycles (millions) of cycles\n" "\t-l arg bytes, usec, ... \n" "\t-t threads total threads\n" "\t-c cores cores to use\n" "\t-a n force affinity every n cores\n" "\t-A n cache contention every n bytes\n" "\t-w report_ms milliseconds between reports\n" "", cmd); fprintf(stderr, "Available tests:\n"); for (i = 0; tests[i].name; i++) { fprintf(stderr, "%12s\n", tests[i].name); } exit(0); } static int64_t getnum(const char *s) { int64_t n; char *e; n = strtol(s, &e, 0); switch (e ? *e : '\0') { case 'k': case 'K': return n*1000; case 'm': case 'M': return n*1000*1000; case 'g': case 'G': return n*1000*1000*1000; case 't': case 'T': return n*1000*1000*1000*1000; default: return n; } } struct glob_arg g; int main(int argc, char **argv) { int i, ch, report_interval, affinity, align; ND("g has size %d", (int)sizeof(g)); report_interval = 250; /* ms */ affinity = 0; /* no affinity */ align = 0; /* global variable */ bzero(&g, sizeof(g)); g.privs = getprivs(); g.nthreads = 1; g.cpus = 1; g.m_cycles = 0; while ( (ch = getopt(argc, argv, "A:a:m:n:w:c:t:vl:")) != -1) { switch(ch) { default: D("bad option %c %s", ch, optarg); usage(); break; case 'A': /* align */ align = atoi(optarg); break; case 'a': /* force affinity */ affinity = atoi(optarg); break; case 'n': /* cycles */ g.m_cycles = getnum(optarg); break; case 'w': /* report interval */ report_interval = atoi(optarg); break; case 'c': g.cpus = atoi(optarg); break; case 't': g.nthreads = atoi(optarg); break; case 'm': g.test_name = optarg; break; case 'l': g.arg = getnum(optarg); break; case 'v': verbose++; break; } } argc -= optind; argv += optind; if (!g.test_name && argc > 0) g.test_name = argv[0]; if (g.test_name) { for (i = 0; tests[i].name; i++) { if (!strcmp(g.test_name, tests[i].name)) { g.fn = tests[i].fn; g.scale = tests[i].scale; if (g.m_cycles == 0) g.m_cycles = tests[i].m_cycles; if (g.scale == ONE_MILLION) g.scale_name = "M"; else if (g.scale == 1000) g.scale_name = "K"; else { g.scale = 1; g.scale_name = ""; } break; } } } if (!g.fn) { D("%s", "missing/unknown test name"); usage(); } i = system_ncpus(); if (g.cpus < 0 || g.cpus > i) { D("%d cpus is too high, have only %d cpus", g.cpus, i); usage(); } if (g.cpus == 0) g.cpus = i; if (g.nthreads < 1) { D("bad nthreads %d, using 1", g.nthreads); g.nthreads = 1; } i = sizeof(g.v.ctr) / g.nthreads*sizeof(g.v.ctr[0]); if (align < 0 || align > i) { D("bad align %d, max is %d", align, i); align = i; } /* Install ^C handler. */ global_nthreads = g.nthreads; signal(SIGINT, sigint_h); ta = calloc(g.nthreads, sizeof(*ta)); /* * Now create the desired number of threads, each one * using a single descriptor. */ D("start %d threads on %d cores", g.nthreads, g.cpus); for (i = 0; i < g.nthreads; i++) { struct targ *t = &ta[i]; bzero(t, sizeof(*t)); t->g = &g; t->me = i; t->glob_ctr = &g.v.ctr[(i*align)/sizeof(g.v.ctr[0])]; D("thread %d ptr %p", i, t->glob_ctr); t->affinity = affinity ? 
(affinity*i) % g.cpus : -1; if (pthread_create(&t->thread, NULL, td_body, t) == -1) { D("Unable to create thread %d", i); t->completed = 1; } } /* the main loop */ { uint64_t my_count = 0, prev = 0; uint64_t count = 0; double delta_t; struct timeval tic, toc; gettimeofday(&toc, NULL); for (;;) { struct timeval now, delta; uint64_t pps; int done = 0; delta.tv_sec = report_interval/1000; delta.tv_usec = (report_interval%1000)*1000; select(0, NULL, NULL, NULL, &delta); gettimeofday(&now, NULL); timersub(&now, &toc, &toc); my_count = 0; for (i = 0; i < g.nthreads; i++) { my_count += ta[i].count; if (ta[i].completed) done++; } pps = toc.tv_sec* ONE_MILLION + toc.tv_usec; if (pps < 10000) continue; pps = (my_count - prev)*ONE_MILLION / pps; D("%" PRIu64 " %scycles/s scale %" PRIu64 " in %dus", pps/g.scale, g.scale_name, g.scale, (int)(toc.tv_sec* ONE_MILLION + toc.tv_usec)); prev = my_count; toc = now; if (done == g.nthreads) break; } D("total %" PRIu64 " cycles", prev); timerclear(&tic); timerclear(&toc); for (i = 0; i < g.nthreads; i++) { pthread_join(ta[i].thread, NULL); if (ta[i].completed == 0) continue; /* * Collect threads o1utput and extract information about * how log it took to send all the packets. */ count += ta[i].count; if (!timerisset(&tic) || timercmp(&ta[i].tic, &tic, <)) tic = ta[i].tic; if (!timerisset(&toc) || timercmp(&ta[i].toc, &toc, >)) toc = ta[i].toc; } /* print output. */ timersub(&toc, &tic, &toc); delta_t = toc.tv_sec + 1e-6* toc.tv_usec; D("total %8.6f seconds", delta_t); } return (0); } /* end of file */ netmap-release/examples/testmmap.c000644 000765 000024 00000030005 12220335545 020047 0ustar00luigistaff000000 000000 #include "nm_util.h" #include #define MAX_VARS 100 char *variables[MAX_VARS]; int curr_var; #define VAR_FAILED ((void*)1) char *firstarg(char *buf) { int v; char *arg = strtok(buf, " \t\n"); char *ret; if (!arg) return NULL; if (arg[0] != '$' && arg[0] != '?') return arg; v = atoi(arg+1); if (v < 0 || v >= MAX_VARS) return ""; ret = variables[v]; if (ret == NULL) return "NULL"; if (ret == VAR_FAILED) { printf("reading failed var, exit\n"); exit(1); } if (arg[0] == '?') return ret; ret = rindex(ret, '=') + 1; return ret; } char *nextarg() { return firstarg(NULL); } char *restofline() { return strtok(NULL, "\n"); } void resetvar(int v, char *b) { if (variables[v] != VAR_FAILED) free(variables[v]); variables[v] = b; } #define outecho(format, args...) \ do {\ printf("%u:%lu: " format "\n", getpid(), (unsigned long) pthread_self(), ##args);\ fflush(stdout);\ } while (0) #define output(format, args...) \ do {\ resetvar(curr_var, (char*)malloc(1024));\ snprintf(variables[curr_var], 1024, format, ##args);\ outecho(format, ##args);\ } while (0) #define output_err(ret, format, args...)\ do {\ if (ret < 0) {\ resetvar(curr_var, VAR_FAILED);\ outecho(format, ##args);\ outecho("error: %s", strerror(errno));\ } else {\ output(format, ##args);\ }\ } while (0) struct chan { FILE *out; pid_t pid; pthread_t tid; }; int chan_search_free(struct chan* c[], int max) { int i; for (i = 0; i < max && c[i]; i++) ; return i; } void chan_clear_all(struct chan *c[], int max) { int i; for (i = 0; i < max; i++) { if (c[i]) { fclose(c[i]->out); free(c[i]); c[i] = NULL; } } } int last_fd = -1; size_t last_memsize = 0; void* last_mmap_addr = NULL; char* last_access_addr = NULL; void do_open() { last_fd = open("/dev/netmap", O_RDWR); output_err(last_fd, "open(\"/dev/netmap\", O_RDWR)=%d", last_fd); } void do_close() { int ret, fd; char *arg = nextarg(); fd = arg ? 
atoi(arg) : last_fd; ret = close(fd); output_err(ret, "close(%d)=%d", fd, ret); } void do_getinfo() { struct nmreq nmr; int ret; char *arg, *name = "any"; int fd; bzero(&nmr, sizeof(nmr)); nmr.nr_version = NETMAP_API; arg = nextarg(); if (!arg) { fd = last_fd; goto doit; } fd = atoi(arg); name = nextarg(); if (name) { strncpy(nmr.nr_name, name, sizeof(nmr.nr_name)); } doit: ret = ioctl(fd, NIOCGINFO, &nmr); last_memsize = nmr.nr_memsize; output_err(ret, "ioctl(%d, NIOCGINFO) for %s: memsize=%zu", fd, name, last_memsize); } void do_regif() { struct nmreq nmr; int ret; char *arg, *name; int fd; bzero(&nmr, sizeof(nmr)); nmr.nr_version = NETMAP_API; name = nextarg(); if (!name) { output("missing ifname"); return; } strncpy(nmr.nr_name, name, sizeof(nmr.nr_name)); arg = nextarg(); fd = arg ? atoi(arg) : last_fd; ret = ioctl(fd, NIOCREGIF, &nmr); last_memsize = nmr.nr_memsize; output_err(ret, "ioctl(%d, NIOCREGIF) for %s =%d", fd, name, ret); } volatile char tmp1; void do_access() { char *arg = nextarg(); char *p; if (!arg) { if (!last_access_addr) { output("missing address"); return; } p = last_access_addr; } else { p = (char *)strtoul((void *)arg, NULL, 0); } last_access_addr = p + 4096; tmp1 = *p; } void do_mmap() { size_t memsize; off_t off = 0; int fd; char *arg; arg = nextarg(); if (!arg) { memsize = last_memsize; fd = last_fd; goto doit; } memsize = atoi(arg); arg = nextarg(); if (!arg) { fd = last_fd; goto doit; } fd = atoi(arg); arg = nextarg(); if (arg) { off = (off_t)atol(arg); } doit: last_mmap_addr = mmap(0, memsize, PROT_WRITE | PROT_READ, MAP_SHARED, fd, off); if (last_access_addr == NULL) last_access_addr = last_mmap_addr; output_err(last_mmap_addr == MAP_FAILED ? -1 : 0, "mmap(0, %zu, PROT_WRITE|PROT_READ, MAP_SHARED, %d, %jd)=%p", memsize, fd, (intmax_t)off, last_mmap_addr); } void do_munmap() { void *mmap_addr; size_t memsize; char *arg; int ret; arg = nextarg(); if (!arg) { mmap_addr = last_mmap_addr; memsize = last_memsize; goto doit; } mmap_addr = (void*)strtoul(arg, NULL, 0); arg = nextarg(); if (!arg) { memsize = last_memsize; goto doit; } memsize = (size_t)strtoul(arg, NULL, 0); doit: ret = munmap(mmap_addr, memsize); output_err(ret, "munmap(%p, %zu)=%d", mmap_addr, memsize, ret); } void do_poll() { /* timeout fd fd... */ nfds_t nfds = 0, allocated_fds = 10, i; struct pollfd *fds; int timeout = 500; /* 1/2 second */ char *arg; int ret; arg = nextarg(); if (arg) timeout = atoi(arg); fds = malloc(allocated_fds * sizeof(struct pollfd)); if (fds == NULL) { output_err(-1, "out of memory"); return; } while ( (arg = nextarg()) ) { if (nfds >= allocated_fds) { allocated_fds *= 2; fds = realloc(fds, allocated_fds * sizeof(struct pollfd)); if (fds == NULL) { output_err(-1, "out of memory"); return; } } fds[nfds].fd = atoi(arg); fds[nfds].events = POLLIN; nfds++; } ret = poll(fds, nfds, timeout); for (i = 0; i < nfds; i++) { output("poll(%d)=%s%s%s%s%s", fds[i].fd, (fds[i].revents & POLLIN) ? "IN " : "- ", (fds[i].revents & POLLOUT)? "OUT " : "- ", (fds[i].revents & POLLERR)? "ERR " : "- ", (fds[i].revents & POLLHUP)? "HUP " : "- ", (fds[i].revents & POLLNVAL)?"NVAL" : "-"); } output_err(ret, "poll(...)=%d", ret); free(fds); } void do_txsync() { char *arg = nextarg(); int fd = arg ? atoi(arg) : last_fd; int ret = ioctl(fd, NIOCTXSYNC, NULL); output_err(ret, "ioctl(%d, NIOCTXSYNC)=%d", fd, ret); } void do_rxsync() { char *arg = nextarg(); int fd = arg ? 
atoi(arg) : last_fd; int ret = ioctl(fd, NIOCRXSYNC, NULL); output_err(ret, "ioctl(%d, NIOCRXSYNC)=%d", fd, ret); } void do_expr() { unsigned long stack[11]; int top = 10; char *arg; int err = 0; stack[10] = ULONG_MAX; while ( (arg = nextarg()) ) { errno = 0; char *rest; unsigned long n = strtoul(arg, &rest, 0); if (!errno && rest != arg) { if (top <= 0) { err = -1; break; } stack[--top] = n; continue; } if (top <= 8) { unsigned long n1 = stack[top++]; unsigned long n2 = stack[top++]; unsigned long r = 0; switch (arg[0]) { case '+': r = n1 + n2; break; case '-': r = n1 - n2; break; case '*': r = n1 * n2; break; case '/': if (n2) r = n1 / n2; else { errno = EDOM; err = -1; } break; default: err = -1; break; } stack[--top] = r; continue; } err = -1; break; } output_err(err, "expr=%lu", stack[top]); } void do_echo() { char *arg; for (arg = nextarg(); arg; arg = nextarg()) { printf("%s\n", arg); } } void do_vars() { int i; for (i = 0; i < MAX_VARS; i++) { const char *v = variables[i]; if (v == NULL) continue; printf("?%d\t%s\n", i, v == VAR_FAILED ? "FAILED" : v); } } struct cmd_def { const char *name; void (*f)(void); }; struct cmd_def commands[] = { { .name = "open", .f = do_open, }, { .name = "close", .f = do_close, }, { .name = "getinfo", .f = do_getinfo, }, { .name = "regif", .f = do_regif, }, { .name = "mmap", .f = do_mmap, }, { .name = "access", .f = do_access, }, { .name = "munmap", .f = do_munmap, }, { .name = "poll", .f = do_poll, }, { .name = "txsync", .f = do_txsync, }, { .name = "rxsync", .f = do_rxsync, }, { .name = "expr", .f = do_expr, }, { .name = "echo", .f = do_echo, }, { .name = "vars", .f = do_vars, } }; const int N_CMDS = sizeof(commands) / sizeof(struct cmd_def); int find_command(const char* cmd) { int i; for (i = 0; i < N_CMDS; i++) { if (strcmp(commands[i].name, cmd) == 0) break; } return i; } #define MAX_CHAN 10 void prompt() { if (isatty(STDIN_FILENO)) { printf("> "); } } struct chan *channels[MAX_CHAN]; void* thread_cmd_loop(void *arg) { char buf[1024]; FILE *in = (FILE*)arg; while (fgets(buf, 1024, in)) { char *cmd; int i; cmd = firstarg(buf); i = find_command(cmd); if (i < N_CMDS) { commands[i].f(); continue; } output("unknown cmd %s", cmd); } fclose(in); return NULL; } void do_exit() { output("quit"); } void cmd_loop() { char buf[1024]; int i; struct chan *c; bzero(channels, sizeof(*channels) * MAX_CHAN); atexit(do_exit); for (prompt(); fgets(buf, 1024, stdin); prompt()) { char *cmd; int slot; cmd = firstarg(buf); if (!cmd) continue; if (cmd[0] == '@') { curr_var = atoi(cmd + 1); if (curr_var < 0 || curr_var >= MAX_VARS) curr_var = 0; cmd = nextarg(); if (!cmd) continue; } else { curr_var = 0; } if (strcmp(cmd, "fork") == 0) { int slot = chan_search_free(channels, MAX_CHAN); struct chan *c = NULL; pid_t pid; int p1[2] = { -1, -1}; if (slot == MAX_CHAN) { output("too many channels"); continue; } c = channels[slot] = (struct chan*)malloc(sizeof(struct chan)); if (c == NULL) { output_err(-1, "malloc"); continue; } bzero(c, sizeof(*c)); if (pipe(p1) < 0) { output_err(-1, "pipe"); goto clean1; } c->out = fdopen(p1[1], "w"); if (c->out == NULL) { output_err(-1, "fdopen"); goto clean1; } pid = fork(); switch (pid) { case -1: output_err(-1, "fork"); goto clean1; case 0: fclose(stdin); if (dup(p1[0]) < 0) { output_err(-1, "dup"); exit(1); } close(p1[1]); stdin = fdopen(0, "r"); chan_clear_all(channels, MAX_CHAN); goto out; default: break; } c->pid = pid; close(p1[0]); output("fork()=%d slot=%d", pid, slot); continue; clean1: if (c) { fclose(c->out); } close(p1[0]); 
close(p1[1]); free(c); out: continue; } if (strcmp(cmd, "kill") == 0) { int ret; cmd = nextarg(); if (!cmd) { output("missing slot"); continue; } slot = atoi(cmd); if (slot < 0 || slot >= MAX_CHAN || !channels[slot]) { output("invalid slot: %s", cmd); continue; } c = channels[slot]; ret = kill(c->pid, SIGTERM); output_err(ret, "kill(%d, SIGTERM)=%d", c->pid, ret); if (ret != -1) { wait(NULL); fclose(c->out); free(c); channels[slot] = NULL; } continue; } if (strcmp(cmd, "thread") == 0) { int slot = chan_search_free(channels, MAX_CHAN); struct chan *c = NULL; pthread_t tid; int p1[2] = { -1, -1}; int ret; FILE *in = NULL; if (slot == MAX_CHAN) { output("too many channels"); continue; } c = channels[slot] = (struct chan*)malloc(sizeof(struct chan)); bzero(c, sizeof(*c)); if (pipe(p1) < 0) { output_err(-1, "pipe"); goto clean2; } c->out = fdopen(p1[1], "w"); if (c->out == NULL) { output_err(-1, "fdopen"); goto clean2; } in = fdopen(p1[0], "r"); if (in == NULL) { output_err(-1, "fdopen"); goto clean2; } ret = pthread_create(&tid, NULL, thread_cmd_loop, in); output_err(ret, "pthread_create() tid=%lu slot=%d", (unsigned long) tid, slot); if (ret < 0) goto clean2; c->pid = getpid(); c->tid = tid; continue; clean2: fclose(in); fclose(c->out); close(p1[0]); close(p1[1]); free(c); continue; } if (strcmp(cmd, "cancel") == 0) { int ret; cmd = nextarg(); if (!cmd) { output("missing slot"); continue; } slot = atoi(cmd); if (slot < 0 || slot >= MAX_CHAN || !channels[slot]) { output("invalid slot: %s", cmd); continue; } c = channels[slot]; fclose(c->out); ret = pthread_join(c->tid, NULL); output_err(ret, "pthread_join(%lu)=%d", (unsigned long) c->tid, ret); if (ret > 0) { free(c); channels[slot] = NULL; } continue; } i = find_command(cmd); if (i < N_CMDS) { commands[i].f(); continue; } slot = atoi(cmd); if (slot < 0 || slot > MAX_CHAN || !channels[slot]) { output("invalid cmd/slot: %s", cmd); continue; } cmd = restofline(); if (!cmd) { output("missing command"); continue; } fprintf(channels[slot]->out, "%s\n", cmd); fflush(channels[slot]->out); sleep(1); } } int main(int argc, char **argv) { (void) argc; (void) argv; cmd_loop(); return 0; } netmap-release/examples/vale-ctl.c000644 000765 000024 00000010716 12220335545 017733 0ustar00luigistaff000000 000000 /* * Copyright (C) 2013 Michio Honda. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include /* PRI* macros */ #include /* strcmp */ #include /* open */ #include /* close */ #include /* ioctl */ #include #include /* ifreq */ #include #include #include /* basename */ /* debug support */ #define ND(format, ...) do {} while(0) #define D(format, ...) \ fprintf(stderr, "%s [%d] " format "\n", \ __FUNCTION__, __LINE__, ##__VA_ARGS__) static int bdg_ctl(const char *name, int nr_cmd, int nr_arg) { struct nmreq nmr; int error = 0; int fd = open("/dev/netmap", O_RDWR); if (fd == -1) { D("Unable to open /dev/netmap"); return -1; } bzero(&nmr, sizeof(nmr)); nmr.nr_version = NETMAP_API; if (name != NULL) /* might be NULL */ strncpy(nmr.nr_name, name, sizeof(nmr.nr_name)); nmr.nr_cmd = nr_cmd; switch (nr_cmd) { case NETMAP_BDG_ATTACH: case NETMAP_BDG_DETACH: if (nr_arg && nr_arg != NETMAP_BDG_HOST) nr_arg = 0; nmr.nr_arg1 = nr_arg; error = ioctl(fd, NIOCREGIF, &nmr); if (error == -1) D("Unable to %s %s to the bridge", nr_cmd == NETMAP_BDG_DETACH?"detach":"attach", name); else D("Success to %s %s to the bridge\n", nr_cmd == NETMAP_BDG_DETACH?"detach":"attach", name); break; case NETMAP_BDG_LIST: if (strlen(nmr.nr_name)) { /* name to bridge/port info */ error = ioctl(fd, NIOCGINFO, &nmr); if (error) D("Unable to obtain info for %s", name); else D("%s at bridge:%d port:%d", name, nmr.nr_arg1, nmr.nr_arg2); break; } /* scan all the bridges and ports */ nmr.nr_arg1 = nmr.nr_arg2 = 0; for (; !ioctl(fd, NIOCGINFO, &nmr); nmr.nr_arg2++) { D("bridge:%d port:%d %s", nmr.nr_arg1, nmr.nr_arg2, nmr.nr_name); nmr.nr_name[0] = '\0'; } break; default: /* GINFO */ nmr.nr_cmd = nmr.nr_arg1 = nmr.nr_arg2 = 0; error = ioctl(fd, NIOCGINFO, &nmr); if (error) D("Unable to get if info for %s", name); else D("%s: %d queues.", name, nmr.nr_rx_rings); break; } close(fd); return error; } int main(int argc, char *argv[]) { int ch, nr_cmd = 0, nr_arg = 0; const char *command = basename(argv[0]); char *name = NULL; if (argc != 3 && argc != 1 /* list all */ ) { usage: fprintf(stderr, "Usage:\n" "%s arguments\n" "\t-g interface interface name to get info\n" "\t-d interface interface name to be detached\n" "\t-a interface interface name to be attached\n" "\t-h interface interface name to be attached with the host stack\n" "\t-l list all or specified bridge's interfaces\n" "", command); return 0; } while ((ch = getopt(argc, argv, "d:a:h:g:l:")) != -1) { switch (ch) { default: fprintf(stderr, "bad option %c %s", ch, optarg); goto usage; case 'd': nr_cmd = NETMAP_BDG_DETACH; break; case 'a': nr_cmd = NETMAP_BDG_ATTACH; break; case 'h': nr_cmd = NETMAP_BDG_ATTACH; nr_arg = NETMAP_BDG_HOST; break; case 'g': nr_cmd = 0; break; case 'l': nr_cmd = NETMAP_BDG_LIST; break; } name = optarg; } if (argc == 1) nr_cmd = NETMAP_BDG_LIST; bdg_ctl(name, nr_cmd, nr_arg); return 0; } netmap-release/README000644 000765 000024 00000012311 12220335545 015113 0ustar00luigistaff000000 000000 Netmap - a framework for fast packet I/O VALE - a Virtual Local Ethernet using the netmap API 
=================================================================== NETMAP is a framework for very fast packet I/O from userspace. VALE implements an equally fast software switch using the netmap API. Both can deal with line rate on real or emulated 10 Gbit ports. See details at http://info.iet.unipi.it/~luigi/netmap/ http://info.iet.unipi.it/~luigi/vale/ In this directory you can find source code (BSD-Copyright) for FreeBSD and Linux. Note that recent FreeBSD distributions already include both NETMAP and VALE. Last update: 2013-06-06 Installation instructions ------------------------- A kernel module (netmap.ko or netmap_lin.ko) implements the whole VALE switch and the core NETMAP routines. Netmap-aware device drivers are needed to use netmap on ethernet ports. To date, we have support for Intel ixgbe (10G), e1000/e1000e/igb (1G), Realtek 8169 (1G) and Nvidia (1G). FreeBSD -------- (FreeBSD HEAD and stable/9 already include netmap in the source tree so you should not need the code in this distribution.) + add 'device netmap' to your kernel config file and rebuild a kernel. This will include the netmap module and netmap support in the device drivers. Alternatively, you can build standalone modules (netmap, ixgbe, em, lem, re, igb) Linux ------- + make sure you have kernel sources matching your installed kernel, so the build system can patch the sources and build netmap-enabled drivers. If kernel sources are in /a/b/c/linux-A.B.C/ , then you should do cd netmap/LINUX make KSRC=/a/b/c/linux-A.B.C/ # builds the kernel modules make KSRC=/a/b/c/linux-A.B.C/ apps # builds sample applications You can omit KSRC if your kernel sources are in a standard place. Applications ------------- The directory examples/ contains some programs that use the netmap API pkt-gen a packet generator/receiver working at line rate at 10Gbit/s vale-cfg utility to configure ports of a VALE switch bridge a utility that bridges two interfaces or one interface with the host stack pcap.c a simple libpcap-over-netmap library, and some test code, to help porting pcap applications to netmap. The library compiles as libnetmap.so, you can use it to replace your libpcap.so.* Testing ------- pkt-gen is a generic test program which can act as a sender or receiver. It has a large number of options, but the simplest form is: pkt-gen -i ix0 -f rx # receive and print stats pkt-gen -i ix0 -f tx -l 60 # send a stream of 60-byte packets (replace ix0 with the name of the interface or VALE port). This should be able to work at line rate (up to 14.88 Mpps on 10 Gbit/interfaces, even higher on VALE) but note the following CAVEATS ------- Before reporting slow send or receive speed on a physical interface, check ALL of the following: + check that your CPUs are running at the maximum clock rate and are not throttled down by the governor/powerd + make sure the interface is up before invoking pkt-gen + make sure that the netmap module and drivers are correctly loaded and can allocate all the memory they need (check into /var/log/messages or equivalent) These errors will generally cause a failure to register the netmap port + some switches/interfaces take a long time to (re)negotiate the link after starting pkt-gen; in case, use the -w N option to increase the initial delay to N seconds; This may cause inability to transmit, or lost packets for the first few seconds of transmission + make sure that the interface and switch you connect to has flow control (FC) disabled (either via sysctl or ethtool). 
If FC is enabled and the receiving end is unable to cope with the traffic, the driver will try to slow down transmission, sometimes to very low rates. + a lot of hardware is not able to sustain line rate. For instance, ixgbe has problems with receiving frames that are not multiple of 64 bytes (with/without CRC depending on the driver); also on transmissions, ixgbe tops at about 12.5 Mpps unless the driver prefetches tx descriptors. igb does line rate in all configurations. e1000/e1000e vary between 1.15 and 1.32 Mpps. re/r8169 is extremely slow in sending (max 4-500 Kpps) Credits ------- NETMAP and VALE are projects of the Universita` di Pisa, partially supported by Intel Research Berkeley, EU FP7 projects CHANGE and OPENLAB. Author: Luigi Rizzo Contributors: Giuseppe Lettieri Michio Honda Marta Carbone Gaetano Catalli Matteo Landi References ---------- There are a few academic papers describing netmap, VALE and applications. You can find the papers at http://info.iet.unipi.it/~luigi/research.html + Luigi Rizzo, netmap: a novel framework for fast packet I/O, Usenix ATC'12, Boston, June 2012 + Luigi Rizzo, Revisiting network I/O APIs: the netmap framework, Communications of the ACM 55 (3), 45-51, March 2012 + Luigi Rizzo, Marta Carbone, Gaetano Catalli, Transparent acceleration of software packet forwarding using netmap, IEEE Infocom 2012, Orlando, March 2012 + Luigi Rizzo, Giuseppe Lettieri, VALE: a switched ethernet for virtual machines, ACM Conext 2012, Nice, Dec. 2012 netmap-release/README.images000644 000765 000024 00000035264 12230344760 016373 0ustar00luigistaff000000 000000 EXPERIMENTING WITH NETMAP, VALE AND FAST QEMU --------------------------------------------- To ease experiments with Netmap, the VALE switch and our Qemu enhancements we have prepared a couple of bootable images (linux and FreeBSD). You can find them on the netmap page http://info.iet.unipi.it/~luigi/netmap/ where you can also look at more recent versions of this file. Below are step-by-step instructions on experiments you can run with these images. The two main versions are picobsd.hdd -> FreeBSD HEAD (netmap + VALE) tinycore.hdd -> Linux (qemu + netmap + VALE) Booting the image ----------------- For all experiments you need to copy the image on a USB stick and boot a PC with it. Alternatively, you can use the image with VirtualBox, Qemu or other emulators, as an example qemu-system-x86_64 -hda IMAGE_FILE -m 1G -machine accel=kvm ... (remove 'accel=kvm' if your host does not support kvm). The images do not install anything on the hard disk. Both systems have preloaded drivers for a number of network cards (including the intel 10 Gbit ones) with netmap extensions. The VALE switch is also available (it is part of the netmap module). ssh and scp clients are also included, together with a few other utilities. For the FreeBSD image: + the OS boots directly in console mode, you can switch between terminals with ALT-Fn. The password for the 'root' account is 'setup' + if you are connected to a network, you can use dhclient em0 # or other interface name to obtain an IP address and external connectivity. For the Linux image: + in addition to the netmap/VALE modules, the KVM kernel module is also preloaded. + the boot-loader gives you two main options (each with a variant to delay boot in case you have slow devices). + "Boot TinyCore" boots in an X11 environment as user 'tc'. You can create a few terminals using the icon at the bottom. You can use "sudo -s" to get root access. 
In case no suitable video card is available/detected, it falls back to command line mode. + "Boot Core (command line only)" boots in console mode with virtual terminals. You're automatically logged in as user 'tc'. To log in the other terminals use the same username (no password required). + The system should automatically recognize the existing ethernet devices, and load the appropriate netmap-capable device drivers when available. Interfaces are configured through DHCP when possible. General test recommendations ---------------------------- NOTE: The tests outlined in the following sections can generate very high packet rates, and some hardware misconfiguration problems may prevent you from achieving maximum speed. Common problems are: + slow link autonegotiation. Our programs typically wait 2-4 seconds for link negotiation to complete, but some NIC/switch combinations are much slower. In this case you should increase the delay (pkt-gen has the -w XX option for that) or possibly force the link speed and duplex mode on both sides. Check the link speed to make sure there are no nogotiation problems, and that you see the expected speed. ethtool IFNAME # on linux ifconfig IFNAME # on FreeBSD + ethernet flow control. If the receiving port is slow (often the case in presence of multicast/broadcast traffic, or also unicast if you are sending to non-netmap receivers), it will generate ethernet flow control frames that throttle down the sender. We recommend to disable BOTH RX and TX ethernet flow control on BOTH sender and receiver. On Linux this can be done with ethtool: ethtool -A IFNAME tx off rx off whereas on FreeBSD there are device-specific sysctl sysctl dev.ix.0.queue0.flow_control = 0 + CPU power saving. The CPU governor on linux, or equivalent in FreeBSD, tend to throttle down the clock rate reducing performance. Unlike other similar systems, netmap does not have busy-wait loops, so the CPU load is generally low and this can trigger the clock slowdown. Make sure that ALL CPUs run at maximum speed disabling the dynamic frequency-scaling mechanisms. cpufreq-set -gperformance # on linux sysctl dev.cpu.0.freq=3401 # on FreeBSD. + wrong MAC address netmap does not put the NIC in promiscuous mode, so unless the application does it, the NIC will only receive broadcast traffic or unicast directed to its own MAC address. STANDARD SOCKET TESTS --------------------- For most socket-based experiments you can use the "netperf" tool installed on the system (version 2.6.0). Be careful to use a matching version for the other netperf endpoint (e.g. netserver) when running tests between different machines. Interesting experiments are: netperf -H x.y.z.w -tTCP_STREAM # test TCP throughput netperf -H x.y.z.w -tTCP_RR # test latency netperf -H x.y.z.w -tUDP_STREAM -- -m8 # test UDP throughput with short packets where x.y.z.w is the host running "netserver". RAW SOCKET AND TAP TESTS ------------------------ For experiments with raw sockets and tap devices you can use the l2 utilities (l2open, l2send, l2recv) installed on the system. With these utilities you can send/receive custom network packets to/from raw sockets or tap file descriptors. The receiver can be run with one of the following commands l2open -r IFNAME l2recv # receive from a raw socket attached to IFNAME l2open -t IFNAME l2recv # receive from a file descriptor opened on the tap IFNAME The receiver process will wait indefinitely for the first packet and then keep receiving as long as packets keep coming. 
When the flow stops (after a 2 seconds timeout) the process terminates and prints the received packet rate and packet count. To run the sender in an easy way, you can use the script l2-send.sh in the home directory. This script defines several shell variables that can be manually changed to customize the test (see the comments in the script itself). As an example, you can test configurations with Virtual Machines attached to host tap devices bridged together. Tests using the Linux in-kernel pktgen -------------------------------------- To use the Linux in-kernel packet generator, you can use the script "linux-pktgen.sh" in the home directory. The pktgen creates a kernel thread for each hardware TX queue of a given NIC. By manually changing the script shell variable definitions you can change the test configuration (e.g. addresses in the generated packet). Please change the "NCPU" variable to match the number of CPUs on your machine. The script has an argument which specifies the number of NIC queues (i.e. kernel threads) to use minus one. For example: ./linux-pktgen.sh 2 # Uses 3 NIC queues When the script terminates, it prints the per-queue rates and the total rate achieved. NETMAP AND VALE EXPERIMENTS --------------------------- For most experiments with netmap you can use the "pkt-gen" command (don't get confused with the Linux in-kernel pktgen), which has a large number of options to send and receive traffic (also on TAP devices). pkt-gen normally generates UDP traffic for a specific IP address and using the brodadcast MAC address Netmap testing with network interfaces -------------------------------------- Remember that you need a netmap-capable driver in order to use netmap on a specific NIC. Currently supported drivers are e1000, e1000e, ixgbe, igb. For updated information please visit http://info.iet.unipi.it/~luigi/netmap/ Before running pkt-gen, make sure that the link is up. Run pkt-gen on an interface called "IFNAME": pkt-gen -i IFNAME -f tx # run a pkt-gen sender pkt-gen -i IFNAME -f rx # run a pkt-gen receiver pkt-gen without arguments will show other options, e.g. + -w sec modifies the wait time for link negotioation + -l len modifies the packet size + -d, -s set the IP destination/source addresses and ports + -D, -S set the MAC destination/source addresses and more. Testing the VALE switch ------------------------ To use the VALE switch instead of physical ports you only need to change the interface name in the pkt-gen command. As an example, on a single machine, you can run senders and receivers on multiple ports of a VALE switch as follows (run the commands into separate terminals to see the output) pkt-gen -ivale0:01 -ftx # run a sender on the port 01 of the switch vale0 pkt-gen -ivale0:02 -frx # receiver on the port 02 of same switch pkt-gen -ivale0:03 -ftx # another sender on the port 03 The VALE switches and ports are created (and destroyed) on the fly. Transparent connection of physical ports to the VALE switch ----------------------------------------------------------- It is also possible to use a network device as a port of a VALE switch. You can do this with the following command: vale-ctl -h vale0:eth0 # attach interface "eth0" to the "vale0" switch To detach an interface from a bridge: vale-ctl -d vale0:eth0 # detach interface "eth0" from the "vale0" switch These operations can be issued at any moment. 
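For reference, the vale-ctl commands above reduce to a single NIOCREGIF ioctl on /dev/netmap with nr_cmd set to NETMAP_BDG_ATTACH or NETMAP_BDG_DETACH; this is what examples/vale-ctl.c does internally. A minimal sketch follows: error handling is trimmed, the helper name is made up for illustration, and the header list simply mirrors the one used by the example programs.

	/* attach a port such as "vale0:eth0" to a VALE switch, as vale-ctl -a does */
	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <net/netmap.h>
	#include <net/netmap_user.h>

	int
	vale_attach(const char *port)		/* e.g. "vale0:eth0" */
	{
		struct nmreq nmr;
		int ret, fd = open("/dev/netmap", O_RDWR);

		if (fd < 0)
			return -1;
		memset(&nmr, 0, sizeof(nmr));
		nmr.nr_version = NETMAP_API;
		strncpy(nmr.nr_name, port, sizeof(nmr.nr_name));
		nmr.nr_cmd = NETMAP_BDG_ATTACH;	/* NETMAP_BDG_DETACH removes the port */
		ret = ioctl(fd, NIOCREGIF, &nmr);
		if (ret == -1)
			perror("NIOCREGIF");
		close(fd);
		return ret;
	}

The full tool additionally handles the -h case (attach together with the host stack, by setting nmr.nr_arg1 = NETMAP_BDG_HOST before the ioctl) and the -l case, which lists bridges and ports via NIOCGINFO.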
Tests with our modified QEMU
----------------------------

The Linux image also contains our modified QEMU, with the VALE backend
and the "e1000-paravirt" frontend (a paravirtualized e1000 emulation).
After you have booted the image on a physical machine (so you can
exploit KVM), you can boot the same image a second time (recursively)
with QEMU. Therefore, you can run all the tests above also from within
the virtual machine environment.

To make VM testing easier, the home directory contains some useful
scripts to set up and launch VMs on the physical machine.

+ "prep-taps.sh" creates and sets up two permanent tap interfaces
  ("tap01" and "tap02") and a Linux in-kernel bridge. The tap
  interfaces are then bridged together on the same bridge. The bridge
  interface ("br0") is given the address 10.0.0.200/24. This setup can
  be used to make two VMs communicate through the host bridge, or to
  test the speed of a Linux switch using l2open.

+ "unprep-taps.sh" undoes the above setup.

+ "launch-qemu.sh" can be used to run QEMU virtual machines. It takes
  four arguments:

  + The first argument can be "qemu" or "kvm", depending on whether we
    want to use the standard QEMU binary translation or the hardware
    virtualization acceleration.

  + The third argument can be "--tap", "--netuser" or "--vale", and
    tells QEMU what network backend to use: a tap device, the QEMU
    user networking (slirp), or a VALE switch port.

  + When the third argument is "--tap" or "--vale", the fourth argument
    specifies an index (e.g. "01", "02", etc.) which tells QEMU what
    tap device or VALE port to use as backend.

You can manually modify the script to set the shell variables that
select the type of emulated device (e.g. e1000, virtio-net-pci, ...)
and related options (ioeventfd, virtio vhost, e1000 mitigation, ...).
The default setup has an "e1000" device with interrupt mitigation
disabled. You can try the paravirtualized e1000 device
("e1000-paravirt") or the "virtio-net" device to get better
performance. However, bear in mind that these paravirtualized devices
don't have netmap support (whereas the standard e1000 does have netmap
support).

Examples:

    # Run a kvm VM attached to the port 01 of a VALE switch
    ./launch-qemu.sh kvm --vale 01

    # Run a kvm VM attached to the port 02 of the same VALE switch
    ./launch-qemu.sh kvm --vale 02

    # Run a kvm VM attached to the tap called "tap01"
    ./launch-qemu.sh kvm --tap 01

    # Run a kvm VM attached to the tap called "tap02"
    ./launch-qemu.sh kvm --tap 02


Guest-to-guest tests
--------------------

If you run two VMs attached to the same switch (which can be a Linux
bridge or a VALE switch), you can run guest-to-guest experiments. All
the tests reported in the previous sections are possible (normal
sockets, raw sockets, pkt-gen, ...), independently of the backend used.

In the following examples we assume that:

+ Each VM has an ethernet interface called "eth0".
+ The interface of the first VM is given the IP 10.0.0.1/24.
+ The interface of the second VM is given the IP 10.0.0.2/24.
+ The Linux bridge interface "br0" on the host is given the
  IP 10.0.0.200/24.
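If the guest interfaces do not come up with these addresses
automatically, you can assign them by hand from inside each VM. The
commands below are only a sketch, assuming a standard Linux guest where
ifconfig (or the equivalent "ip addr" command) is available; adjust the
interface name if it differs from "eth0".

    # inside the first VM
    ifconfig eth0 10.0.0.1 netmask 255.255.255.0 up
    # inside the second VM
    ifconfig eth0 10.0.0.2 netmask 255.255.255.0 up
    # quick connectivity check from the first VM
    ping -c 3 10.0.0.2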
Examples:

[1] ### Test UDP short packets over traditional sockets ###
    # On the guest 10.0.0.2 run
    netserver
    # On the guest 10.0.0.1 run
    netperf -H10.0.0.2 -tUDP_STREAM -- -m8

[2] ### Test UDP short packets with pkt-gen ###
    # On the guest 10.0.0.2 run
    pkt-gen -ieth0 -frx
    # On the guest 10.0.0.1 run
    pkt-gen -ieth0 -ftx

[3] ### Test guest-to-guest latency ###
    # On the guest 10.0.0.2 run
    netserver
    # On the guest 10.0.0.1 run
    netperf -H10.0.0.2 -tTCP_RR

Note that you can use pkt-gen in a VM only if the emulated ethernet
device is supported by netmap. The default emulated device is "e1000",
which has netmap support. If you try to run pkt-gen on an unsupported
device, pkt-gen will not work, reporting that it is unable to register
the interface.


Guest-to-host tests (follows from the previous section)
-------------------------------------------------------

If you run only one VM on your host machine, you can measure the
network performance between the VM and the host machine. In this case
the experiment setup depends on the backend you are using.

With the tap backend, you can use the bridge interface "br0" as a
communication endpoint. You can run normal/raw socket experiments, but
you cannot use pkt-gen on the "br0" interface, since the Linux bridge
interface is not supported by netmap.

Examples with the tap backend:

[1] ### Test TCP throughput over traditional sockets ###
    # On the host run
    netserver
    # On the guest 10.0.0.1 run
    netperf -H10.0.0.200 -tTCP_STREAM

[2] ### Test UDP short packets with pkt-gen and l2 ###
    # On the host run
    l2open -r br0 l2recv
    # On the guest 10.0.0.1 run (xx:yy:zz:ww:uu:vv is the
    # "br0" hardware address)
    pkt-gen -ieth0 -ftx -d10.0.0.200:7777 -Dxx:yy:zz:ww:uu:vv

With the VALE backend you can perform only UDP tests, since we don't
have a netmap application which implements a TCP endpoint: pkt-gen
generates UDP packets. As a communication endpoint on the host, you can
use a virtual VALE port opened on the fly by a pkt-gen instance.

Examples with the VALE backend:

[1] ### Test UDP short packets ###
    # On the host run
    pkt-gen -ivale0:99 -frx
    # On the guest 10.0.0.1 run
    pkt-gen -ieth0 -ftx

[2] ### Test UDP big packets (receiver on the guest) ###
    # On the guest 10.0.0.1 run
    pkt-gen -ieth0 -frx
    # On the host run
    pkt-gen -ivale0:99 -ftx -l1460

netmap-release/LINUX/000755 000765 000024 00000000000 12230553506 015134 5ustar00luigistaff000000 000000 netmap-release/LINUX/bsd_glue.h000644 000765 000024 00000024242 12230530510 017063 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012-2013 Luigi Rizzo - Universita` di Pisa * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * glue code to build the netmap bsd code under linux. * Some of these tweaks are generic, some are specific for * character device drivers and network code/device drivers. */ #ifndef _BSD_GLUE_H #define _BSD_GLUE_H /* a set of headers used in netmap */ #include #include #include #include #include #include #include #include #include #include #include #include //#include // ilog2 #include // eth_type_trans #include #include #include // virt_to_phys #define printf(fmt, arg...) printk(KERN_ERR fmt, ##arg) #define KASSERT(a, b) BUG_ON(!(a)) /* Type redefinitions. XXX check them */ typedef void * bus_dma_tag_t; typedef void * bus_dmamap_t; typedef int bus_size_t; typedef int bus_dma_segment_t; typedef void * bus_addr_t; #define vm_paddr_t phys_addr_t /* XXX the 'off_t' on Linux corresponds to a 'long' */ #define vm_offset_t uint32_t #define vm_ooffset_t unsigned long struct thread; /* endianness macros/functions */ #define le16toh le16_to_cpu #define le32toh le32_to_cpu #define le64toh le64_to_cpu #define be64toh be64_to_cpu #define htole32 cpu_to_le32 #define htole64 cpu_to_le64 #include #define time_second (jiffies_to_msecs(jiffies) / 1000U ) #define bzero(a, len) memset(a, 0, len) #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28) #define netdev_tx_t int #define netdev_ops hard_start_xmit struct net_device_ops { int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev); }; #endif /* < 2.6.28 */ // XXX maybe implement it as a proper function somewhere // it is important to set s->len before the copy. #define m_devget(_buf, _len, _ofs, _dev, _fn) ( { \ struct sk_buff *s = netdev_alloc_skb(_dev, _len); \ if (s) { \ s->len += _len; \ skb_copy_to_linear_data_offset(s, _ofs, _buf, _len); \ s->protocol = eth_type_trans(s, _dev); \ } \ s; } ) #define mbuf sk_buff #define m_nextpkt next // chain of mbufs #define m_freem(m) dev_kfree_skb_any(m) // free a sk_buff /* * m_copydata() copies from mbuf to buffer following the mbuf chain. * XXX check which linux equivalent we should use to follow fragmented * skbufs. */ //#define m_copydata(m, o, l, b) skb_copy_bits(m, o, b, l) #define m_copydata(m, o, l, b) skb_copy_from_linear_data_offset(m, o, b, l) #define copyin(_from, _to, _len) copy_from_user(_to, _from, _len) /* * struct ifnet is remapped into struct net_device on linux. * ifnet has an if_softc field pointing to the device-specific struct * (adapter). * On linux the ifnet/net_device is at the beginning of the device-specific * structure, so a pointer to the first field of the ifnet works. * We don't use this in netmap, though. * * if_xname name device name * if_capenable priv_flags * we would use "features" but it is all taken. * XXX check for conflict in flags use. * * In netmap we use if_pspare[0] to point to the netmap_adapter, * in linux we have no spares so we overload ax25_ptr, and the detection * for netmap-capable is some magic in the area pointed by that. 
*/ #define WNA(_ifp) (_ifp)->ax25_ptr #define ifnet net_device /* remap */ #define if_xname name /* field ifnet-> net_device */ #define if_capenable priv_flags /* IFCAP_NETMAP */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) typedef unsigned long phys_addr_t; extern struct net init_net; #endif #define CURVNET_SET(x) #define CURVNET_RESTORE(x) #define refcount_acquire(_a) atomic_add(1, (atomic_t *)_a) #define refcount_release(_a) atomic_dec_and_test((atomic_t *)_a) /* * We use spin_lock_irqsave() because we use the lock in the * (hard) interrupt context. */ typedef struct { spinlock_t sl; ulong flags; } safe_spinlock_t; static inline void mtx_lock(safe_spinlock_t *m) { spin_lock_irqsave(&(m->sl), m->flags); } static inline void mtx_unlock(safe_spinlock_t *m) { ulong flags = ACCESS_ONCE(m->flags); spin_unlock_irqrestore(&(m->sl), flags); } #define mtx_init(a, b, c, d) spin_lock_init(&((a)->sl)) #define mtx_destroy(a) // XXX spin_lock_destroy(a) /* * XXX these must be changed, as we cannot sleep within the RCU. * Must change to proper rwlock, and then can move the definitions * into the main netmap.c file. */ #define BDG_RWLOCK_T struct rw_semaphore #define BDG_RWINIT(b) init_rwsem(&(b)->bdg_lock) #define BDG_WLOCK(b) down_write(&(b)->bdg_lock) #define BDG_WUNLOCK(b) up_write(&(b)->bdg_lock) #define BDG_RLOCK(b) down_read(&(b)->bdg_lock) #define BDG_RUNLOCK(b) up_read(&(b)->bdg_lock) #define BDG_RTRYLOCK(b) down_read_trylock(&(b)->bdg_lock) #define BDG_SET_VAR(lval, p) ((lval) = (p)) #define BDG_GET_VAR(lval) (lval) #define BDG_FREE(p) kfree(p) /* use volatile to fix a probable compiler error on 2.6.25 */ #define malloc(_size, type, flags) \ ({ volatile int _v = _size; kmalloc(_v, GFP_ATOMIC | __GFP_ZERO); }) #define free(a, t) kfree(a) // XXX do we need GPF_ZERO ? // XXX do we need GFP_DMA for slots ? // http://www.mjmwired.net/kernel/Documentation/DMA-API.txt #define contigmalloc(sz, ty, flags, a, b, pgsz, c) \ (char *) __get_free_pages(GFP_ATOMIC | __GFP_ZERO, \ ilog2(roundup_pow_of_two((sz)/PAGE_SIZE))) #define contigfree(va, sz, ty) free_pages((unsigned long)va, \ ilog2(roundup_pow_of_two(sz)/PAGE_SIZE)) #define vtophys virt_to_phys /*--- selrecord and friends ---*/ /* wake_up() or wake_up_interruptible() ? */ #define selwakeuppri(sw, pri) wake_up(sw) #define selrecord(x, y) poll_wait((struct file *)x, y, pwait) // #define knlist_destroy(x) // XXX todo #define tsleep(a, b, c, t) msleep(10) // #define wakeup(sw) // XXX double check #define microtime do_gettimeofday // debugging /* * The following trick is to map a struct cdev into a struct miscdevice */ #define cdev miscdevice /* * XXX to complete - the dmamap interface */ #define BUS_DMA_NOWAIT 0 #define bus_dmamap_load(_1, _2, _3, _4, _5, _6, _7) #define bus_dmamap_unload(_1, _2) typedef int (d_mmap_t)(struct file *f, struct vm_area_struct *vma); typedef unsigned int (d_poll_t)(struct file * file, struct poll_table_struct *pwait); /* * make_dev will set an error and return the first argument. * This relies on the availability of the 'error' local variable. 
* For old linux systems that do not have devfs, generate a * message in syslog so the sysadmin knows which command to run * in order to create the /dev/netmap entry */ #define make_dev(_cdev, _zero, _uid, _gid, _perm, _name) \ ({error = misc_register(_cdev); \ D("run mknod /dev/%s c %d %d # error %d", \ (_cdev)->name, MISC_MAJOR, (_cdev)->minor, error); \ _cdev; } ) #define destroy_dev(_cdev) misc_deregister(_cdev) /*--- sysctl API ----*/ /* * linux: sysctl are mapped into /sys/module/ipfw_mod parameters * windows: they are emulated via get/setsockopt */ #define CTLFLAG_RD 1 #define CTLFLAG_RW 2 struct sysctl_oid; struct sysctl_req; #define SYSCTL_DECL(_1) #define SYSCTL_OID(_1, _2, _3, _4, _5, _6, _7, _8) #define SYSCTL_NODE(_1, _2, _3, _4, _5, _6) #define _SYSCTL_BASE(_name, _var, _ty, _perm) \ module_param_named(_name, *(_var), _ty, \ ( (_perm) == CTLFLAG_RD) ? 0444: 0644 ) /* XXX should implement this */ extern struct kernel_param_ops generic_sysctl_ops; #define SYSCTL_PROC(_base, _oid, _name, _mode, _var, _val, _fn, _ty, _desc) \ module_param_cb(_name, &generic_sysctl_ops, _fn, \ ( (_mode) & CTLFLAG_WR) ? 0644: 0444 ) /* for a string, _var is a preallocated buffer of size _varlen */ #define SYSCTL_STRING(_base, _oid, _name, _mode, _var, _varlen, _desc) \ module_param_string(_name, _var, _varlen, \ ((_mode) == CTLFLAG_RD) ? 0444: 0644 ) #define SYSCTL_INT(_base, _oid, _name, _mode, _var, _val, _desc) \ _SYSCTL_BASE(_name, _var, int, _mode) #define SYSCTL_LONG(_base, _oid, _name, _mode, _var, _val, _desc) \ _SYSCTL_BASE(_name, _var, long, _mode) #define SYSCTL_ULONG(_base, _oid, _name, _mode, _var, _val, _desc) \ _SYSCTL_BASE(_name, _var, ulong, _mode) #define SYSCTL_UINT(_base, _oid, _name, _mode, _var, _val, _desc) \ _SYSCTL_BASE(_name, _var, uint, _mode) // #define TUNABLE_INT(_name, _ptr) #define SYSCTL_VNET_PROC SYSCTL_PROC #define SYSCTL_VNET_INT SYSCTL_INT #define SYSCTL_HANDLER_ARGS \ struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req int sysctl_handle_int(SYSCTL_HANDLER_ARGS); int sysctl_handle_long(SYSCTL_HANDLER_ARGS); #define MALLOC_DECLARE(a) #define MALLOC_DEFINE(a, b, c) #define NM_ATOMIC_TEST_AND_SET(p) test_and_set_bit(0, (p)) #define NM_ATOMIC_CLEAR(p) clear_bit(0, (p)) #endif /* _BSD_GLUE_H */ netmap-release/LINUX/final-patches/000755 000765 000024 00000000000 12230530510 017640 5ustar00luigistaff000000 000000 netmap-release/LINUX/forcedeth_netmap.h000644 000765 000024 00000026255 12220335545 020626 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $Id: forcedeth_netmap.h 10670 2012-02-27 21:15:38Z luigi $ * * netmap support for 'forcedeth' (nfe) driver * For details on netmap support see ixgbe_netmap.h The driver supports ORIGinal and EXtended descriptors through unions. We remove the .orig and .ex suffix for brevity. Pointers in the ring (N slots) are first_rx = 0, last_rx = N-1, get_rx = put_rx = 0 at init Following init there is a call to nv_alloc_rx_optimized() which does less_rx = get_rx - 1 for (put_rx = 0; put_rx != less_rx; put_rx++) put_rx.flags = LEN | NV_RX2_AVAIL; so it leaves one free slot and put_rx pointing at the end. Basically, get_rx is where new packets arrive, put_rx is where new buffers are added. The rx_intr aka nv_rx_process_optimized() scans while (get_rx != put_rx && !(get_rx.flags & NV_RX2_AVAIL)) { ... get_rx++ } followed by a nv_alloc_rx_optimized(). This makes sure that there is always a free slot. */ #include #include #include #define SOFTC_T fe_priv /* * support for netmap register/unregisted. We are already under core lock. * only called on the first register or the last unregister. * The "forcedeth" driver is poorly written, the reinit routine * is replicated multiple times and one way to achieve it is to * nv_change_mtu twice above ETH_DATA_LEN. */ static int forcedeth_netmap_reg(struct ifnet *dev, int onoff) { struct netmap_adapter *na = NA(dev); struct SOFTC_T *np = netdev_priv(dev); int error = 0; u8 __iomem *base = get_hwbase(dev); if (na == NULL) return EINVAL; // first half of nv_change_mtu() - down nv_disable_irq(dev); nv_napi_disable(dev); netif_tx_lock_bh(dev); netif_addr_lock(dev); spin_lock(&np->lock); /* stop engines */ nv_stop_rxtx(dev); nv_txrx_reset(dev); /* drain rx queue */ nv_drain_rxtx(dev); if (onoff) { dev->if_capenable |= IFCAP_NETMAP; na->if_transmit = (void *)dev->netdev_ops; dev->netdev_ops = &na->nm_ndo; } else { /* restore if_transmit */ dev->netdev_ops = (void *)na->if_transmit; dev->if_capenable &= ~IFCAP_NETMAP; } // second half of nv_change_mtu() -- up if (nv_init_ring(dev)) { if (!np->in_shutdown) mod_timer(&np->oom_kick, jiffies + OOM_REFILL); } /* reinit nic view of the rx queue */ writel(np->rx_buf_sz, base + NvRegOffloadConfig); setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), base + NvRegRingSizes); pci_push(base); writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); pci_push(base); /* restart rx engine */ nv_start_rxtx(dev); spin_unlock(&np->lock); netif_addr_unlock(dev); netif_tx_unlock_bh(dev); nv_napi_enable(dev); nv_enable_irq(dev); return (error); } /* * Reconcile kernel and user view of the transmit ring. 
*/ static int forcedeth_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *np = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n, lim = kring->nkr_num_slots - 1; struct ring_desc_ex *txr = np->tx_ring.ex; uint32_t lastpkt = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); /* Sync the TX descriptor list */ rmb(); /* XXX (move after tx) record completed transmissions */ // l is the current pointer, k is the last pointer l = np->get_tx.ex - txr; k = np->put_tx.ex - txr; for (n = 0; l != k; n++) { uint32_t cmdstat = le32toh(txr[l].flaglen); if (cmdstat & NV_TX2_VALID) break; if (++l == np->tx_ring_size) l = 0; } if (n > 0) { np->get_tx.ex = txr + l; kring->nr_hwavail += n; } /* now deal with new transmissions */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ l = np->put_tx.ex - txr; // NIC pointer for (n = 0; j != k; n++) { struct netmap_slot *slot = &ring->slot[j]; struct ring_desc_ex *put_tx = txr + l; int len = slot->len; int cmd = (len - 1) | NV_TX2_VALID | lastpkt; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, unload and reload map */ // netmap_reload_map(pdev, DMA_TO_DEVICE, old_paddr, addr); slot->flags &= ~NS_BUF_CHANGED; } slot->flags &= ~NS_REPORT; put_tx->bufhigh = htole32(dma_high(paddr)); put_tx->buflow = htole32(dma_low(paddr)); put_tx->flaglen = htole32(cmd); put_tx->txvlan = 0; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } np->put_tx.ex = txr + l; kring->nr_hwcur = k; /* decrease avail by number of sent packets */ kring->nr_hwavail -= n; wmb(); /* start ? */ writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(ifp) + NvRegTxRxControl); } /* update avail to what the hardware knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int forcedeth_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *np = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n, lim = kring->nkr_num_slots - 1; struct ring_desc_ex *rxr = np->rx_ring.ex; u_int resvd, refill; // refill position uint16_t slot_flags = kring->nkr_slot_flags; k = ring->cur; resvd = ring->cur; if (k > lim) return netmap_ring_reinit(kring); rmb(); l = np->get_rx.ex - rxr; /* next pkt to check */ /* put_rx is the refill position, one before nr_hwcur. * This slot is not available */ refill = np->put_rx.ex - rxr; /* refill position */ j = netmap_idx_n2k(kring, l); for (n = kring->nr_hwavail; l != refill ; n++) { uint32_t statlen = le32toh(rxr[l].flaglen); if (statlen & NV_RX2_AVAIL) /* still owned by the NIC */ break; kring->ring->slot[j].len = statlen & LEN_MASK_V2; // XXX crc? kring->ring->slot[j].flags = slot_flags; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 
0 : l + 1; } if (n != kring->nr_hwavail) { /* new received buffers */ np->get_rx.ex = rxr + l; ifp->stats.rx_packets += n - kring->nr_hwavail; kring->nr_hwavail = n; } /* skip past packets that userspace has already processed, */ j = kring->nr_hwcur; // refill is one before j if (resvd > 0) { if (resvd + ring->avail >= lim) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has returned some packets. */ l = netmap_idx_k2n(kring, j); /* NIC ring index */ for (n = 0; j != k; n++) { struct netmap_slot *slot = ring->slot + j; struct ring_desc_ex *desc = rxr + l; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { // netmap_reload_map(pdev, DMA_TO_DEVICE, old_paddr, addr); slot->flags &= ~NS_BUF_CHANGED; } desc->flaglen = htole32(NETMAP_BUF_SIZE); desc->bufhigh = htole32(dma_high(paddr)); desc->buflow = htole32(dma_low(paddr)); // enable the previous buffer rxr[refill].flaglen |= htole32(NV_RX2_AVAIL); refill = (refill == lim) ? 0 : refill + 1; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; np->put_rx.ex = rxr + refill; /* Flush the RX DMA ring */ wmb(); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } /* * Additional routines to init the tx and rx rings. * In other drivers we do that inline in the main code. */ static int forcedeth_netmap_tx_init(struct SOFTC_T *np) { struct ring_desc_ex *desc; int i, n; struct netmap_adapter *na = NA(np->dev); struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0); /* slot is NULL if we are not in netmap mode */ if (!slot) return 0; /* in netmap mode, overwrite addresses and maps */ //txd = np->rl_ldata.rl_tx_desc; desc = np->tx_ring.ex; n = np->tx_ring_size; /* l points in the netmap ring, i points in the NIC ring */ for (i = 0; i < n; i++) { int l = netmap_idx_n2k(&na->tx_rings[0], i); uint64_t paddr; PNMB(slot + l, &paddr); desc[i].flaglen = 0; desc[i].bufhigh = htole32(dma_high(paddr)); desc[i].buflow = htole32(dma_low(paddr)); } return 1; } static int forcedeth_netmap_rx_init(struct SOFTC_T *np) { struct netmap_adapter *na = NA(np->dev); struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0); struct ring_desc_ex *desc = np->rx_ring.ex; uint32_t cmdstat; int i, lim; if (!slot) return 0; /* * userspace knows that hwavail packets were ready before the * reset, so we need to tell the NIC that last hwavail * descriptors of the ring are still owned by the driver. */ lim = np->rx_ring_size - 1 - na->rx_rings[0].nr_hwavail; for (i = 0; i < np->rx_ring_size; i++) { uint64_t paddr; int l = netmap_idx_n2k(&na->rx_rings[0], i); PNMB(slot + l, &paddr); netmap_reload_map(np->rl_ldata.rl_rx_mtag, np->rl_ldata.rl_rx_desc[i].rx_dmamap, addr); desc[i].bufhigh = htole32(dma_high(paddr)); desc[i].buflow = htole32(dma_low(paddr)); cmdstat = NETMAP_BUF_SIZE; if (i < lim) cmdstat |= NV_RX2_AVAIL; desc[i].flaglen = htole32(cmdstat); } // XXX ring end anywhere ? 
np->get_rx.ex = desc; np->put_rx.ex = desc + lim; return 1; } static void forcedeth_netmap_attach(struct SOFTC_T *np) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = np->dev; na.num_tx_desc = np->tx_ring_size; na.num_rx_desc = np->tx_ring_size; na.nm_txsync = forcedeth_netmap_txsync; na.nm_rxsync = forcedeth_netmap_rxsync; na.nm_register = forcedeth_netmap_reg; netmap_attach(&na, 1); } netmap-release/LINUX/if_e1000_netmap.h000644 000765 000024 00000023563 12227500737 020071 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Gaetano Catalli, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $Id: if_e1000_netmap.h 10878 2012-04-12 22:28:48Z luigi $ * * netmap support for e1000 (lem) * For details on netmap support please see ixgbe_netmap.h */ #include #include #include #define SOFTC_T e1000_adapter /* * Register/unregister, similar to e1000_reinit_safe() */ static int e1000_netmap_reg(struct ifnet *ifp, int onoff) { struct SOFTC_T *adapter = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; rtnl_lock(); while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); if (netif_running(adapter->netdev)) e1000_down(adapter); if (onoff) { /* enable netmap mode */ ifp->if_capenable |= IFCAP_NETMAP; na->if_transmit = (void *)ifp->netdev_ops; ifp->netdev_ops = &na->nm_ndo; } else { ifp->if_capenable &= ~IFCAP_NETMAP; ifp->netdev_ops = (void *)na->if_transmit; } if (netif_running(adapter->netdev)) e1000_up(adapter); else e1000_reset(adapter); clear_bit(__E1000_RESETTING, &adapter->flags); rtnl_unlock(); return (error); } /* * Reconcile kernel and user view of the transmit ring. 
*/ static int e1000_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *adapter = netdev_priv(ifp); struct e1000_tx_ring* txr = &adapter->tx_ring[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1; /* generate an interrupt approximately every half ring */ int report_frequency = kring->nkr_num_slots >> 1; if (!netif_carrier_ok(ifp)) return 0; /* take a copy of ring->cur now, and never read it again */ k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); rmb(); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* slot is the current slot in the netmap ring */ struct netmap_slot *slot = &ring->slot[j]; /* curr is the current slot in the nic ring */ struct e1000_tx_desc *curr = E1000_TX_DESC(*txr, l); int flags = ((slot->flags & NS_REPORT) || j == 0 || j == report_frequency) ? E1000_TXD_CMD_RS : 0; uint64_t paddr; void *addr = PNMB(slot, &paddr); u_int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { // netmap_reload_map(pdev, DMA_TO_DEVICE, old_addr, paddr); curr->buffer_addr = htole64(paddr); slot->flags &= ~NS_BUF_CHANGED; } curr->upper.data = 0; curr->lower.data = htole32(adapter->txd_cmd | len | (E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS | flags) ); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwcur = k; /* the saved ring->cur */ kring->nr_hwavail -= n; wmb(); /* synchronize writes to the NIC ring */ txr->next_to_use = l; writel(l, adapter->hw.hw_addr + txr->tdt); mmiowb(); // XXX where do we need this ? } if (n == 0 || kring->nr_hwavail < 1) { int delta; /* record completed transmissions using TDH */ l = readl(adapter->hw.hw_addr + txr->tdh); if (l >= kring->nkr_num_slots) { /* XXX can happen */ D("TDH wrap %d", l); l -= kring->nkr_num_slots; } delta = l - txr->next_to_clean; if (delta) { /* some tx completed, increment hwavail. */ if (delta < 0) delta += kring->nkr_num_slots; txr->next_to_clean = l; kring->nr_hwavail += delta; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int e1000_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *adapter = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); struct e1000_rx_ring *rxr = &adapter->rx_ring[ring_nr]; struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; if (k > lim) return netmap_ring_reinit(kring); rmb(); /* * Import newly received packets into the netmap ring. * j is an index in the netmap ring, l in the NIC ring. 
*/ l = rxr->next_to_clean; j = netmap_idx_n2k(kring, l); if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; for (n = 0; ; n++) { struct e1000_rx_desc *curr = E1000_RX_DESC(*rxr, l); uint32_t staterr = le32toh(curr->status); if ((staterr & E1000_RXD_STAT_DD) == 0) break; ring->slot[j].len = le16toh(curr->length) - 4; ring->slot[j].flags = slot_flags; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ rxr->next_to_clean = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* skip past packets that userspace has released */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. */ l = netmap_idx_k2n(kring, j); /* NIC ring index */ for (n = 0; j != k; n++) { struct netmap_slot *slot = &ring->slot[j]; struct e1000_rx_desc *curr = E1000_RX_DESC(*rxr, l); uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { // netmap_reload_map(...) curr->buffer_addr = htole64(paddr); slot->flags &= ~NS_BUF_CHANGED; } curr->status = 0; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; wmb(); rxr->next_to_use = l; // XXX not really used /* * IMPORTANT: we must leave one free slot in the ring, * so move l back by one unit */ l = (l == 0) ? lim : l - 1; writel(l, adapter->hw.hw_addr + rxr->rdt); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } /* diagnostic routine to catch errors */ static void e1000_no_rx_alloc(struct SOFTC_T *adapter, struct e1000_rx_ring *rxr, int cleaned_count) { D("e1000->alloc_rx_buf should not be called"); } /* * Make the tx and rx rings point to the netmap buffers. */ static int e1000_netmap_init_buffers(struct SOFTC_T *adapter) { struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->netdev; struct netmap_adapter* na = NA(ifp); struct netmap_slot* slot; struct e1000_tx_ring* txr = &adapter->tx_ring[0]; unsigned int i, r, si; uint64_t paddr; if (!na || !(na->ifp->if_capenable & IFCAP_NETMAP)) return 0; adapter->alloc_rx_buf = e1000_no_rx_alloc; for (r = 0; r < na->num_rx_rings; r++) { struct e1000_rx_ring *rxr; slot = netmap_reset(na, NR_RX, r, 0); if (!slot) { D("strange, null netmap ring %d", r); return 0; } rxr = &adapter->rx_ring[r]; for (i = 0; i < rxr->count; i++) { // XXX the skb check and cleanup can go away struct e1000_buffer *bi = &rxr->buffer_info[i]; si = netmap_idx_n2k(&na->rx_rings[r], i); PNMB(slot + si, &paddr); if (bi->skb) D("rx buf %d was set", i); bi->skb = NULL; // netmap_load_map(...) E1000_RX_DESC(*rxr, i)->buffer_addr = htole64(paddr); } rxr->next_to_use = 0; /* preserve buffers already made available to clients */ i = rxr->count - 1 - na->rx_rings[0].nr_hwavail; if (i < 0) i += rxr->count; D("i now is %d", i); wmb(); /* Force memory writes to complete */ writel(i, hw->hw_addr + rxr->rdt); } /* now initialize the tx ring(s) */ slot = netmap_reset(na, NR_TX, 0, 0); for (i = 0; i < na->num_tx_desc; i++) { si = netmap_idx_n2k(&na->tx_rings[0], i); PNMB(slot + si, &paddr); // netmap_load_map(...) 
E1000_TX_DESC(*txr, i)->buffer_addr = htole64(paddr); } return 1; } static void e1000_netmap_attach(struct SOFTC_T *adapter) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = adapter->netdev; na.num_tx_desc = adapter->tx_ring[0].count; na.num_rx_desc = adapter->rx_ring[0].count; na.nm_register = e1000_netmap_reg; na.nm_txsync = e1000_netmap_txsync; na.nm_rxsync = e1000_netmap_rxsync; netmap_attach(&na, 1); } /* end of file */ netmap-release/LINUX/if_e1000e_netmap.h000644 000765 000024 00000025656 12227500737 020243 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Gaetano Catalli, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * BSD Copyright * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $Id: if_e1000e_netmap.h 10670 2012-02-27 21:15:38Z luigi $ * * netmap support for e1000e (em) * For details on netmap support please see ixgbe_netmap.h * * The driver supports 1 TX and 1 RX ring. Single lock. * tx buffer address only written on change. * Apparently the driver uses extended descriptors on rx from 3.2.32 * Rx Crc stripping ? 
*/ #include #include #include #define SOFTC_T e1000_adapter #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) #warning this driver uses extended descriptors #define NM_E1K_RX_DESC_T union e1000_rx_desc_extended #define NM_E1R_RX_STATUS wb.upper.status_error #define NM_E1R_RX_LENGTH wb.upper.length #define NM_E1R_RX_BUFADDR read.buffer_addr #else #warning this driver uses regular descriptors #define E1000_RX_DESC_EXT E1000_RX_DESC // XXX workaround #define NM_E1K_RX_DESC_T struct e1000_rx_desc #define NM_E1R_RX_STATUS status #define NM_E1R_RX_BUFADDR buffer_addr #define NM_E1R_RX_LENGTH length #endif /* up to 3.2.x */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) #define NM_WR_TX_TAIL(_x) writel(_x, txr->tail) // XXX tx_ring #define NM_WR_RX_TAIL(_x) writel(_x, rxr->tail) // XXX rx_ring #define NM_RD_TX_HEAD() readl(txr->head) #else #define NM_WR_TX_TAIL(_x) writel(_x, adapter->hw.hw_addr + txr->tail) #define NM_WR_RX_TAIL(_x) writel(_x, adapter->hw.hw_addr + rxr->tail) #define NM_RD_TX_HEAD() readl(adapter->hw.hw_addr + txr->head) #endif /* < 3.4.0 */ /* * Register/unregister, similar to e1000_reinit_safe() */ static int e1000_netmap_reg(struct ifnet *ifp, int onoff) { struct SOFTC_T *adapter = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; rtnl_lock(); while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); if (netif_running(adapter->netdev)) e1000e_down(adapter); if (onoff) { /* enable netmap mode */ ifp->if_capenable |= IFCAP_NETMAP; na->if_transmit = (void *)ifp->netdev_ops; ifp->netdev_ops = &na->nm_ndo; } else { ifp->if_capenable &= ~IFCAP_NETMAP; ifp->netdev_ops = (void *)na->if_transmit; } if (netif_running(adapter->netdev)) e1000e_up(adapter); else e1000e_reset(adapter); clear_bit(__E1000_RESETTING, &adapter->state); rtnl_unlock(); return (error); } /* * Reconcile kernel and user view of the transmit ring. */ static int e1000_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *adapter = netdev_priv(ifp); struct e1000_ring* txr = &adapter->tx_ring[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1; /* generate an interrupt approximately every half ring */ int report_frequency = kring->nkr_num_slots >> 1; if (!netif_carrier_ok(ifp)) return 0; /* take a copy of ring->cur now, and never read it again */ k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); rmb(); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* slot is the current slot in the netmap ring */ struct netmap_slot *slot = &ring->slot[j]; /* curr is the current slot in the nic ring */ struct e1000_tx_desc *curr = E1000_TX_DESC(*txr, l); int flags = ((slot->flags & NS_REPORT) || j == 0 || j == report_frequency) ? 
E1000_TXD_CMD_RS : 0; uint64_t paddr; void *addr = PNMB(slot, &paddr); u_int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { // netmap_reload_map(pdev, DMA_TO_DEVICE, old_paddr, addr) curr->buffer_addr = htole64(paddr); slot->flags &= ~NS_BUF_CHANGED; } curr->upper.data = 0; curr->lower.data = htole32(adapter->txd_cmd | len | (E1000_TXD_CMD_EOP | flags) ); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwcur = k; /* the saved ring->cur */ kring->nr_hwavail -= n; wmb(); /* synchronize writes to the NIC ring */ txr->next_to_use = l; NM_WR_TX_TAIL(l); mmiowb(); // XXX where do we need this ? } if (n == 0 || kring->nr_hwavail < 1) { int delta; /* record completed transmissions using TDH */ l = NM_RD_TX_HEAD(); // XXX could scan descriptors ? if (l >= kring->nkr_num_slots) { /* XXX can it happen ? */ D("TDH wrap %d", l); l -= kring->nkr_num_slots; } delta = l - txr->next_to_clean; if (delta) { /* some tx completed, increment hwavail. */ if (delta < 0) delta += kring->nkr_num_slots; txr->next_to_clean = l; kring->nr_hwavail += delta; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int e1000_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *adapter = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); struct e1000_ring *rxr = &adapter->rx_ring[ring_nr]; struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; int strip_crc = (adapter->flags2 & FLAG2_CRC_STRIPPING) ? 0 : 4; u_int k = ring->cur, resvd = ring->reserved; if (!netif_carrier_ok(ifp)) return 0; if (k > lim) return netmap_ring_reinit(kring); rmb(); /* * Import newly received packets into the netmap ring. * j is an index in the netmap ring, l in the NIC ring. */ l = rxr->next_to_clean; j = netmap_idx_n2k(kring, l); if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; for (n = 0; ; n++) { NM_E1K_RX_DESC_T *curr = E1000_RX_DESC_EXT(*rxr, l); uint32_t staterr = le32toh(curr->NM_E1R_RX_STATUS); if ((staterr & E1000_RXD_STAT_DD) == 0) break; ring->slot[j].len = le16toh(curr->NM_E1R_RX_LENGTH) - strip_crc; ring->slot[j].flags = slot_flags; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ rxr->next_to_clean = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* skip past packets that userspace has released */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. */ l = netmap_idx_k2n(kring, j); /* NIC ring index */ for (n = 0; j != k; n++) { struct netmap_slot *slot = &ring->slot[j]; NM_E1K_RX_DESC_T *curr = E1000_RX_DESC_EXT(*rxr, l); uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } curr->NM_E1R_RX_BUFADDR = htole64(paddr); /* reload ext.desc. addr. 
*/ if (slot->flags & NS_BUF_CHANGED) { // netmap_reload_map(pdev, DMA_TO_DEVICE, old_paddr, addr) slot->flags &= ~NS_BUF_CHANGED; } curr->NM_E1R_RX_STATUS = 0; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; wmb(); rxr->next_to_use = l; // XXX not really used /* * IMPORTANT: we must leave one free slot in the ring, * so move l back by one unit */ l = (l == 0) ? lim : l - 1; NM_WR_RX_TAIL(l); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } /* diagnostic routine to catch errors */ static void e1000e_no_rx_alloc(struct SOFTC_T *a, int n) { D("e1000->alloc_rx_buf should not be called"); } /* * Make the tx and rx rings point to the netmap buffers. */ static int e1000e_netmap_init_buffers(struct SOFTC_T *adapter) { struct ifnet *ifp = adapter->netdev; struct netmap_adapter* na = NA(ifp); struct netmap_slot* slot; struct e1000_ring *rxr = adapter->rx_ring; struct e1000_ring *txr = adapter->tx_ring; int i, si; uint64_t paddr; slot = netmap_reset(na, NR_RX, 0, 0); if (!slot) return 0; // not in netmap mode adapter->alloc_rx_buf = (void*)e1000e_no_rx_alloc; for (i = 0; i < rxr->count; i++) { // XXX the skb check and cleanup can go away struct e1000_buffer *bi = &rxr->buffer_info[i]; si = netmap_idx_n2k(&na->rx_rings[0], i); PNMB(slot + si, &paddr); if (bi->skb) D("rx buf %d was set", i); bi->skb = NULL; // XXX leak if set // netmap_load_map(...) E1000_RX_DESC_EXT(*rxr, i)->NM_E1R_RX_BUFADDR = htole64(paddr); } rxr->next_to_use = 0; /* preserve buffers already made available to clients */ i = rxr->count - 1 - na->rx_rings[0].nr_hwavail; wmb(); /* Force memory writes to complete */ NM_WR_RX_TAIL(i); /* now initialize the tx ring */ slot = netmap_reset(na, NR_TX, 0, 0); for (i = 0; i < na->num_tx_desc; i++) { si = netmap_idx_n2k(&na->tx_rings[0], i); PNMB(slot + si, &paddr); // netmap_load_map(...) E1000_TX_DESC(*txr, i)->buffer_addr = htole64(paddr); } return 1; } static void e1000_netmap_attach(struct SOFTC_T *adapter) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = adapter->netdev; na.num_tx_desc = adapter->tx_ring->count; na.num_rx_desc = adapter->rx_ring->count; na.nm_register = e1000_netmap_reg; na.nm_txsync = e1000_netmap_txsync; na.nm_rxsync = e1000_netmap_rxsync; netmap_attach(&na, 1); } /* end of file */ netmap-release/LINUX/if_igb_netmap.h000644 000765 000024 00000027065 12227500737 020106 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $Id: if_igb_netmap.h 10878 2012-04-12 22:28:48Z luigi $ * * netmap support for "igb" (untested) * For details on netmap support please see ixgbe_netmap.h * This supports multiple tx/rx rings, multiple locks ? * CRCstrip, address rewrite ? */ #include #include #include #define SOFTC_T igb_adapter /* * Adapt to different versions. E1000_TX_DESC_ADV etc. have * dropped the _ADV suffix in newer versions. Also the first * argument is now a pointer not the object. */ #ifndef E1000_TX_DESC_ADV #define E1000_TX_DESC_ADV(_r, _i) IGB_TX_DESC(&(_r), _i) #define E1000_RX_DESC_ADV(_r, _i) IGB_RX_DESC(&(_r), _i) #define READ_TDH(_txr) ({struct e1000_hw *hw = &adapter->hw;rd32(E1000_TDH((_txr)->reg_idx));} ) #else /* up to 3.2, approximately */ #define igb_tx_buffer igb_buffer #define tx_buffer_info buffer_info #define igb_rx_buffer igb_buffer #define rx_buffer_info buffer_info #define READ_TDH(_txr) readl((_txr)->head) #endif /* * Register/unregister, similar to e1000_reinit_safe() */ static int igb_netmap_reg(struct ifnet *ifp, int onoff) { struct SOFTC_T *adapter = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; rtnl_lock(); while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) msleep(1); if (netif_running(adapter->netdev)) igb_down(adapter); if (onoff) { /* enable netmap mode */ ifp->if_capenable |= IFCAP_NETMAP; na->if_transmit = (void *)ifp->netdev_ops; ifp->netdev_ops = &na->nm_ndo; } else { ifp->if_capenable &= ~IFCAP_NETMAP; ifp->netdev_ops = (void *)na->if_transmit; } if (netif_running(adapter->netdev)) igb_up(adapter); else igb_reset(adapter); clear_bit(__IGB_RESETTING, &adapter->state); rtnl_unlock(); return (error); } /* * Reconcile kernel and user view of the transmit ring. */ static int igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *adapter = netdev_priv(ifp); struct igb_ring* txr = adapter->tx_ring[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1; /* generate an interrupt approximately every half ring */ int report_frequency = kring->nkr_num_slots >> 1; if (!netif_carrier_ok(ifp)) return 0; /* take a copy of ring->cur now, and never read it again */ k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); rmb(); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ uint32_t olinfo_status=0; l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* slot is the current slot in the netmap ring */ struct netmap_slot *slot = &ring->slot[j]; /* curr is the current slot in the nic ring */ union e1000_adv_tx_desc *curr = E1000_TX_DESC_ADV(*txr, l); int flags = ((slot->flags & NS_REPORT) || j == 0 || j == report_frequency) ? 
E1000_TXD_CMD_RS : 0; uint64_t paddr; void *addr = PNMB(slot, &paddr); u_int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { // netmap_reload_map(pdev, DMA_TO_DEVICE, old_paddr, addr); slot->flags &= ~NS_BUF_CHANGED; } curr->read.buffer_addr = htole64(paddr); // XXX check olinfo and cmd_type_len curr->read.olinfo_status = htole32(olinfo_status | (len<< E1000_ADVTXD_PAYLEN_SHIFT)); curr->read.cmd_type_len = htole32(len | E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT | E1000_TXD_CMD_EOP | flags); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwcur = k; /* the saved ring->cur */ ND("ring %d sent %d", ring_nr, n); kring->nr_hwavail -= n; wmb(); /* synchronize writes to the NIC ring */ txr->next_to_use = l; writel(l, txr->tail); mmiowb(); // XXX where do we need this ? } if (kring->nr_hwavail < 0 || kring->nr_hwavail > lim) D("ouch, hwavail %d", kring->nr_hwavail); if (n == 0 || kring->nr_hwavail < 1) { int delta; /* record completed transmissions using TDH */ l = READ_TDH(txr); if (l >= kring->nkr_num_slots) { /* XXX can happen */ D("TDH wrap %d", l); l -= kring->nkr_num_slots; } delta = l - txr->next_to_clean; ND("ring %d tdh %d delta %d", ring_nr, l, delta); if (delta) { /* some tx completed, increment hwavail. */ if (delta < 0) delta += kring->nkr_num_slots; txr->next_to_clean = l; /* fool the timer so we don't get watchdog resets */ txr->next_to_use = l; kring->nr_hwavail += delta; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *adapter = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); struct igb_ring *rxr = adapter->rx_ring[ring_nr]; struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; if (!netif_carrier_ok(ifp)) return 0; if (k > lim) return netmap_ring_reinit(kring); rmb(); /* * Import newly received packets into the netmap ring. * j is an index in the netmap ring, l in the NIC ring. */ l = rxr->next_to_clean; j = netmap_idx_n2k(kring, l); if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; for (n = 0; ; n++) { union e1000_adv_rx_desc *curr = E1000_RX_DESC_ADV(*rxr, l); uint32_t staterr = le32toh(curr->wb.upper.status_error); if ((staterr & E1000_RXD_STAT_DD) == 0) break; ring->slot[j].len = le16toh(curr->wb.upper.length); ring->slot[j].flags = slot_flags; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ rxr->next_to_clean = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* skip past packets that userspace has released */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. 
*/ l = netmap_idx_k2n(kring, j); /* NIC ring index */ for (n = 0; j != k; n++) { struct netmap_slot *slot = &ring->slot[j]; union e1000_adv_rx_desc *curr = E1000_RX_DESC_ADV(*rxr, l); uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } if (slot->flags & NS_BUF_CHANGED) { // netmap_reload_map(pdev, DMA_FROM_DEVICE, old_paddr, addr); slot->flags &= ~NS_BUF_CHANGED; } curr->read.pkt_addr = htole64(paddr); curr->read.hdr_addr = 0; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; wmb(); rxr->next_to_use = l; // XXX not really used /* * IMPORTANT: we must leave one free slot in the ring, * so move l back by one unit */ l = (l == 0) ? lim : l - 1; writel(l, rxr->tail); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } static int igb_netmap_configure_tx_ring(struct SOFTC_T *adapter, int ring_nr) { struct ifnet *ifp = adapter->netdev; struct netmap_adapter* na = NA(ifp); struct netmap_slot* slot = netmap_reset(na, NR_TX, ring_nr, 0); struct igb_ring *txr = adapter->tx_ring[ring_nr]; int i, si; void *addr; uint64_t paddr; if (!slot) return 0; for (i = 0; i < na->num_tx_desc; i++) { union e1000_adv_tx_desc *tx_desc; si = netmap_idx_n2k(&na->tx_rings[ring_nr], i); addr = PNMB(slot + si, &paddr); tx_desc = E1000_TX_DESC_ADV(*txr, i); tx_desc->read.buffer_addr = htole64(paddr); /* actually we don't care to init the rings here */ } return 1; // success } static int igb_netmap_configure_rx_ring(struct igb_ring *rxr) { struct ifnet *ifp = rxr->netdev; struct netmap_adapter* na = NA(ifp); int reg_idx = rxr->reg_idx; struct netmap_slot* slot = netmap_reset(na, NR_RX, reg_idx, 0); u_int i; /* * XXX watch out, the main driver must not use * split headers. The buffer len should be written * into wr32(E1000_SRRCTL(reg_idx), srrctl) with options * something like * srrctl = ALIGN(buffer_len, 1024) >> * E1000_SRRCTL_BSIZEPKT_SHIFT; * srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; * srrctl |= E1000_SRRCTL_DROP_EN; */ if (!slot) return 0; // not in netmap mode for (i = 0; i < rxr->count; i++) { union e1000_adv_rx_desc *rx_desc; uint64_t paddr; int si = netmap_idx_n2k(&na->rx_rings[reg_idx], i); #if 0 // XXX the skb check can go away struct igb_rx_buffer *bi = &rxr->rx_buffer_info[i]; if (bi->skb) D("rx buf %d was set", i); bi->skb = NULL; // XXX leak if set #endif /* useless */ PNMB(slot + si, &paddr); rx_desc = E1000_RX_DESC_ADV(*rxr, i); rx_desc->read.hdr_addr = 0; rx_desc->read.pkt_addr = htole64(paddr); } rxr->next_to_use = 0; /* preserve buffers already made available to clients */ i = rxr->count - 1 - na->rx_rings[reg_idx].nr_hwavail; wmb(); /* Force memory writes to complete */ ND("%s rxr%d.tail %d", ifp->if_xname, reg_idx, i); writel(i, rxr->tail); return 1; // success } static void igb_netmap_attach(struct SOFTC_T *adapter) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = adapter->netdev; na.num_tx_desc = adapter->tx_ring_count; na.num_rx_desc = adapter->rx_ring_count; na.nm_register = igb_netmap_reg; na.nm_txsync = igb_netmap_txsync; na.nm_rxsync = igb_netmap_rxsync; na.num_tx_rings = adapter->num_tx_queues; D("using %d TX and %d RX queues", adapter->num_tx_queues, adapter->num_rx_queues); netmap_attach(&na, adapter->num_rx_queues); } /* end of file */ netmap-release/LINUX/if_re_netmap_linux.h000644 000765 000024 00000022201 12220335545 021151 0ustar00luigistaff000000 000000 /* * Copyright (C) 2011 Luigi Rizzo. 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $Id: if_re_netmap_linux.h 10679 2012-02-28 13:42:18Z luigi $ * * netmap support for "r8169" (re) (UNTESTED) * For details on netmap support please see ixgbe_netmap.h * 1 tx ring, 1 rx ring, 1 lock, crcstrip ? reinit tx addr, */ #include #include #include static void rtl8169_wait_for_quiescence(struct ifnet *); #define SOFTC_T rtl8169_private /* * Register/unregister, mostly the reinit task */ static int re_netmap_reg(struct ifnet *ifp, int onoff) { struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; rtnl_lock(); rtl8169_wait_for_quiescence(ifp); rtl8169_close(ifp); if (onoff) { /* enable netmap mode */ ifp->if_capenable |= IFCAP_NETMAP; na->if_transmit = (void *)ifp->netdev_ops; ifp->netdev_ops = &na->nm_ndo; if (rtl8169_open(ifp) < 0) { error = ENOMEM; goto fail; } } else { fail: ifp->if_capenable &= ~IFCAP_NETMAP; ifp->netdev_ops = (void *)na->if_transmit; error = rtl8169_open(ifp) ? EINVAL : 0; } rtnl_unlock(); return (error); } /* * Reconcile kernel and user view of the transmit ring. */ static int re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *sc = netdev_priv(ifp); void __iomem *ioaddr = sc->mmio_addr; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k, l, n = 0, lim = kring->nkr_num_slots - 1; k = ring->cur; if (k > lim) return netmap_ring_reinit(kring); rmb(); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ l = sc->cur_tx; // XXX use internal macro ? 
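		/*
		 * For each new slot: copy length and buffer address into the
		 * NIC descriptor, set DescOwn (plus RingEnd on the last
		 * descriptor) and let the chip pick the frames up at the
		 * TxPoll write below.
		 */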
for (n = 0; j != k; n++) { /* slot is the current slot in the netmap ring */ struct netmap_slot *slot = &ring->slot[j]; /* curr is the current slot in the nic ring */ struct TxDesc *curr = &sc->TxDescArray[l]; uint32_t flags = slot->len | LastFrag | DescOwn | FirstFrag ; uint64_t paddr; void *addr = PNMB(slot, &paddr); int len = slot->len; if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { sc->cur_tx = l; // XXX fix return netmap_ring_reinit(kring); } if (l == lim) /* mark end of ring */ flags |= RingEnd; if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, unload and reload map */ // netmap_reload_map(pdev, DMA_TO_DEVICE, old_paddr, addr); curr->addr = htole64(paddr); slot->flags &= ~NS_BUF_CHANGED; } slot->flags &= ~NS_REPORT; curr->opts1 = htole32(flags); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwcur = k; /* the saved ring->cur */ kring->nr_hwavail -= n; sc->cur_tx = l; wmb(); /* synchronize writes to the NIC ring */ RTL_W8(TxPoll, NPQ); /* start ? */ } if (n == 0 || kring->nr_hwavail < 1) { /* record completed transmissions */ for (n = 0, l = sc->dirty_tx; l != sc->cur_tx; n++) { if (le32toh(sc->TxDescArray[l].opts1) & DescOwn) break; if (++l == NUM_TX_DESC) l = 0; } if (n > 0) { sc->dirty_tx = l; kring->nr_hwavail += n; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. */ static int re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *sc = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; if (k > lim) return netmap_ring_reinit(kring); rmb(); /* * The device uses all the buffers in the ring, so we need * another termination condition in addition to DescOwn * cleared (all buffers could have it cleared. The easiest one * is to limit the amount of data reported up to 'lim' */ l = sc->cur_rx; /* next pkt to check */ j = netmap_idx_n2k(kring, l); if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; for (n = kring->nr_hwavail; n < lim ; n++) { struct RxDesc *cur_rx = &sc->RxDescArray[l]; uint32_t rxstat = le32toh(cur_rx->opts1); uint32_t total_len; if ((rxstat & DescOwn) != 0) break; total_len = rxstat & 0x00001FFF; /* XXX subtract crc */ total_len = (total_len < 4) ? 0 : total_len - 4; kring->ring->slot[j].len = total_len; kring->ring->slot[j].flags = slot_flags; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n != kring->nr_hwavail) { sc->cur_rx = l; ifp->stats.rx_packets += n - kring->nr_hwavail; kring->nr_hwavail = n; } } /* skip past packets that userspace has released */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. 
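 * Rewrite the buffer address when NS_BUF_CHANGED is set and give the
 * descriptors back to the chip with DescOwn (plus RingEnd on the last
 * slot of the array).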
*/ l = netmap_idx_k2n(kring, j); /* NIC ring index */ for (n = 0; j != k; n++) { struct netmap_slot *slot = ring->slot + j; struct RxDesc *curr = &sc->RxDescArray[l]; uint32_t flags = NETMAP_BUF_SIZE | DescOwn; uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) { /* bad buf */ return netmap_ring_reinit(kring); } if (l == lim) /* mark end of ring */ flags |= RingEnd; slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { // netmap_reload_map(pdev, DMA_TO_DEVICE, old_paddr, addr); curr->addr = htole64(paddr); slot->flags &= ~NS_BUF_CHANGED; } curr->opts1 = htole32(flags); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; wmb(); // XXX needed ? } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; } /* * Additional routines to init the tx and rx rings. * In other drivers we do that inline in the main code. */ static int re_netmap_tx_init(struct SOFTC_T *sc) { struct netmap_adapter *na = NA(sc->dev); struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0); struct TxDesc *desc = sc->TxDescArray; int i, l; uint64_t paddr; /* slot is NULL if we are not in netmap mode */ if (!slot) return 0; /* l points in the netmap ring, i points in the NIC ring */ for (i = 0; i < na->num_tx_desc; i++) { l = netmap_idx_n2k(&na->tx_rings[0], i); PNMB(slot + l, &paddr); desc[i].addr = htole64(paddr); } return 1; } static int re_netmap_rx_init(struct SOFTC_T *sc) { struct netmap_adapter *na = NA(sc->dev); struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0); struct RxDesc *desc = sc->RxDescArray; uint32_t cmdstat; int i, lim, l; uint64_t paddr; if (!slot) return 0; /* * userspace knows that hwavail packets were ready before * the reset, so only indexes < lim are made available for rx. * XXX we use all slots, so no '-1' here */ lim = na->num_rx_desc /* - 1 */ - na->rx_rings[0].nr_hwavail; for (i = 0; i < na->num_rx_desc; i++) { l = netmap_idx_n2k(&na->rx_rings[0], i); PNMB(slot + l, &paddr); cmdstat = NETMAP_BUF_SIZE; if (i == na->num_rx_desc - 1) cmdstat |= RingEnd; if (i < lim) cmdstat |= DescOwn; desc[i].opts1 = htole32(cmdstat); desc[i].addr = htole64(paddr); } return 1; } static void re_netmap_attach(struct SOFTC_T *sc) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = sc->dev; na.num_tx_desc = NUM_TX_DESC; na.num_rx_desc = NUM_RX_DESC; na.nm_txsync = re_netmap_txsync; na.nm_rxsync = re_netmap_rxsync; na.nm_register = re_netmap_reg; netmap_attach(&na, 1); } /* end of file */ netmap-release/LINUX/ixgbe_netmap_linux.h000644 000765 000024 00000042456 12220335545 021201 0ustar00luigistaff000000 000000 /* * Copyright (C) 2012 Matteo Landi, Luigi Rizzo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * $FreeBSD: head/sys/dev/netmap/ixgbe_netmap.h 230572 2012-01-26 09:55:16Z luigi $ * $Id: ixgbe_netmap_linux.h 10670 2012-02-27 21:15:38Z luigi $ * * netmap support for ixgbe (LINUX version) * * supports N TX and RX queues, separate locks, hw crc strip, * address rewrite in txsync * * This file is meant to be a reference on how to implement * netmap support for a network driver. * This file contains code but only static or inline functions * that are used by a single driver. To avoid replication of * code we just #include it near the beginning of the * standard driver. */ #include #include #include #define SOFTC_T ixgbe_adapter /* * Adaptation to various version of the driver. * Recent drivers (3.4 and above) redefine some macros */ #ifndef IXGBE_TX_DESC_ADV #define IXGBE_TX_DESC_ADV IXGBE_TX_DESC #define IXGBE_RX_DESC_ADV IXGBE_RX_DESC #endif /* * Register/unregister. We are already under core lock. * Only called on the first register or the last unregister. */ static int ixgbe_netmap_reg(struct ifnet *ifp, int onoff) { struct SOFTC_T *adapter = netdev_priv(ifp); struct netmap_adapter *na = NA(ifp); int error = 0; if (na == NULL) return EINVAL; /* no netmap support here */ /* Tell the stack that the interface is no longer active */ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); rtnl_lock(); if (netif_running(adapter->netdev)) ixgbe_down(adapter); if (onoff) { /* enable netmap mode */ ifp->if_capenable |= IFCAP_NETMAP; /* save if_transmit and replace with our routine */ na->if_transmit = (void *)ifp->netdev_ops; ifp->netdev_ops = &na->nm_ndo; } else { /* reset normal mode (explicit request or netmap failed) */ /* restore if_transmit */ ifp->netdev_ops = (void *)na->if_transmit; ifp->if_capenable &= ~IFCAP_NETMAP; } if (netif_running(adapter->netdev)) ixgbe_up(adapter); /* also enables intr */ rtnl_unlock(); clear_bit(__IXGBE_RESETTING, &adapter->state); return (error); } /* * Reconcile kernel and user view of the transmit ring. * This routine might be called frequently so it must be efficient. * * Userspace has filled tx slots up to ring->cur (excluded). * The last unused slot previously known to the kernel was kring->nkr_hwcur, * and the last interrupt reported kring->nr_hwavail slots available. * * This function runs under lock (acquired from the caller or internally). * It must first update ring->avail to what the kernel knows, * subtract the newly used slots (ring->cur - kring->nkr_hwcur) * from both avail and nr_hwavail, and set ring->nkr_hwcur = ring->cur * issuing a dmamap_sync on all slots. * * Since ring comes from userspace, its content must be read only once, * and validated before being used to update the kernel's structures. * (this is also true for every use of ring in the kernel). * * ring->avail is never used, only checked for bogus values. 
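 *
 * As an illustration only (not part of the driver), a minimal userspace
 * sender honouring the contract above could be sketched with the public
 * netmap.h API, error handling omitted:
 *
 *	struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);
 *	while (tosend > 0 && txring->avail > 0) {
 *		struct netmap_slot *slot = &txring->slot[txring->cur];
 *		char *buf = NETMAP_BUF(txring, slot->buf_idx);
 *		... fill buf and set slot->len ...
 *		txring->cur = NETMAP_RING_NEXT(txring, txring->cur);
 *		txring->avail--;
 *		tosend--;
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);	... the kernel then calls txsync ...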
* */ static int ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *adapter = netdev_priv(ifp); struct ixgbe_ring *txr = adapter->tx_ring[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->tx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, k = ring->cur, l, n, lim = kring->nkr_num_slots - 1; /* * ixgbe can generate an interrupt on every tx packet, but it * seems very expensive, so we interrupt once every half ring, * or when requested with NS_REPORT */ int report_frequency = kring->nkr_num_slots >> 1; if (!netif_carrier_ok(ifp)) return 0; /* if cur is invalid reinitialize the ring. */ if (k > lim) return netmap_ring_reinit(kring); /* * Process new packets to send. j is the current index in the * netmap ring, l is the corresponding index in the NIC ring. * The two numbers differ because upon a *_init() we reset * the NIC ring but leave the netmap ring unchanged. * For the transmit ring, we have * * j = kring->nr_hwcur * l = IXGBE_TDT (not tracked in the driver) * and * j == (l + kring->nkr_hwofs) % ring_size * * In this driver kring->nkr_hwofs >= 0, but for other * drivers it might be negative as well. */ j = kring->nr_hwcur; if (j != k) { /* we have new packets to send */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* * Collect per-slot info. * Note that txbuf and curr are indexed by l. * * In this driver we collect the buffer address * (using the PNMB() macro) because we always * need to rewrite it into the NIC ring. * Many other drivers preserve the address, so * we only need to access it if NS_BUF_CHANGED * is set. */ struct netmap_slot *slot = &ring->slot[j]; union ixgbe_adv_tx_desc *curr = IXGBE_TX_DESC_ADV(txr, l); uint64_t paddr; void *addr = PNMB(slot, &paddr); // XXX type for flags and len ? int flags = ((slot->flags & NS_REPORT) || j == 0 || j == report_frequency) ? IXGBE_TXD_CMD_RS : 0; u_int len = slot->len; /* * Quick check for valid addr and len. * NMB() returns netmap_buffer_base for invalid * buffer indexes (but the address is still a * valid one to be used in a ring). slot->len is * unsigned so no need to check for negative values. */ if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) { ring_reset: return netmap_ring_reinit(kring); } slot->flags &= ~NS_REPORT; if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, unload and reload map */ // netmap_reload_map(pdev, DMA_TO_DEVICE, old_addr, addr); slot->flags &= ~NS_BUF_CHANGED; } /* * Fill the slot in the NIC ring. * In this driver we need to rewrite the buffer * address in the NIC ring. Other drivers do not * need this. */ curr->read.buffer_addr = htole64(paddr); curr->read.olinfo_status = htole32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); curr->read.cmd_type_len = htole32( len | (IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_TXD_CMD_EOP | flags) ); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwcur = k; /* the saved ring->cur */ /* decrease avail by number of packets sent */ kring->nr_hwavail -= n; wmb(); /* synchronize writes to the NIC ring */ /* (re)start the transmitter up to slot l (excluded) */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->reg_idx), l); } /* * Reclaim buffers for completed transmissions. * Because this is expensive (we read a NIC register etc.) * we only do it in specific cases (see below). * In all cases kring->nr_kflags indicates which slot will be * checked upon a tx interrupt (nkr_num_slots means none). 
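 * Summarizing the three cases handled below:
 *  - NAF_FORCE_RECLAIM:        reclaim unconditionally;
 *  - kring->nr_hwavail > 0:    buffers still available, skip the reclaim;
 *  - otherwise:                pick a slot about half a ring ahead for
 *                              ReportStatus and reclaim only if its DD
 *                              bit is already set.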
*/ if (flags & NAF_FORCE_RECLAIM) { j = 1; /* forced reclaim, ignore interrupts */ kring->nr_kflags = kring->nkr_num_slots; } else if (kring->nr_hwavail > 0) { j = 0; /* buffers still available: no reclaim, ignore intr. */ kring->nr_kflags = kring->nkr_num_slots; } else { /* * no buffers available, locate a slot for which we request * ReportStatus (approximately half ring after next_to_clean) * and record it in kring->nr_kflags. * If the slot has DD set, do the reclaim looking at TDH, * otherwise we go to sleep (in netmap_poll()) and will be * woken up when slot nr_kflags will be ready. */ union ixgbe_adv_tx_desc *txd = IXGBE_TX_DESC_ADV(txr, 0); j = txr->next_to_clean + kring->nkr_num_slots/2; if (j >= kring->nkr_num_slots) j -= kring->nkr_num_slots; // round to the closest with dd set j= (j < kring->nkr_num_slots / 4 || j >= kring->nkr_num_slots*3/4) ? 0 : report_frequency; kring->nr_kflags = j; /* the slot to check */ j = txd[j].wb.status & IXGBE_TXD_STAT_DD; // XXX cpu_to_le32 ? } if (j) { int delta; /* * Record completed transmissions. * We (re)use the driver's txr->next_to_clean to keep * track of the most recently completed transmission. * * The datasheet discourages the use of TDH to find out the * number of sent packets. We should rather check the DD * status bit in a packet descriptor. However, we only set * the "report status" bit for some descriptors (a kind of * interrupt mitigation), so we can only check on those. * For the time being we use TDH, as we do it infrequently * enough not to pose performance problems. */ l = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(ring_nr)); if (l >= kring->nkr_num_slots) { /* XXX can happen */ D("TDH wrap %d", l); l -= kring->nkr_num_slots; } delta = l - txr->next_to_clean; if (delta) { /* some tx completed, increment hwavail. */ if (delta < 0) delta += kring->nkr_num_slots; txr->next_to_clean = l; kring->nr_hwavail += delta; if (kring->nr_hwavail > lim) goto ring_reset; } } /* update avail to what the kernel knows */ ring->avail = kring->nr_hwavail; return 0; } /* * Reconcile kernel and user view of the receive ring. * Same as for the txsync, this routine must be efficient and * avoid races in accessing the shared regions. * * When called, userspace has read data from slots kring->nr_hwcur * up to ring->cur (excluded). * * The last interrupt reported kring->nr_hwavail slots available * after kring->nr_hwcur. * We must subtract the newly consumed slots (cur - nr_hwcur) * from nr_hwavail, make the descriptors available for the next reads, * and set kring->nr_hwcur = ring->cur and ring->avail = kring->nr_hwavail. * */ static int ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int flags) { struct SOFTC_T *adapter = netdev_priv(ifp); struct ixgbe_ring *rxr = adapter->rx_ring[ring_nr]; struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = &na->rx_rings[ring_nr]; struct netmap_ring *ring = kring->ring; u_int j, l, n, lim = kring->nkr_num_slots - 1; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; u_int k = ring->cur, resvd = ring->reserved; if (!netif_carrier_ok(ifp)) return 0; if (k > lim) /* userspace is cheating */ return netmap_ring_reinit(kring); rmb(); /* * First part, import newly received packets into the netmap ring. * * j is the index of the next free slot in the netmap ring, * and l is the index of the next received packet in the NIC ring, * and they may differ in case if_init() has been called while * in netmap mode. 
For the receive ring we have * * j = (kring->nr_hwcur + kring->nr_hwavail) % ring_size * l = rxr->next_to_check; * and * j == (l + kring->nkr_hwofs) % ring_size * * rxr->next_to_check is set to 0 on a ring reinit */ l = rxr->next_to_clean; j = netmap_idx_n2k(kring, l); if (netmap_no_pendintr || force_update) { uint16_t slot_flags = kring->nkr_slot_flags; for (n = 0; ; n++) { union ixgbe_adv_rx_desc *curr = IXGBE_RX_DESC_ADV(rxr, l); uint32_t staterr = le32toh(curr->wb.upper.status_error); if ((staterr & IXGBE_RXD_STAT_DD) == 0) break; ring->slot[j].len = le16toh(curr->wb.upper.length); ring->slot[j].flags = slot_flags; j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } if (n) { /* update the state variables */ rxr->next_to_clean = l; kring->nr_hwavail += n; } kring->nr_kflags &= ~NKR_PENDINTR; } /* * Skip past packets that userspace has already released * (from kring->nr_hwcur to ring->cur-ring->reserved excluded), * and make the buffers available for reception. * As usual j is the index in the netmap ring, l is the index * in the NIC ring, and j == (l + kring->nkr_hwofs) % ring_size */ j = kring->nr_hwcur; /* netmap ring index */ if (resvd > 0) { if (resvd + ring->avail >= lim + 1) { D("XXX invalid reserve/avail %d %d", resvd, ring->avail); ring->reserved = resvd = 0; // XXX panic... } k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; } if (j != k) { /* userspace has released some packets. */ l = netmap_idx_k2n(kring, j); for (n = 0; j != k; n++) { /* collect per-slot info, with similar validations * and flag handling as in the txsync code. * * NOTE curr and rxbuf are indexed by l. * Also, this driver needs to update the physical * address in the NIC ring, but other drivers * may not have this requirement. */ struct netmap_slot *slot = &ring->slot[j]; union ixgbe_adv_rx_desc *curr = IXGBE_RX_DESC_ADV(rxr, l); uint64_t paddr; void *addr = PNMB(slot, &paddr); if (addr == netmap_buffer_base) /* bad buf */ goto ring_reset; if (slot->flags & NS_BUF_CHANGED) { // netmap_reload_map(pdev, DMA_TO_DEVICE, old_addr, addr); slot->flags &= ~NS_BUF_CHANGED; } curr->wb.upper.status_error = 0; curr->read.pkt_addr = htole64(paddr); j = (j == lim) ? 0 : j + 1; l = (l == lim) ? 0 : l + 1; } kring->nr_hwavail -= n; kring->nr_hwcur = k; rxr->next_to_use = l; // XXX not really used wmb(); /* IMPORTANT: we must leave one free slot in the ring, * so move l back by one unit */ l = (l == 0) ? lim : l - 1; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->reg_idx), l); } /* tell userspace that there are new packets */ ring->avail = kring->nr_hwavail - resvd; return 0; ring_reset: return netmap_ring_reinit(kring); } /* * if in netmap mode, attach the netmap buffers to the ring and return true. * Otherwise return false. */ static int ixgbe_netmap_configure_tx_ring(struct SOFTC_T *adapter, int ring_nr) { struct netmap_adapter *na = NA(adapter->netdev); struct netmap_slot *slot = netmap_reset(na, NR_TX, ring_nr, 0); //int j; if (!slot) return 0; // not in netmap; #if 0 /* * on a generic card we should set the address in the slot. * But on the ixgbe, the address needs to be rewritten * after a transmission so there is nothing do to except * loading the map. 
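	 * (The disabled loop below only marks where the per-slot address
	 * setup would go on such a generic card.)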
*/ for (j = 0; j < na->num_tx_desc; j++) { int sj = netmap_idx_n2k(&na->tx_rings[ring_nr], j); uint64_t paddr; void *addr = PNMB(slot + sj, &paddr); } #endif return 1; } static int ixgbe_netmap_configure_rx_ring(struct SOFTC_T *adapter, int ring_nr) { /* * In netmap mode, we must preserve the buffers made * available to userspace before the if_init() * (this is true by default on the TX side, because * init makes all buffers available to userspace). * * netmap_reset() and the device specific routines * (e.g. ixgbe_setup_receive_rings()) map these * buffers at the end of the NIC ring, so here we * must set the RDT (tail) register to make sure * they are not overwritten. * * In this driver the NIC ring starts at RDH = 0, * RDT points to the last slot available for reception (?), * so RDT = num_rx_desc - 1 means the whole ring is available. */ struct netmap_adapter *na = NA(adapter->netdev); struct netmap_slot *slot = netmap_reset(na, NR_RX, ring_nr, 0); int lim, i; struct ixgbe_ring *ring = adapter->rx_ring[ring_nr]; /* same as in ixgbe_setup_transmit_ring() */ if (!slot) return 0; // not in netmap; lim = na->num_rx_desc - 1 - na->rx_rings[ring_nr].nr_hwavail; for (i = 0; i < na->num_rx_desc; i++) { /* * Fill the map and set the buffer address in the NIC ring, * considering the offset between the netmap and NIC rings * (see comment in ixgbe_setup_transmit_ring() ). */ int si = netmap_idx_n2k(&na->rx_rings[ring_nr], i); uint64_t paddr; PNMB(slot + si, &paddr); // netmap_load_map(rxr->ptag, rxbuf->pmap, addr); /* Update descriptor */ IXGBE_RX_DESC_ADV(ring, i)->read.pkt_addr = htole64(paddr); } IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(ring_nr), lim); return 1; } /* * The attach routine, called near the end of ixgbe_attach(), * fills the parameters for netmap_attach() and calls it. * It cannot fail, in the worst case (such as no memory) * netmap mode will be disabled and the driver will only * operate in standard mode. */ static void ixgbe_netmap_attach(struct SOFTC_T *adapter) { struct netmap_adapter na; bzero(&na, sizeof(na)); na.ifp = adapter->netdev; na.num_tx_desc = adapter->tx_ring[0]->count; na.num_rx_desc = adapter->rx_ring[0]->count; na.nm_txsync = ixgbe_netmap_txsync; na.nm_rxsync = ixgbe_netmap_rxsync; na.nm_register = ixgbe_netmap_reg; netmap_attach(&na, adapter->num_tx_queues); } /* end of file */ netmap-release/LINUX/Makefile000644 000765 000024 00000010214 12230530510 016560 0ustar00luigistaff000000 000000 # To build external modules, you must have a prebuilt kernel available # that contains the configuration and header files used in the build. # go in the kernel directory and do a # make oldconfig; make scripts; make prepare # # The list of targets is derived from obj-m # and then the corresponding foo-objs CONFIG_NETMAP:=m CONFIG_R8169:=m CONFIG_FORCEDETH:=m CONFIG_E1000:=m CONFIG_E1000E:=m CONFIG_IXGBE:=m CONFIG_IGB:=m CONFIG_BNX2X:=m netmap_lin-objs := netmap.o netmap_mem2.o obj-$(CONFIG_NETMAP) = netmap_lin.o ifndef NODRIVERS obj-m += $(DRIVERS) endif # obj-$(CONFIG_R8169) += r8169.o # obj-$(CONFIG_FORCEDETH) += forcedeth.o # obj-$(CONFIG_E1000) += e1000/ # obj-$(CONFIG_E1000E) += e1000e/ # obj-$(CONFIG_IXGBE) += ixgbe/ # obj-$(CONFIG_IGB) += igb/ # names of the driver sources. 
In old linuxes are under # KSRC/drivers/net, but depending on where you build they # can be in $(KSRC)/source/drivers/net/ethernet/$(manufacturer) DRIVER_SRCS = r8169.c forcedeth.c e1000/ e1000e/ ixgbe/ igb/ DRIVER_SUBDIRS= nvidia realtek intel DRIVER_SRCS += bnx2x/ mellanox/ mlx4/ DRIVER_SUBDIRS += broadcom . # The following commands are needed to build the modules as out-of-tree, in # fact the kernel sources path must be specified. # Additional compile flags (e.g. header location) PWD ?= $(CURDIR) M:=$(PWD) EXTRA_CFLAGS := -I$(M) -I$(M)/../sys -I$(M)/../sys/dev -DCONFIG_NETMAP EXTRA_CFLAGS += -Wno-unused-but-set-variable # We use KSRC for the kernel configuration and sources. # If the sources are elsewhere, then use SRC to point to them. KSRC ?= /lib/modules/$(shell uname -r)/build SRC ?= $(KSRC) # extract version number and filter with the available patches. LIN_VER = $(shell V=linux/version.h; G=. ; \ [ -f $(KSRC)/include/$${V} ] || G=generated/uapi ;\ grep LINUX_VERSION_CODE $(KSRC)/include/$${G}/linux/version.h | \ awk '{printf "%03x%02d", $$3/256, $$3%256} ') PATCHES := $(shell \ cd $(PWD)/patches; ls diff--* | awk -v v=$(LIN_VER) -F -- '{ \ if ((!$$3 || $$3 <= v) && (!$$4 || v < $$4)) print $0; }') DRIVERS := $(shell \ cd $(PWD)/patches; ls diff--* | awk -v v=$(LIN_VER) -F -- '{ \ if ((!$$3 || $$3 <= v) && (!$$4 || v < $$4)) { ; \ if (match($$2, ".c")) print $$2 ; else print $$2 "/" } }' ) ifdef NODRIVERS all: build build: $(MAKE) -C $(KSRC) M=$(PWD) CONFIG_NETMAP=m \ EXTRA_CFLAGS='$(EXTRA_CFLAGS)' \ modules else all: get-drivers build build: $(MAKE) -C $(KSRC) M=$(PWD) CONFIG_NETMAP=m \ CONFIG_E1000=m CONFIG_E1000E=m \ CONFIG_BNX2X=m CONFIG_MLX4=m \ CONFIG_IXGBE=m CONFIG_IGB=m EXTRA_CFLAGS='$(EXTRA_CFLAGS)' \ DRIVERS="$(DRIVERS:%.c=%.o)" modules @ls -l `find . -name \*.ko` endif test: @echo "version $(LIN_VER)" @echo "patches $(PATCHES)" @echo "drivers $(DRIVERS)" clean: -@ $(MAKE) -C $(KSRC) M=$(PWD) clean -@ (rm -rf $(DRIVER_SRCS) *.orig *.rej *.ko *.o .*.d \ .tmp_versions *.mod.c modules.order \ Module.symvers .*.cmd get-drivers ) # the source is not here so we need to specify a dependency $(obj)/netmap.o: $(M)/../sys/dev/netmap/netmap.c $(call cmd,cc_o_c) $(call cmd,modversions) # $(call quiet_cmd,cc_o_c) $(obj)/netmap_mem2.o: $(M)/../sys/dev/netmap/netmap_mem2.c $(call cmd,cc_o_c) $(call cmd,modversions) #-- copy and patch initial files # The location changes depending on the OS version, so ... get-drivers: @echo "LIN_VER $(LIN_VER) subdirs $(DRIVER_SUBDIRS) files $(DRIVERS)" -@( \ cd $(SRC); [ -d source ] && cd source; \ cd drivers/net; s=. ; \ [ -d ethernet ] && cd ethernet && s="$(DRIVER_SUBDIRS)" ; \ for i in $$s; do (cd $$i ; \ echo "Copying from `pwd` "; \ cp -Rp $(DRIVERS) $(PWD) 2>/dev/null ); done ) -@(for i in $(PATCHES) ; do echo "** use patches/$$i"; \ patch --posix --quiet --force -p1 < patches/$$i; done ) @touch get-drivers @echo "Building the following drivers: $(obj-m)" # copy and patch files from the source tree #$(obj)/r8169.c: $(srctree)/drivers/net/r8169.c # # net-r8169.diff # $(call cmd,shipped) # (cd $(obj); patch < net-r8169.diff ) # # compute the diffs for the original files diffs: @for i in `find . 
-name \*.orig`; do \ diff -urp $$i $${i%.orig} ; \ done apps: (cd ../examples; $(MAKE)) +%: @echo $($*) netmap-release/LINUX/patches000755 000765 000024 00000000000 12230530510 021126 2final-patchesustar00luigistaff000000 000000 netmap-release/LINUX/README000644 000765 000024 00000012507 12220335545 016021 0ustar00luigistaff000000 000000 # $Id: README 10863 2012-04-11 17:10:39Z luigi $ NETMAP FOR LINUX ---------------- This directory contains a version of the "netmap" and "VALE" code for Linux. Netmap is a BSD-licensed framework that supports line-rate direct packet I/O even on 10GBit/s interfaces (14.88Mpps) with limited system load, and includes a libpcap emulation library to port applications. See http://info.iet.unipi.it/~luigi/netmap/ for more details. There you can also find the latest versions of the code and documentation as well as pre-built TinyCore images based on linux 3.0.3 and containing the netmap modules and some test applications. This version supports r8169, ixgbe, igb, e1000, e1000e and forcedeth. Netmap relies on a kernel module (netmap_lin.ko) and slightly modified device drivers. Userspace programs can use the native API (documented in netmap.4) or a libpcap emulation library. The FreeBSD and Linux versions share the same codebase, which is located in ../sys . For Linux we use some additional glue code, (bsd_glue.h). Device drivers are taken directly from the Linux distributions, and patched using the files in the patches/ directory. Common driver modifications are in the .h files in this directory. HOW TO BUILD THE CODE --------------------- 1. make sure you have kernel sources/headers matching your installed system 2. do the following make clean; make KSRC=/usr/src/linux-kernel-source-or-headers this produces ./netmap_lin.ko and other kernel modules. 3. to build sample applications, run (cd ../examples; make ) (you will need the pthreads and libpcap-dev packages to build them) If you want support for additional drivers please have a look at ixgbe_netmap_linux.h and the patches in patches/ The patch file are named as diff--DRIVER--LOW--HIGH--otherstuff where DRIVER is the driver name to patch, LOW and HIGH are the versions to which the patch applies (LOW included, HIGH excluded, so diff--r8169.c--20638--30300--ok applies from 2.6.38 to 3.3.0 (excluded) HOW TO USE THE CODE ------------------- REMEMBER THIS IS EXPERIMENTAL CODE WHICH MAY CRASH YOUR SYSTEM. USE IT AT YOUR OWN RISk. Whether you built your own modules, or are using the prebuilt TinyCore image, the following steps can be used for initial testing: 1. unload any modules for the network cards you want to use, e.g. sudo rmmod ixgbe sudo rmmod e1000 ... 2. load netmap and device driver module sudo insmod ./netmap_lin.ko sudo insmod ./ixgbe/ixgbe.ko sudo insmod ./e1000/e1000.ko ... 3. turn the interface(s) up sudo ifconfig eth0 up # and same for others 4. Run test applications -- as an example, pkt-gen is a raw packet sender/receiver which can do line rate on a 10G interface # send about 500 million packets of 60 bytes each. # wait 5s before starting, so the link can go up sudo pkt-gen -i eth0 -f tx -n 500111222 -l 60 -w 5 # you should see about 14.88 Mpps sudo pkt-gen -i eth0 -f rx # act as a receiver COMMON PROBLEMS ---------------- * switching in/out of netmap mode causes the link to go down and up. If your card is connected to a switch with spanning tree enabled, the switch will likely MUTE THE LINK FOR 10 SECONDS while it is detecting the new topology. 
Either disable the spanning tree on the switch or use long pauses before sending data; * Not all cards can do line rate no matter how fast is your software or CPU. Several have hardware limitations that prevent reaching the peak speed, especially for small packet sizes. Examples: - ixgbe cannot receive at line rate with packet sizes that are not multiple of 64 (after CRC stripping). This is especially evident with minimum-sized frames (-l 60 ) - some of the low-end 'e1000' cards can send 1.2 - 1.3Mpps instead of the theoretical maximum (1.488Mpps) - the 'realtek' cards seem unable to send more than 450-500Kpps even though they can receive at least 1.1Mpps * if the link is not up when the packet generator starts, you will see frequent messages about a link reset. While we work on a fix, use the '-w' argument on the generator to specify a longer timeout * the ixgbe driver (and perhaps others) is severely slowed down if the remote party is senting flow control frames to slow down traffic. If that happens try to use the ethtool command to disable flow control. REVISION HISTORY ----------------- 20120813 - updated distribution using common code for FreeBSD and Linux, and inclusion of drivers from the linux source tree 20120322 - fixed the 'igb' driver, now it can send and receive correctly (the problem was in netmap_rx_irq() so it might have affected other multiqueue cards). Also tested the 'r8169' in transmit mode. Added comments on switches and spanning tree. 20120217 - initial version. Only ixgbe, e1000 and e1000e are working. Other drivers (igb, r8169, forcedeth) are supplied only as a proof of concept. DETAILS -------- + igb: on linux 3.2 and above the igb driver moved to split buffers, and netmap was not updated until end of june 2013. Symptoms were inability to receive short packets. + there are reports of ixgbe and igb unable to read packets. We are unable to reproduce the problem. - Ubuntu 12.04 LTS 3.5.0-25-generic. igb read problems ? - 3.2.0-32-generic with 82598 not working + if_e1000_e uses regular descriptor up 3.1 at least 3.2.32 is reported to use extended descriptors (in my repo updated at -r 11975) netmap-release/LINUX/scripts/000755 000765 000024 00000000000 12230530510 016611 5ustar00luigistaff000000 000000 netmap-release/LINUX/wip-patches/000755 000765 000024 00000000000 12220335545 017360 5ustar00luigistaff000000 000000 netmap-release/LINUX/scripts/help000755 000765 000024 00000000051 12227500737 017501 0ustar00luigistaff000000 000000 #!/bin/sh sed -n 's/^## \?//p' $1 | fmt netmap-release/LINUX/scripts/np000755 000765 000024 00000022657 12230530510 017170 0ustar00luigistaff000000 000000 #!/bin/bash ## Manage linux driver patches for netmap. ## usage (from the dir containing the Makefile): ## ## scripts/np [args...] ## ## where is any of the functions below. ## [ -f scripts/conf ] && source scripts/conf ## The following enviroment variables must be set: ## ## GITDIR: the absolute path of the netmap linux ## git repository, containing all the required netmap-* ## branches. [ -n "$GITDIR" -a -d "$GITDIR/.git" ] || { echo "GITDIR not set or not valid" >&2 exit 1 } ## ## LINUX_SOURCES: the absolute path of a ## directory used to store all required linux-* source trees ## (The script will extract linux-x.y.z from GITDIR if it needs ## it and $LINUX_SOURCES does not already contain it). ## ## LINUX_CONFIGS: the absolute path of a ## directory containing the configuration files for ## the linux kernel. The file for version x must be named ## config-x. 
config-all can be used as a default. ## ## The configuration variables can be put in scripts/conf. ## ## ## Available actions: ## ## ## driver-path : ## retrieves the path of in the linux sources ## for version . The path is output to stdout. ## It uses a local cache to minimize the expensive ## file system search. function driver-path() { cat cache/$2/$1/path 2>/dev/null && return local kern=$(get-kernel $2) mkdir -p cache/$2/$1 ( cd $kern find drivers/net -name $1 ) | tee cache/$2/$1/path } ## ## get-patch [-c] ## extract the netmap patch for the given and the ## given kernel . The patch is stored in tmp-patches ## and the name of the patch is output to stdout. ## If a patch with the same name already exists in tmp-patches ## it is overwritten, unless the -c option is used, ## in which case the existing patch is kept (the patch name is still output). function get-patch() { local use_cache [ "$1" = -c ] && { use_cache=1; shift; } # convert kernel version to fixed notation local v1=$(scripts/vers $2 -c) # compute next kernel version (in fixed notation) local v2=$(scripts/vers $2 -i -c) local drvname=$1 local patchname=diff--$drvname--$v1--$v2 local out=tmp-patches/$patchname [ -n "$use_cache" -a -s $out ] && { echo $out; return; } local drvpath=$(driver-path $drvname $2) [ -n "$drvpath" ] || return local drvdir=$(dirname $drvpath) ( cd $GITDIR git diff --relative=$drvdir v$2..netmap-$2 -- $drvpath ) > $out # an empty patch means no netmap support for this driver [ -s $out ] || { rm $out; return 1; } echo $out return 0; } ## ## get-range : ## extracts the netmap patches for the given for ## all the kernel versions from (included) to ## (excluded). All patches are stored in tmp-patches ## and their names are output to stdout. function get-range() { local drv=$1 local v=$2 # while version is less than $3 while scripts/vers -b $v $3 -L; do get-patch $drv $v # compute next version v=$(scripts/vers $v -i) done } ## ## get-src : ## copies the original sources of the given , ## from the given kernel to the given ## directory. ## It uses a local cache to minimize the expensive ## checkouts in GITDIR. function get-src() { local kern=$(get-kernel $2) local src=$(driver-path $1 $2) cp -r $kern/$src $3 } ## ## extend : ## checks wether the range of applicability of the ## given can be extented to include . ## It returns 0 on success and 1 on failure. function extend() { local patch=$(realpath $1) local v=$2 # extract the driver name from the patch name local drv=$(scripts/vers $1 -s -p -p) local tmpdir1=$(mktemp -d) local tmpdir2=$(mktemp -d) trap "rm -rf $tmpdir1 $tmpdir2" 0 # we get the driver sources for the given and # we apply two patches separately: # i) the given ; # ii) the proper patch from GITDIR. # We declare to be extendable if # - it is still applicable AND # - we obtain the same files from i) and ii) (ignoring whitespace) get-src $drv $v $tmpdir1 get-src $drv $v $tmpdir2 ( cd $tmpdir1 patch --no-backup-if-mismatch -p1 < $patch >/dev/null 2>&1 ) || return 1 local patch2=$(get-patch -c $drv $v) patch2=$(realpath $patch2) ( cd $tmpdir2 patch -p1 < $patch2 >/dev/null 2>&1 ) # this will certainly apply diff -qbBr $tmpdir1 $tmpdir2 >/dev/null || return 1 return 0 } ## ## minimize : ## tries to minimize the number of patch files for the given ## . It uses the patches currently found in tmp-patches ## and stores the resulting patches in final-patches. ## If final-patches already contained patches for , ## they are deleted first. 
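## Hypothetical example (driver name and versions chosen only for
## illustration; tmp-patches must have been populated first, e.g. by
## get-range):
##
##   scripts/np get-range igb 3.0 3.8
##   scripts/np minimize igb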
function minimize() { mkdir -p final-patches local drv=$(basename $1) local patches=$(ls tmp-patches/diff--$drv--* 2>/dev/null) [ -n "$patches" ] || return 1 # put the patch names in $1, $2, ... set $patches rm -f final-patches/diff--$drv--* # the original patches (in tmp-patches) are ordered by version number. # We consider one patch in turn (the 'pivot') and try # to extend its range to cover the range of the next # patch. If this succedes, the merged patch is the new # pivot, otherwise the current pivot is output and the # next patch becomes the new pivot. The process # is repeated until there are no more patches to consider. local pivot=$1 [ -n "$pivot" -a -e "$pivot" ] || return 1 # extract the left end and right end of the pivot's range local ple=$(scripts/vers $pivot -s -p -C) local pre=$(scripts/vers $pivot -s -C) while [ -n "$pivot" ]; do shift if [ -n "$1" ]; then # extract the left end and right end of the next patch local nle=$(scripts/vers $1 -s -p -C) local nre=$(scripts/vers $1 -s -C) # we admit no gaps in the range if [ $pre = $nle ] && extend $pivot $nle; then pre=$nre continue fi fi # either out of patches or failed merge. # Compute the file name of the current pivot and store # the patch in its final location out=$(scripts/vers diff $drv $ple -c $pre -c -S4) cp $pivot final-patches/$out # the new pivot becames the next patch (if any) pivot=$1 pre=$nre ple=$nle done return 0 } ## ## infty ## if final-patches contains a patch for with a range ## ending in , extend it to infinity. ## Do nothing otherwise. function infty() { local drv=$(basename $1) # convert kernel version to fixed notation local v=$(scripts/vers $2 -c) local last=$(ls final-patches/diff--$drv--*--$v 2>/dev/null|tail -n1) [ -n "$last" ] || return 1 mv -n $last $(scripts/vers $last -s -p 99999 -S4) 2>/dev/null } function get-kernel() { local v=$1 local dst="$(realpath $LINUX_SOURCES)/linux-$v" [ -d $dst ] && { echo $dst; return; } mkdir -p $dst ( cd $GITDIR git archive v$v | tar xf - -C $dst ) echo $dst } ## ## build-prep ## prepare the linux tree for to be ready ## for external modules compilation. ## The tree is put in $LINUX_SOURCES/linux- and the ## configuration is obtained from $LINUX_CONFIGS/config- ## (or $LINUX_CONFIGS/config-all by default). ## Errors are logged to $LINUX_CONFIGS/linux-.log. ## If $LINUX_SOURCES/linux- already exists, ## nothing is done. ## In all cases, the absolute path of linux- is ## output. function build-prep() { local v=$1 local dst=$(get-kernel $v) exec 3>&1 4>&2 >$dst.log 2>&1 cp $LINUX_CONFIGS/config-$v $dst/.config 2>/dev/null || cp $LINUX_CONFIGS/config-all $dst/.config ( cd $dst yes '' | make oldconfig make modules_prepare ) exec 1>&3 2>&4 echo $dst } ## ## check-patch ## check that the given applies and compiles without ## error for all its declared range of applicability. ## Errors are logged to log/. function check-patch() { # extract the left version local v1=$(scripts/vers $1 -s -p -C) # extract the right version local v2=$(scripts/vers $1 -s -C) local p=$(realpath $1) mkdir -p log local log="$(realpath log)/$(basename $1)" echo -n $1... 
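	# try the patch against every kernel version in its declared range,
	# building the modules with it applied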
while scripts/vers -b $v1 $v2 -L; do local ksrc=$(build-prep $v1) local tmpdir=$(mktemp -d) trap "rm -rf $tmpdir" 0 (cd ..; git archive master | tar xf - -C $tmpdir ) pushd $tmpdir/LINUX >/dev/null mkdir single-patch rm patches ln -s single-patch patches cp $p single-patch ok=false make KSRC=$ksrc >$log 2>&1 && ok=true popd >/dev/null [ $ok = true ] || { echo FAILED; return 1; } rm -rf $tmpdir # compute next version v1=$(scripts/vers $v1 -i) done echo OK } ## ## build-check ## do a check-patch for all the patches of that are ## currently in tmp-patches. Patches that fail the check ## are moved to failed-patches. function build-check() { mkdir -p failed-patches local drv=$(basename $1) local patches=$(ls tmp-patches/diff--$drv--* 2>/dev/null) local p for p in $patches; do check-patch $p || mv $p failed-patches done } ## ## forall [args...] ## exec [args...] for all known drivers. function forall() { local cmd=$1 shift # we obtain the value of DRIVER_SRC from the makefile # (the +% target is defined in our Makefile and prints # the contents of variable %) local driver_srcs=$(make +DRIVER_SRCS) local driver for driver in $driver_srcs; do $cmd $(basename $driver) "$@" done } mkdir -p tmp-patches [ -n "$1" ] && { cmd=$1 shift [ -n "$cmd" ] || { scripts/help $0; exit 1; } case $cmd in *-all) forall ${cmd%-all} "$@" ;; -[hH]|--help|-help|help) scripts/help $0 ;; *) $cmd "$@" ;; esac } netmap-release/LINUX/scripts/vers000755 000765 000024 00000006073 12227500737 017542 0ustar00luigistaff000000 000000 #!/usr/bin/perl ## Simple stack-based RPN calculator for linux version numbers. ## Usage: ## ## scripts/vers [operand|operation ...] ## ## Operations all start with '-', everything else is an operand ## and is pushed on the stack as-is. ## When all arguments have been processed, the content of the ## top of the stack is printed on stdout and the script ends. ## ## Available operations: sub badversion { my $v = shift; die "Bad version $v"; } sub conv { my $v = shift; return sprintf "%x%02x%02x", (split /\./, $v); } sub rconv { my $v = shift; $v =~ /(.*)(..)(..)$/; if ($1 > 2 && $3 == 0) { return sprintf "%d.%d", (hex $1), (hex $2); } return sprintf "%d.%d.%d", (hex $1), (hex $2), (hex $3); } sub next { my $v = shift; my ($may, $min, $sub) = split /\./, $v; if ($may < 2 || ($may == 2 && $min != 6)) { &badversion($v); } if ($may == 2) { if ($sub < 39) { return "2.6." . ($sub + 1); } elsif ($sub == 39) { return "3.0"; } else { &badversion($v); } } else { return "$may." . ($min + 1); } } @ARGV or do { system("scripts/help $0"); exit 1; }; for (@ARGV) { ## ## -b (nullary) suppress normal output. On exit, return 1 ## if stack top is "false", 0 otherwise. /^-b$/ && do { $silent=1; next; }; ## ## -c (unary) convert from dot to fixed notation /^-c$/ && do { $v = pop @stack; push @stack, &conv($v); next; }; ## ## -C (unary) convert from fixed to dot notation /^-C$/ && do { $v = pop @stack; push @stack, &rconv($v); next; }; ## ## -i (unary) increment version number ## (must be in dot notation) /^-i$/ && do { $v = pop @stack; push @stack, &next($v); next; }; ## ## -s (unary) assume the stack top is a ## string containing several fields separated ## by '--'. 
Replace the stack top with these ## fields (last on top) /^-s$/ && do { $v = pop @stack; push @stack, split /--/, $v; next; }; ## ## -SN (N-ary) pop N elements from the stack, ## join them using '--' as a separator ## (top as last) and push the resulting ## string /^-S(\d+)$/ && do { $n = $1; @t = @stack[-$n..-1]; while ($n--) { pop @stack; } push @stack, (join '--', @t); next; }; ## ## -p (unary) pop /^-p$/ && do { pop @stack; next; }; ## ## -l (binary) push "true" if first version ## number is stricly less then second version ## number (versions in fixed notation) ## ## -L (binary) like -l, but for version numbers ## in dot notation /^-[lL]$/ && do { $v1 = pop @stack; $v2 = pop @stack; /^-L$/ && do { $v1 = &conv($v1); $v2 = &conv($v2); }; push @stack, (($v2 lt $v1) ? "true" : "false"); next; }; ## ## -a (binary) logical and. Arguments must be ## either "true" or "false". /^-a$/ && do { $v1 = pop @stack; $v2 = pop @stack; push @stack, (($v1 eq "true" && $v2 eq "true") ? "true" : "false"); next; }; ## ## -n (unary) logical not. Argument must be ## either "true" or "false". /^-n$/ && do { $v1 = pop @stack; push @stack, (($v1 eq "true") ? "false" : "true"); next; }; push @stack, $_; } $v = pop @stack; if ($silent) { exit ($v eq "false"); } print "$v\n"; netmap-release/LINUX/final-patches/diff--e1000--20620--99999000644 000765 000024 00000006170 12227500737 022630 0ustar00luigistaff000000 000000 diff --git a/e1000/e1000_main.c b/e1000/e1000_main.c index bcd192c..5de7009 100644 --- a/e1000/e1000_main.c +++ b/e1000/e1000_main.c @@ -213,6 +213,10 @@ static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + /** * e1000_init_module - Driver Registration Routine * @@ -375,6 +379,10 @@ static void e1000_configure(struct e1000_adapter *adapter) e1000_configure_tx(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); +#ifdef DEV_NETMAP + if (e1000_netmap_init_buffers(adapter)) + return; +#endif /* DEV_NETMAP */ /* call E1000_DESC_UNUSED which always leaves * at least 1 descriptor unused to make sure * next_to_use != next_to_clean */ @@ -402,6 +410,10 @@ int e1000_up(struct e1000_adapter *adapter) netif_wake_queue(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif /* DEV_NETMAP */ + /* fire a link change interrupt to start the watchdog */ ew32(ICS, E1000_ICS_LSC); return 0; @@ -485,6 +497,10 @@ void e1000_down(struct e1000_adapter *adapter) ew32(RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif /* DEV_NETMAP */ + netif_tx_disable(netdev); /* disable transmits in the hardware */ @@ -1035,6 +1051,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->wol = adapter->eeprom_wol; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); +#ifdef DEV_NETMAP + e1000_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + /* print bus type/speed/width info */ DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", ((hw->bus_type == e1000_bus_type_pcix) ? 
"-X" : ""), @@ -1113,6 +1133,10 @@ static void __devexit e1000_remove(struct pci_dev *pdev) kfree(adapter->tx_ring); kfree(adapter->rx_ring); + +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ iounmap(hw->hw_addr); if (hw->flash_address) @@ -1291,6 +1315,10 @@ static int e1000_open(struct net_device *netdev) netif_start_queue(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif + /* fire a link status change interrupt to start the watchdog */ ew32(ICS, E1000_ICS_LSC); @@ -3429,6 +3457,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, unsigned int count = 0; unsigned int total_tx_bytes=0, total_tx_packets=0; +#ifdef DEV_NETMAP + if (netmap_tx_irq(netdev, 0)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); @@ -3795,6 +3827,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, bool cleaned = false; unsigned int total_rx_bytes=0, total_rx_packets=0; +#ifdef DEV_NETMAP + ND("calling netmap_rx_irq"); + if (netmap_rx_irq(netdev, 0, work_done)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); buffer_info = &rx_ring->buffer_info[i]; netmap-release/LINUX/final-patches/diff--e1000e--20620--20623000644 000765 000024 00000005306 12227500737 022735 0ustar00luigistaff000000 000000 diff --git a/e1000e/netdev.c b/e1000e/netdev.c index fad8f9e..50f74e2 100644 --- a/e1000e/netdev.c +++ b/e1000e/netdev.c @@ -87,6 +87,10 @@ static int e1000_desc_unused(struct e1000_ring *ring) return ring->count + ring->next_to_clean - ring->next_to_use - 1; } +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + /** * e1000_receive_skb - helper function to handle Rx indications * @adapter: board private structure @@ -446,6 +450,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, bool cleaned = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#ifdef DEV_NETMAP + if (netmap_rx_irq(netdev, 0, work_done)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); buffer_info = &rx_ring->buffer_info[i]; @@ -624,6 +632,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) unsigned int count = 0; unsigned int total_tx_bytes = 0, total_tx_packets = 0; +#ifdef DEV_NETMAP + if (netmap_tx_irq(netdev, 0)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); @@ -2632,6 +2644,10 @@ static void e1000_configure(struct e1000_adapter *adapter) e1000_configure_tx(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); +#ifdef DEV_NETMAP + if (e1000e_netmap_init_buffers(adapter)) + return; +#endif /* DEV_NETMAP */ adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring)); } @@ -2892,6 +2908,10 @@ void e1000e_down(struct e1000_adapter *adapter) netif_stop_queue(netdev); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + /* disable transmits in the hardware */ tctl = er32(TCTL); tctl &= ~E1000_TCTL_EN; @@ -3174,6 +3194,10 @@ static int e1000_open(struct net_device *netdev) netif_start_queue(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + /* fire a link status change interrupt to start the watchdog */ ew32(ICS, E1000_ICS_LSC); @@ -5227,6 +5251,9 @@ static int 
__devinit e1000_probe(struct pci_dev *pdev, if (err) goto err_register; +#ifdef DEV_NETMAP + e1000_netmap_attach(adapter); +#endif /* DEV_NETMAP */ /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -5300,6 +5327,10 @@ static void __devexit e1000_remove(struct pci_dev *pdev) kfree(adapter->tx_ring); kfree(adapter->rx_ring); +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + iounmap(adapter->hw.hw_addr); if (adapter->hw.flash_address) iounmap(adapter->hw.flash_address); netmap-release/LINUX/final-patches/diff--e1000e--20623--30100000644 000765 000024 00000005246 12227500737 022732 0ustar00luigistaff000000 000000 diff --git a/e1000e/netdev.c b/e1000e/netdev.c index 57a7e41..d8bc988 100644 --- a/e1000e/netdev.c +++ b/e1000e/netdev.c @@ -435,6 +435,10 @@ static int e1000_desc_unused(struct e1000_ring *ring) return ring->count + ring->next_to_clean - ring->next_to_use - 1; } +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + /** * e1000_receive_skb - helper function to handle Rx indications * @adapter: board private structure @@ -763,6 +767,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, bool cleaned = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#ifdef DEV_NETMAP + if (netmap_rx_irq(netdev, 0, work_done)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); buffer_info = &rx_ring->buffer_info[i]; @@ -977,6 +985,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) unsigned int count = 0; unsigned int total_tx_bytes = 0, total_tx_packets = 0; +#ifdef DEV_NETMAP + if (netmap_tx_irq(netdev, 0)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); @@ -3001,6 +3013,10 @@ static void e1000_configure(struct e1000_adapter *adapter) e1000_configure_tx(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); +#ifdef DEV_NETMAP + if (e1000e_netmap_init_buffers(adapter)) + return; +#endif /* DEV_NETMAP */ adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring)); } @@ -3240,6 +3256,10 @@ void e1000e_down(struct e1000_adapter *adapter) netif_stop_queue(netdev); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + /* disable transmits in the hardware */ tctl = er32(TCTL); tctl &= ~E1000_TCTL_EN; @@ -3532,6 +3552,10 @@ static int e1000_open(struct net_device *netdev) netif_start_queue(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + adapter->idle_check = true; pm_runtime_put(&pdev->dev); @@ -5716,6 +5740,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (err) goto err_register; +#ifdef DEV_NETMAP + e1000_netmap_attach(adapter); +#endif /* DEV_NETMAP */ /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -5813,6 +5840,10 @@ static void __devexit e1000_remove(struct pci_dev *pdev) kfree(adapter->tx_ring); kfree(adapter->rx_ring); +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + iounmap(adapter->hw.hw_addr); if (adapter->hw.flash_address) iounmap(adapter->hw.flash_address); netmap-release/LINUX/final-patches/diff--e1000e--30100--30400000644 000765 000024 00000005274 12225031332 022711 0ustar00luigistaff000000 000000 diff --git a/e1000e/netdev.c b/e1000e/netdev.c index 2198e61..caf2767 100644 --- a/e1000e/netdev.c +++ 
b/e1000e/netdev.c @@ -452,6 +452,10 @@ static int e1000_desc_unused(struct e1000_ring *ring) return ring->count + ring->next_to_clean - ring->next_to_use - 1; } +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + /** * e1000_receive_skb - helper function to handle Rx indications * @adapter: board private structure @@ -849,6 +853,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, bool cleaned = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#ifdef DEV_NETMAP + if (netmap_rx_irq(netdev, 0, work_done)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC(*rx_ring, i); buffer_info = &rx_ring->buffer_info[i]; @@ -1066,6 +1074,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) unsigned int count = 0; unsigned int total_tx_bytes = 0, total_tx_packets = 0; +#ifdef DEV_NETMAP + if (netmap_tx_irq(netdev, 0)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); @@ -3177,6 +3189,10 @@ static void e1000_configure(struct e1000_adapter *adapter) e1000_configure_tx(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); +#ifdef DEV_NETMAP + if (e1000e_netmap_init_buffers(adapter)) + return; +#endif /* DEV_NETMAP */ adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), GFP_KERNEL); } @@ -3468,6 +3484,10 @@ void e1000e_down(struct e1000_adapter *adapter) netif_stop_queue(netdev); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + /* disable transmits in the hardware */ tctl = er32(TCTL); tctl &= ~E1000_TCTL_EN; @@ -3755,6 +3775,10 @@ static int e1000_open(struct net_device *netdev) netif_start_queue(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + adapter->idle_check = true; pm_runtime_put(&pdev->dev); @@ -6147,6 +6171,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (err) goto err_register; +#ifdef DEV_NETMAP + e1000_netmap_attach(adapter); +#endif /* DEV_NETMAP */ /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -6234,6 +6261,10 @@ static void __devexit e1000_remove(struct pci_dev *pdev) kfree(adapter->tx_ring); kfree(adapter->rx_ring); +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + iounmap(adapter->hw.hw_addr); if (adapter->hw.flash_address) iounmap(adapter->hw.flash_address); netmap-release/LINUX/final-patches/diff--e1000e--30400--30900000644 000765 000024 00000005410 12225031332 022711 0ustar00luigistaff000000 000000 diff --git a/e1000e/netdev.c b/e1000e/netdev.c index 9520a6a..f6f2df6 100644 --- a/e1000e/netdev.c +++ b/e1000e/netdev.c @@ -467,6 +467,10 @@ static int e1000_desc_unused(struct e1000_ring *ring) return ring->count + ring->next_to_clean - ring->next_to_use - 1; } +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + /** * e1000_receive_skb - helper function to handle Rx indications * @adapter: board private structure @@ -875,6 +879,10 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, bool cleaned = false; unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#ifdef DEV_NETMAP + if (netmap_rx_irq(netdev, 0, work_done)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); @@ -1129,6 +1137,10 
@@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) unsigned int total_tx_bytes = 0, total_tx_packets = 0; unsigned int bytes_compl = 0, pkts_compl = 0; +#ifdef DEV_NETMAP + if (netmap_tx_irq(netdev, 0)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); @@ -3358,6 +3370,10 @@ static void e1000_configure(struct e1000_adapter *adapter) e1000e_setup_rss_hash(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); +#ifdef DEV_NETMAP + if (e1000e_netmap_init_buffers(adapter)) + return; +#endif /* DEV_NETMAP */ adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); } @@ -3657,6 +3673,10 @@ void e1000e_down(struct e1000_adapter *adapter) netif_stop_queue(netdev); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + /* disable transmits in the hardware */ tctl = er32(TCTL); tctl &= ~E1000_TCTL_EN; @@ -3946,6 +3966,10 @@ static int e1000_open(struct net_device *netdev) adapter->tx_hang_recheck = false; netif_start_queue(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + adapter->idle_check = true; pm_runtime_put(&pdev->dev); @@ -6417,6 +6441,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (err) goto err_register; +#ifdef DEV_NETMAP + e1000_netmap_attach(adapter); +#endif /* DEV_NETMAP */ /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -6504,6 +6531,10 @@ static void __devexit e1000_remove(struct pci_dev *pdev) kfree(adapter->tx_ring); kfree(adapter->rx_ring); +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + iounmap(adapter->hw.hw_addr); if (adapter->hw.flash_address) iounmap(adapter->hw.flash_address); netmap-release/LINUX/final-patches/diff--e1000e--30900--99999000644 000765 000024 00000005477 12225031332 022774 0ustar00luigistaff000000 000000 diff --git a/e1000e/netdev.c b/e1000e/netdev.c index 7e615e2..f9d8a88 100644 --- a/e1000e/netdev.c +++ b/e1000e/netdev.c @@ -473,6 +473,10 @@ static int e1000_desc_unused(struct e1000_ring *ring) return ring->count + ring->next_to_clean - ring->next_to_use - 1; } +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + /** * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp * @adapter: board private structure @@ -914,6 +918,10 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, bool cleaned = false; unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#ifdef DEV_NETMAP + if (netmap_rx_irq(netdev, 0, work_done)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); @@ -1203,6 +1211,10 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) unsigned int total_tx_bytes = 0, total_tx_packets = 0; unsigned int bytes_compl = 0, pkts_compl = 0; +#ifdef DEV_NETMAP + if (netmap_tx_irq(netdev, 0)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); @@ -3685,6 +3697,10 @@ static void e1000_configure(struct e1000_adapter *adapter) e1000e_setup_rss_hash(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); +#ifdef DEV_NETMAP + if (e1000e_netmap_init_buffers(adapter)) + return; +#endif /* DEV_NETMAP */ adapter->alloc_rx_buf(rx_ring, 
e1000_desc_unused(rx_ring), GFP_KERNEL); } @@ -3988,6 +4004,10 @@ void e1000e_down(struct e1000_adapter *adapter) netif_stop_queue(netdev); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + /* disable transmits in the hardware */ tctl = er32(TCTL); tctl &= ~E1000_TCTL_EN; @@ -4307,6 +4327,10 @@ static int e1000_open(struct net_device *netdev) adapter->tx_hang_recheck = false; netif_start_queue(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + adapter->idle_check = true; hw->mac.get_link_status = true; pm_runtime_put(&pdev->dev); @@ -6768,6 +6792,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_register; +#ifdef DEV_NETMAP + e1000_netmap_attach(adapter); +#endif /* DEV_NETMAP */ /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -6866,6 +6893,10 @@ static void e1000_remove(struct pci_dev *pdev) kfree(adapter->tx_ring); kfree(adapter->rx_ring); +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + iounmap(adapter->hw.hw_addr); if (adapter->hw.flash_address) iounmap(adapter->hw.flash_address); netmap-release/LINUX/final-patches/diff--forcedeth.c--20626--99999000644 000765 000024 00000003750 12227500737 024276 0ustar00luigistaff000000 000000 diff --git a/forcedeth.c b/forcedeth.c index 9c0b1ba..b081d6b 100644 --- a/forcedeth.c +++ b/forcedeth.c @@ -1865,12 +1865,25 @@ static void nv_init_tx(struct net_device *dev) } } +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* we need a few forward declarations */ +static void nv_drain_rxtx(struct net_device *dev); +static int nv_init_ring(struct net_device *dev); +#include +#endif + static int nv_init_ring(struct net_device *dev) { struct fe_priv *np = netdev_priv(dev); nv_init_tx(dev); nv_init_rx(dev); +#ifdef DEV_NETMAP + forcedeth_netmap_tx_init(np); + if (forcedeth_netmap_rx_init(np)) + return 0; /* success */ +#endif /* DEV_NETMAP */ + if (!nv_optimized(np)) return nv_alloc_rx(dev); @@ -3386,6 +3399,11 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data) int i; unsigned long flags; +#ifdef DEV_NETMAP + if (netmap_tx_irq(dev, 0)) + return IRQ_HANDLED; +#endif /* DEV_NETMAP */ + for (i = 0;; i++) { events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); @@ -3497,6 +3515,11 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) int i; unsigned long flags; +#ifdef DEV_NETMAP + if (netmap_rx_irq(dev, 0, &i)) + return IRQ_HANDLED; +#endif /* DEV_NETMAP */ + for (i = 0;; i++) { events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); @@ -5645,6 +5668,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_error; } +#ifdef DEV_NETMAP + forcedeth_netmap_attach(np); +#endif /* DEV_NETMAP */ + netif_carrier_off(dev); dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", @@ -5728,6 +5755,10 @@ static void __devexit nv_remove(struct pci_dev *pci_dev) unregister_netdev(dev); +#ifdef DEV_NETMAP + netmap_detach(dev); +#endif /* DEV_NETMAP */ + nv_restore_mac_addr(pci_dev); /* restore any phy related changes */ netmap-release/LINUX/final-patches/diff--igb--20620--20621000644 000765 000024 00000006735 12227500737 022611 0ustar00luigistaff000000 000000 diff --git a/igb/igb_main.c b/igb/igb_main.c index 714c3a4..5ef47a5 100644 --- a/igb/igb_main.c +++ b/igb/igb_main.c @@ -235,6 +235,10 @@ 
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + /** * Scale the NIC clock cycle by a large factor so that * relatively small clock corrections can be added or @@ -991,8 +995,13 @@ int igb_up(struct igb_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif /* DEV_NETMAP */ + /* Fire a link change interrupt to start the watchdog. */ wr32(E1000_ICS, E1000_ICS_LSC); + return 0; } @@ -1012,6 +1021,10 @@ void igb_down(struct igb_adapter *adapter) wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif /* DEV_NETMAP */ + netif_tx_stop_all_queues(netdev); /* disable transmits in the hardware */ @@ -1502,6 +1515,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); +#ifdef DEV_NETMAP + igb_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + #ifdef CONFIG_IGB_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; @@ -1640,6 +1657,10 @@ static void __devexit igb_remove(struct pci_dev *pdev) wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); } #endif +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ @@ -1792,6 +1813,10 @@ static int igb_open(struct net_device *netdev) /* Fire a link status change interrupt to start the watchdog. */ wr32(E1000_ICS, E1000_ICS_LSC); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + return 0; err_req_irq: @@ -1942,6 +1967,9 @@ static void igb_configure_tx(struct igb_adapter *adapter) txdctl = rd32(E1000_TXDCTL(j)); txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(j), txdctl); +#ifdef DEV_NETMAP + igb_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ /* Turn off Relaxed Ordering on head write-backs. 
The * writebacks MUST be delivered in order or it will @@ -4448,6 +4476,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) unsigned int i, eop, count = 0; bool cleaned = false; +#ifdef DEV_NETMAP + if (netmap_tx_irq(netdev, tx_ring->queue_index)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ + i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop); @@ -4629,6 +4662,11 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, u32 staterr; u16 length; +#ifdef DEV_NETMAP + if (netmap_rx_irq(netdev, rx_ring->queue_index, work_done)) + return 1; +#endif /* DEV_NETMAP */ + i = rx_ring->next_to_clean; buffer_info = &rx_ring->buffer_info[i]; rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); @@ -4804,6 +4842,10 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, unsigned int i; int bufsz; +#ifdef DEV_NETMAP + if (igb_netmap_configure_rx_ring(rx_ring)) + return; +#endif /* DEV_NETMAP */ i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; netmap-release/LINUX/final-patches/diff--igb--20621--20623000644 000765 000024 00000001771 12227500737 022607 0ustar00luigistaff000000 000000 diff --git a/igb/igb_main.c b/igb/igb_main.c index c881347..77b3fda 100644 --- a/igb/igb_main.c +++ b/igb/igb_main.c @@ -1144,6 +1144,10 @@ int igb_up(struct igb_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif /* DEV_NETMAP */ + /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -1167,6 +1171,10 @@ void igb_down(struct igb_adapter *adapter) wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif /* DEV_NETMAP */ + netif_tx_stop_all_queues(netdev); /* disable transmits in the hardware */ @@ -2018,6 +2026,10 @@ static int igb_open(struct net_device *netdev) netif_tx_start_all_queues(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); netmap-release/LINUX/final-patches/diff--igb--20623--30200000644 000765 000024 00000006312 12227500737 022575 0ustar00luigistaff000000 000000 diff --git a/igb/igb_main.c b/igb/igb_main.c index cea37e0..70777e4 100644 --- a/igb/igb_main.c +++ b/igb/igb_main.c @@ -201,6 +201,10 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + struct igb_reg_info { u32 ofs; char *name; @@ -1478,6 +1482,10 @@ int igb_up(struct igb_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif /* DEV_NETMAP */ + /* start the watchdog. 
*/ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -1501,6 +1509,10 @@ void igb_down(struct igb_adapter *adapter) wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif /* DEV_NETMAP */ + netif_tx_stop_all_queues(netdev); /* disable transmits in the hardware */ @@ -1963,6 +1975,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); +#ifdef DEV_NETMAP + igb_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + #ifdef CONFIG_IGB_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; @@ -2072,6 +2088,10 @@ static void __devexit igb_remove(struct pci_dev *pdev) dev_info(&pdev->dev, "IOV Disabled\n"); } #endif +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + iounmap(hw->hw_addr); if (hw->flash_address) @@ -2366,6 +2386,10 @@ static int igb_open(struct net_device *netdev) netif_tx_start_all_queues(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -2545,6 +2569,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(reg_idx), txdctl); +#ifdef DEV_NETMAP + igb_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } /** @@ -5338,6 +5365,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) unsigned int i, eop, count = 0; bool cleaned = false; +#ifdef DEV_NETMAP + if (netmap_tx_irq(netdev, tx_ring->queue_index)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ + i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop); @@ -5540,6 +5572,11 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, u16 length; u16 vlan_tag; +#ifdef DEV_NETMAP + if (netmap_rx_irq(netdev, rx_ring->queue_index, work_done)) + return 1; +#endif /* DEV_NETMAP */ + i = rx_ring->next_to_clean; buffer_info = &rx_ring->buffer_info[i]; rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); @@ -5668,6 +5705,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count) unsigned int i; int bufsz; +#ifdef DEV_NETMAP + if (igb_netmap_configure_rx_ring(rx_ring)) + return; +#endif /* DEV_NETMAP */ i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; netmap-release/LINUX/final-patches/diff--igb--30200--30300000644 000765 000024 00000007701 12227500737 022571 0ustar00luigistaff000000 000000 diff --git a/igb/igb_main.c b/igb/igb_main.c index ced5444..fb7c766 100644 --- a/igb/igb_main.c +++ b/igb/igb_main.c @@ -225,6 +225,10 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + struct igb_reg_info { u32 ofs; char *name; @@ -1551,6 +1555,10 @@ int igb_up(struct igb_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif /* DEV_NETMAP */ + /* start the watchdog. 
*/ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -1584,6 +1592,10 @@ void igb_down(struct igb_adapter *adapter) wrfl(); msleep(10); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif /* DEV_NETMAP */ + for (i = 0; i < adapter->num_q_vectors; i++) napi_disable(&(adapter->q_vector[i]->napi)); @@ -2073,6 +2085,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); +#ifdef DEV_NETMAP + igb_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + #ifdef CONFIG_IGB_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; @@ -2199,6 +2215,10 @@ static void __devexit igb_remove(struct pci_dev *pdev) dev_info(&pdev->dev, "IOV Disabled\n"); } #endif +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + iounmap(hw->hw_addr); if (hw->flash_address) @@ -2529,6 +2549,10 @@ static int igb_open(struct net_device *netdev) netif_tx_start_all_queues(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -2711,6 +2735,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(reg_idx), txdctl); +#ifdef DEV_NETMAP + igb_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } /** @@ -3088,6 +3115,19 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, /* Only set Drop Enable if we are supporting multiple queues */ if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) srrctl |= E1000_SRRCTL_DROP_EN; +#ifdef DEV_NETMAP + { + /* The driver uses split buffers, which are not + * supported in netmap mode */ + struct ifnet *ifp = adapter->netdev; + struct netmap_adapter *na = NA(ifp); + if (na && ifp->if_capenable & IFCAP_NETMAP) { + srrctl &= ~(7 << 25); /* clear descriptor type */ + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + /* XXX we should set tail here */ + } + } +#endif wr32(E1000_SRRCTL(reg_idx), srrctl); @@ -5705,6 +5745,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) if (test_bit(__IGB_DOWN, &adapter->state)) return true; +#ifdef DEV_NETMAP + if (netmap_tx_irq(tx_ring->netdev, tx_ring->queue_index)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IGB_TX_DESC(tx_ring, i); @@ -5980,6 +6024,12 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) u16 cleaned_count = igb_desc_unused(rx_ring); u16 i = rx_ring->next_to_clean; +#ifdef DEV_NETMAP + int dummy = 1; // select rx irq handling + if (netmap_rx_irq(rx_ring->netdev, rx_ring->queue_index, &dummy)) + return 1; +#endif /* DEV_NETMAP */ + rx_desc = IGB_RX_DESC(rx_ring, i); while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { @@ -6170,6 +6220,11 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) struct igb_rx_buffer *bi; u16 i = rx_ring->next_to_use; +#ifdef DEV_NETMAP + if (igb_netmap_configure_rx_ring(rx_ring)) + return; +#endif /* DEV_NETMAP */ + rx_desc = IGB_RX_DESC(rx_ring, i); bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; netmap-release/LINUX/final-patches/diff--igb--30300--30800000644 000765 000024 00000007637 12227500737 022607 0ustar00luigistaff000000 000000 diff --git a/igb/igb_main.c b/igb/igb_main.c index 94be6c3..294051b 100644 --- a/igb/igb_main.c +++ b/igb/igb_main.c @@ -236,6 +236,10 @@ MODULE_DESCRIPTION("Intel(R) Gigabit 
Ethernet Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + struct igb_reg_info { u32 ofs; char *name; @@ -1557,6 +1561,10 @@ int igb_up(struct igb_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif /* DEV_NETMAP */ + /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -1590,6 +1598,10 @@ void igb_down(struct igb_adapter *adapter) wrfl(); msleep(10); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif /* DEV_NETMAP */ + for (i = 0; i < adapter->num_q_vectors; i++) napi_disable(&(adapter->q_vector[i]->napi)); @@ -2081,6 +2093,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); +#ifdef DEV_NETMAP + igb_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + #ifdef CONFIG_IGB_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; @@ -2211,6 +2227,10 @@ static void __devexit igb_remove(struct pci_dev *pdev) dev_info(&pdev->dev, "IOV Disabled\n"); } #endif +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + iounmap(hw->hw_addr); if (hw->flash_address) @@ -2547,6 +2567,10 @@ static int __igb_open(struct net_device *netdev, bool resuming) netif_tx_start_all_queues(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + if (!resuming) pm_runtime_put(&pdev->dev); @@ -2750,6 +2774,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(reg_idx), txdctl); +#ifdef DEV_NETMAP + igb_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } /** @@ -3127,6 +3154,19 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, /* Only set Drop Enable if we are supporting multiple queues */ if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) srrctl |= E1000_SRRCTL_DROP_EN; +#ifdef DEV_NETMAP + { + /* The driver uses split buffers, which are not + * supported in netmap mode */ + struct ifnet *ifp = adapter->netdev; + struct netmap_adapter *na = NA(ifp); + if (na && ifp->if_capenable & IFCAP_NETMAP) { + srrctl &= ~(7 << 25); /* clear descriptor type */ + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + /* XXX we should set tail here */ + } + } +#endif wr32(E1000_SRRCTL(reg_idx), srrctl); @@ -5753,6 +5793,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) if (test_bit(__IGB_DOWN, &adapter->state)) return true; +#ifdef DEV_NETMAP + if (netmap_tx_irq(tx_ring->netdev, tx_ring->queue_index)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IGB_TX_DESC(tx_ring, i); @@ -6030,6 +6074,12 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) u16 cleaned_count = igb_desc_unused(rx_ring); u16 i = rx_ring->next_to_clean; +#ifdef DEV_NETMAP + int dummy = 1; // select rx irq handling + if (netmap_rx_irq(rx_ring->netdev, rx_ring->queue_index, &dummy)) + return 1; +#endif /* DEV_NETMAP */ + rx_desc = IGB_RX_DESC(rx_ring, i); while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { @@ -6220,6 +6270,11 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) struct igb_rx_buffer *bi; u16 i = rx_ring->next_to_use; +#ifdef DEV_NETMAP + if (igb_netmap_configure_rx_ring(rx_ring)) + return; +#endif /* DEV_NETMAP */ + rx_desc = 
IGB_RX_DESC(rx_ring, i); bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; netmap-release/LINUX/final-patches/diff--igb--30800--30b00000644 000765 000024 00000006347 12227500737 022663 0ustar00luigistaff000000 000000 diff --git a/igb/igb_main.c b/igb/igb_main.c index 31cfe2e..8439bc6 100644 --- a/igb/igb_main.c +++ b/igb/igb_main.c @@ -247,6 +247,10 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + struct igb_reg_info { u32 ofs; char *name; @@ -1520,6 +1524,10 @@ int igb_up(struct igb_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif /* DEV_NETMAP */ + /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -1553,6 +1561,10 @@ void igb_down(struct igb_adapter *adapter) wrfl(); msleep(10); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif /* DEV_NETMAP */ + for (i = 0; i < adapter->num_q_vectors; i++) napi_disable(&(adapter->q_vector[i]->napi)); @@ -2127,6 +2139,10 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); +#ifdef DEV_NETMAP + igb_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + #ifdef CONFIG_IGB_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; @@ -2233,6 +2249,10 @@ static void igb_remove(struct pci_dev *pdev) wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); } #endif +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
*/ @@ -2553,6 +2573,10 @@ static int __igb_open(struct net_device *netdev, bool resuming) netif_tx_start_all_queues(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + if (!resuming) pm_runtime_put(&pdev->dev); @@ -2746,6 +2770,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(reg_idx), txdctl); +#ifdef DEV_NETMAP + igb_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } /** @@ -5690,6 +5717,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) if (test_bit(__IGB_DOWN, &adapter->state)) return true; +#ifdef DEV_NETMAP + if (netmap_tx_irq(tx_ring->netdev, tx_ring->queue_index)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IGB_TX_DESC(tx_ring, i); @@ -6349,6 +6380,10 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) unsigned int total_bytes = 0, total_packets = 0; u16 cleaned_count = igb_desc_unused(rx_ring); +#ifdef DEV_NETMAP + if (netmap_rx_irq(rx_ring->netdev, rx_ring->queue_index, &total_packets)) + return true; +#endif /* DEV_NETMAP */ do { union e1000_adv_rx_desc *rx_desc; @@ -6461,6 +6496,11 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) struct igb_rx_buffer *bi; u16 i = rx_ring->next_to_use; +#ifdef DEV_NETMAP + if (igb_netmap_configure_rx_ring(rx_ring)) + return; +#endif /* DEV_NETMAP */ + /* nothing to do */ if (!cleaned_count) return; netmap-release/LINUX/final-patches/diff--igb--30b00--99999000644 000765 000024 00000006536 12227500737 022725 0ustar00luigistaff000000 000000 diff --git a/igb/igb_main.c b/igb/igb_main.c index c1d72c0..7d00631 100644 --- a/igb/igb_main.c +++ b/igb/igb_main.c @@ -255,6 +255,10 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + struct igb_reg_info { u32 ofs; char *name; @@ -1633,6 +1637,10 @@ int igb_up(struct igb_adapter *adapter) netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif /* DEV_NETMAP */ + /* start the watchdog. */ hw->mac.get_link_status = 1; schedule_work(&adapter->watchdog_task); @@ -1671,9 +1679,11 @@ void igb_down(struct igb_adapter *adapter) for (i = 0; i < adapter->num_q_vectors; i++) { napi_synchronize(&(adapter->q_vector[i]->napi)); - napi_disable(&(adapter->q_vector[i]->napi)); } +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif /* DEV_NETMAP */ del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); @@ -2295,6 +2305,10 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); +#ifdef DEV_NETMAP + igb_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + #ifdef CONFIG_IGB_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; @@ -2536,6 +2550,10 @@ static void igb_remove(struct pci_dev *pdev) wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); } #endif +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
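The igb hunks above and below follow the same template used by every patch in this directory: wake netmap clients from the TX/RX interrupt cleanup routines, attach a netmap adapter in probe and detach it in remove, and enable or disable the netmap rings when the interface goes up or down. A minimal sketch of that template follows; it is not part of any patch, the mydrv_* names are placeholders rather than functions of a real driver, and only the netmap_* calls correspond to the hooks the patches actually add.

struct mydrv_adapter;	/* placeholder for the driver's private softc */

#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE)
#include <mydrv_netmap_linux.h>	/* placeholder glue header; defines DEV_NETMAP and mydrv_netmap_attach() */
#endif

/* interrupt cleanup: in netmap mode only wake up the client and return */
static bool mydrv_clean_tx_irq(struct net_device *netdev, int ring_nr)
{
#ifdef DEV_NETMAP
	if (netmap_tx_irq(netdev, ring_nr))
		return true;	/* netmap client does the actual work */
#endif
	/* ... standard descriptor cleanup ... */
	return true;
}

static bool mydrv_clean_rx_irq(struct net_device *netdev, int ring_nr, int *work_done)
{
#ifdef DEV_NETMAP
	if (netmap_rx_irq(netdev, ring_nr, work_done))
		return true;
#endif
	/* ... standard receive path ... */
	return true;
}

/* interface up/down: hand the rings to netmap, or take them back */
static void mydrv_up(struct net_device *netdev)
{
	/* ... start queues ... */
#ifdef DEV_NETMAP
	netmap_enable_all_rings(netdev);
#endif
}

static void mydrv_down(struct net_device *netdev)
{
#ifdef DEV_NETMAP
	netmap_disable_all_rings(netdev);
#endif
	/* ... stop queues, disable hardware ... */
}

/* probe/remove: register and unregister the netmap adapter */
static void mydrv_probe_done(struct mydrv_adapter *adapter)
{
#ifdef DEV_NETMAP
	mydrv_netmap_attach(adapter);	/* placeholder: the per-driver header builds a struct netmap_adapter */
#endif
}

static void mydrv_remove_prep(struct net_device *netdev)
{
#ifdef DEV_NETMAP
	netmap_detach(netdev);
#endif
}

The remaining igb hunks below show where these hooks land in the real driver.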
@@ -2814,6 +2832,10 @@ static int __igb_open(struct net_device *netdev, bool resuming) netif_tx_start_all_queues(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(netdev); +#endif /* DEV_NETMAP */ + if (!resuming) pm_runtime_put(&pdev->dev); @@ -3007,6 +3029,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(reg_idx), txdctl); +#ifdef DEV_NETMAP + igb_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } /** @@ -5991,6 +6016,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) if (test_bit(__IGB_DOWN, &adapter->state)) return true; +#ifdef DEV_NETMAP + if (netmap_tx_irq(tx_ring->netdev, tx_ring->queue_index)) + return 1; /* cleaned ok */ +#endif /* DEV_NETMAP */ tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IGB_TX_DESC(tx_ring, i); @@ -6650,6 +6679,10 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) unsigned int total_bytes = 0, total_packets = 0; u16 cleaned_count = igb_desc_unused(rx_ring); +#ifdef DEV_NETMAP + if (netmap_rx_irq(rx_ring->netdev, rx_ring->queue_index, &total_packets)) + return true; +#endif /* DEV_NETMAP */ do { union e1000_adv_rx_desc *rx_desc; @@ -6767,6 +6800,11 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) struct igb_rx_buffer *bi; u16 i = rx_ring->next_to_use; +#ifdef DEV_NETMAP + if (igb_netmap_configure_rx_ring(rx_ring)) + return; +#endif /* DEV_NETMAP */ + /* nothing to do */ if (!cleaned_count) return; netmap-release/LINUX/final-patches/diff--ixgbe--20623--20625000644 000765 000024 00000007457 12227500737 023157 0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index 74d9b6d..2ae47cf 100644 --- a/ixgbe/ixgbe_main.c +++ b/ixgbe/ixgbe_main.c @@ -214,6 +214,22 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. + * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -741,6 +757,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, unsigned int i, eop, count = 0; unsigned int total_bytes = 0, total_packets = 0; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->tx_buffer_info[i].next_to_watch; eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); @@ -1187,6 +1213,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, int ddp_bytes = 0; #endif /* IXGBE_FCOE */ +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. 
+ */ + if (netmap_rx_irq(adapter->netdev, rx_ring->queue_index, work_done)) + return; +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); @@ -3159,6 +3192,12 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); +#ifdef DEV_NETMAP + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_netmap_configure_tx_ring(adapter, + adapter->rx_ring[i].reg_idx); + return; +#endif /* DEV_NETMAP */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i], (adapter->rx_ring[i]->count - 1)); @@ -3390,6 +3429,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) DPRINTK(DRV, ERR, "Could not enable " "Tx Queue %d\n", j); } +#ifdef DEV_NETMAP // XXX i and j are the same ? + ixgbe_netmap_configure_tx_ring(adapter, j); +#endif /* DEV_NETMAP */ + } for (i = 0; i < num_rx_rings; i++) { @@ -3476,6 +3519,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -3718,6 +3765,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) cancel_work_sync(&adapter->fdir_reinit_task); @@ -6833,6 +6884,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); cards_found++; + +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; err_register: netmap-release/LINUX/final-patches/diff--ixgbe--20625--20626000644 000765 000024 00000007343 12227500737 023154 0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index eee0b29..70581eb 100644 --- a/ixgbe/ixgbe_main.c +++ b/ixgbe/ixgbe_main.c @@ -214,6 +214,22 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. + * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -740,6 +756,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, unsigned int i, eop, count = 0; unsigned int total_bytes = 0, total_packets = 0; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. 
+ */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->tx_buffer_info[i].next_to_watch; eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); @@ -1185,6 +1211,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, int ddp_bytes = 0; #endif /* IXGBE_FCOE */ +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. + */ + if (netmap_rx_irq(adapter->netdev, rx_ring->queue_index, work_done)) + return; +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); @@ -2519,6 +2552,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +#ifdef DEV_NETMAP + ixgbe_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -2833,6 +2869,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef DEV_NETMAP + if (ixgbe_netmap_configure_rx_ring(adapter, reg_idx)) + return; +#endif /* DEV_NETMAP */ ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring)); } @@ -3614,6 +3654,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -3863,6 +3907,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + /* Cleanup the affinity_hint CPU mask memory and callback */ for (i = 0; i < num_q_vectors; i++) { struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; @@ -7048,6 +7096,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); cards_found++; + +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; err_register: netmap-release/LINUX/final-patches/diff--ixgbe--20626--30000000644 000765 000024 00000007327 12227500737 023142 0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index 30f9ccf..60c0252 100644 --- a/ixgbe/ixgbe_main.c +++ b/ixgbe/ixgbe_main.c @@ -221,6 +221,22 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. 
+ * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -826,6 +842,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, unsigned int total_bytes = 0, total_packets = 0; u16 i, eop, count = 0; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->tx_buffer_info[i].next_to_watch; eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); @@ -1308,6 +1334,13 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, u16 cleaned_count = 0; bool pkt_is_rsc = false; +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. + */ + if (netmap_rx_irq(adapter->netdev, rx_ring->queue_index, work_done)) + return; +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); @@ -2730,6 +2763,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +#ifdef DEV_NETMAP + ixgbe_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -3094,6 +3130,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef DEV_NETMAP + if (ixgbe_netmap_configure_rx_ring(adapter, reg_idx)) + return; +#endif /* DEV_NETMAP */ ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring)); } @@ -3882,6 +3922,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4121,6 +4165,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + /* Cleanup the affinity_hint CPU mask memory and callback */ for (i = 0; i < num_q_vectors; i++) { struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; @@ -7450,6 +7498,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); cards_found++; + +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; err_register: netmap-release/LINUX/final-patches/diff--ixgbe--30000--30100000644 000765 000024 00000007310 12227500737 023116 0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index 08e8e25..8070930 100644 --- a/ixgbe/ixgbe_main.c +++ b/ixgbe/ixgbe_main.c @@ -247,6 +247,22 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. 
+ * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. + * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -864,6 +880,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, unsigned int total_bytes = 0, total_packets = 0; u16 i, eop, count = 0; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ i = tx_ring->next_to_clean; eop = tx_ring->tx_buffer_info[i].next_to_watch; eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); @@ -1348,6 +1374,13 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, u16 cleaned_count = 0; bool pkt_is_rsc = false; +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. + */ + if (netmap_rx_irq(adapter->netdev, rx_ring->queue_index, work_done)) + return; +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); @@ -2808,6 +2841,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +#ifdef DEV_NETMAP + ixgbe_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -3183,6 +3219,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef DEV_NETMAP + if (ixgbe_netmap_configure_rx_ring(adapter, reg_idx)) + return; +#endif /* DEV_NETMAP */ ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring)); } @@ -3976,6 +4016,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4212,6 +4256,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | IXGBE_FLAG2_RESET_REQUESTED); adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; @@ -7683,6 +7731,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); cards_found++; + +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; err_register: netmap-release/LINUX/final-patches/diff--ixgbe--30100--30200000644 000765 000024 00000007312 12220335545 023116 0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index e1fcc95..1aab0df 100644 --- a/ixgbe/ixgbe_main.c +++ b/ixgbe/ixgbe_main.c @@ -249,6 +249,22 @@ static const struct ixgbe_reg_info 
ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. + * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -801,6 +817,17 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, unsigned int total_bytes = 0, total_packets = 0; u16 i, eop, count = 0; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ + i = tx_ring->next_to_clean; eop = tx_ring->tx_buffer_info[i].next_to_watch; eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); @@ -1303,6 +1330,13 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, u16 cleaned_count = 0; bool pkt_is_rsc = false; +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. + */ + if (netmap_rx_irq(adapter->netdev, rx_ring->queue_index, work_done)) + return; +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); @@ -2676,6 +2710,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +#ifdef DEV_NETMAP + ixgbe_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -3039,6 +3076,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef DEV_NETMAP + if (ixgbe_netmap_configure_rx_ring(adapter, reg_idx)) + return; +#endif /* DEV_NETMAP */ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } @@ -3873,6 +3914,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4126,6 +4171,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | IXGBE_FLAG2_RESET_REQUESTED); adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; @@ -7696,6 +7745,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); cards_found++; + +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; err_register: netmap-release/LINUX/final-patches/diff--ixgbe--30200--30400000644 000765 000024 00000007276 12220335545 023132 
0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index 8ef92d1..6a37803 100644 --- a/ixgbe/ixgbe_main.c +++ b/ixgbe/ixgbe_main.c @@ -188,6 +188,22 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. + * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -745,6 +761,17 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, unsigned int budget = q_vector->tx.work_limit; u16 i = tx_ring->next_to_clean; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return true; /* seems to be ignored */ +#endif /* DEV_NETMAP */ + tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); @@ -1253,6 +1280,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, u16 cleaned_count = 0; bool pkt_is_rsc = false; +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. + */ + int dummy; + if (netmap_rx_irq(adapter->netdev, rx_ring->queue_index, &dummy)) + return true; +#endif /* DEV_NETMAP */ i = rx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); @@ -2420,6 +2455,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +#ifdef DEV_NETMAP + ixgbe_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -2783,6 +2821,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef DEV_NETMAP + if (ixgbe_netmap_configure_rx_ring(adapter, reg_idx)) + return; +#endif /* DEV_NETMAP */ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } @@ -3757,6 +3799,10 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4007,6 +4053,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | IXGBE_FLAG2_RESET_REQUESTED); adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; @@ -7710,6 +7760,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_dev_info("Intel(R) 10 Gigabit Network Connection\n"); cards_found++; + 
+#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; err_register: netmap-release/LINUX/final-patches/diff--ixgbe--30400--30500000644 000765 000024 00000007473 12220335545 023134 0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index 467948e..0aa1511 100644 --- a/ixgbe/ixgbe_main.c +++ b/ixgbe/ixgbe_main.c @@ -204,6 +204,22 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. + * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -749,6 +765,17 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (test_bit(__IXGBE_DOWN, &adapter->state)) return true; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ + tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IXGBE_TX_DESC(tx_ring, i); i -= tx_ring->count; @@ -1629,6 +1656,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. 
+ */ + int dummy; + if (netmap_rx_irq(rx_ring->netdev, rx_ring->queue_index, &dummy)) + return true; /* no more interrupts */ +#endif /* DEV_NETMAP */ + do { struct ixgbe_rx_buffer *rx_buffer; union ixgbe_adv_rx_desc *rx_desc; @@ -2683,6 +2719,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +#ifdef DEV_NETMAP + ixgbe_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -3032,6 +3071,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef DEV_NETMAP + if (ixgbe_netmap_configure_rx_ring(adapter, reg_idx)) + return; +#endif /* DEV_NETMAP */ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } @@ -3986,6 +4029,10 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4249,6 +4296,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | IXGBE_FLAG2_RESET_REQUESTED); adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4764,6 +4815,7 @@ static int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); + return 0; err_req_irq: @@ -7152,6 +7204,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_dev_info("%s\n", ixgbe_default_device_descr); cards_found++; + +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; err_register: netmap-release/LINUX/final-patches/diff--ixgbe--30500--30900000644 000765 000024 00000007520 12225031332 023122 0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index e242104..02e1544 100644 --- a/ixgbe/ixgbe_main.c +++ b/ixgbe/ixgbe_main.c @@ -204,6 +204,22 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. + * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -764,6 +780,17 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (test_bit(__IXGBE_DOWN, &adapter->state)) return true; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. 
+ */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ + tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IXGBE_TX_DESC(tx_ring, i); i -= tx_ring->count; @@ -1665,6 +1692,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. + */ + int dummy; + if (netmap_rx_irq(rx_ring->netdev, rx_ring->queue_index, &dummy)) + return true; /* no more interrupts */ +#endif /* DEV_NETMAP */ + do { struct ixgbe_rx_buffer *rx_buffer; union ixgbe_adv_rx_desc *rx_desc; @@ -2725,6 +2761,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +#ifdef DEV_NETMAP + ixgbe_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -3102,6 +3141,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef DEV_NETMAP + if (ixgbe_netmap_configure_rx_ring(adapter, reg_idx)) + return; +#endif /* DEV_NETMAP */ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } @@ -4051,6 +4094,10 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4315,6 +4362,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | IXGBE_FLAG2_RESET_REQUESTED); adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4827,6 +4878,7 @@ static int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); + return 0; err_req_irq: @@ -7358,6 +7410,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_err(probe, "failed to allocate sysfs resources\n"); #endif /* CONFIG_IXGBE_HWMON */ +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; err_register: netmap-release/LINUX/final-patches/diff--ixgbe--30900--30a00000644 000765 000024 00000010120 12227500737 023200 0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index 79f4a26..4b8a25b 100644 --- a/ixgbe/ixgbe_main.c +++ b/ixgbe/ixgbe_main.c @@ -202,6 +202,22 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. 
+ * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -826,6 +842,17 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (test_bit(__IXGBE_DOWN, &adapter->state)) return true; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ + tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IXGBE_TX_DESC(tx_ring, i); i -= tx_ring->count; @@ -1860,6 +1887,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. + */ + int dummy; + if (netmap_rx_irq(rx_ring->netdev, rx_ring->queue_index, &dummy)) + return true; /* no more interrupts */ +#endif /* DEV_NETMAP */ + do { union ixgbe_adv_rx_desc *rx_desc; struct sk_buff *skb; @@ -2846,6 +2882,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +#ifdef DEV_NETMAP + ixgbe_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -3207,6 +3246,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef DEV_NETMAP + if (ixgbe_netmap_configure_rx_ring(adapter, reg_idx)) + return; +#endif /* DEV_NETMAP */ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } @@ -4155,6 +4198,10 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4402,6 +4449,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | IXGBE_FLAG2_RESET_REQUESTED); adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4976,6 +5027,7 @@ static int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); + return 0; err_set_queues: @@ -7619,6 +7671,10 @@ skip_sriov: ixgbe_dbg_adapter_init(adapter); #endif /* CONFIG_DEBUG_FS */ +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; err_register: @@ -7653,6 +7709,10 @@ static void ixgbe_remove(struct pci_dev *pdev) struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; +#ifdef DEV_NETMAP + netmap_detach(netdev); +#endif /* DEV_NETMAP */ + #ifdef CONFIG_DEBUG_FS ixgbe_dbg_adapter_exit(adapter); #endif /*CONFIG_DEBUG_FS */ netmap-release/LINUX/final-patches/diff--ixgbe--30a00--99999000644 000765 000024 00000007407 12227500737 023257 0ustar00luigistaff000000 000000 diff --git a/ixgbe/ixgbe_main.c b/ixgbe/ixgbe_main.c index d30fbdd..7418c57 100644 --- a/ixgbe/ixgbe_main.c +++ 
b/ixgbe/ixgbe_main.c @@ -248,6 +248,22 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {} }; +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap_linux.h . + * + * The code is originally developed on FreeBSD and in the interest + * of maintainability we try to limit differences between the two systems. + * + * contains functions for netmap support + * that extend the standard driver. + * It also defines DEV_NETMAP so further conditional sections use + * that instead of CONFIG_NETMAP + */ +#include +#endif /* * ixgbe_regdump - register printout routine @@ -872,6 +888,17 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (test_bit(__IXGBE_DOWN, &adapter->state)) return true; +#ifdef DEV_NETMAP + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + */ + if (netmap_tx_irq(adapter->netdev, tx_ring->queue_index)) + return 1; /* seems to be ignored */ +#endif /* DEV_NETMAP */ + tx_buffer = &tx_ring->tx_buffer_info[i]; tx_desc = IXGBE_TX_DESC(tx_ring, i); i -= tx_ring->count; @@ -1906,6 +1933,15 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); +#ifdef DEV_NETMAP + /* + * Same as the txeof routine: only wakeup clients on intr. + */ + int dummy; + if (netmap_rx_irq(rx_ring->netdev, rx_ring->queue_index, &dummy)) + return true; /* no more interrupts */ +#endif /* DEV_NETMAP */ + do { union ixgbe_adv_rx_desc *rx_desc; struct sk_buff *skb; @@ -2905,6 +2941,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +#ifdef DEV_NETMAP + ixgbe_netmap_configure_tx_ring(adapter, reg_idx); +#endif /* DEV_NETMAP */ } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -3266,6 +3305,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); +#ifdef DEV_NETMAP + if (ixgbe_netmap_configure_rx_ring(adapter, reg_idx)) + return; +#endif /* DEV_NETMAP */ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } @@ -4216,6 +4259,10 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); +#ifdef DEV_NETMAP + netmap_enable_all_rings(adapter->netdev); +#endif + /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; @@ -4463,6 +4510,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); +#ifdef DEV_NETMAP + netmap_disable_all_rings(netdev); +#endif + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | IXGBE_FLAG2_RESET_REQUESTED); adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; @@ -5037,6 +5088,7 @@ static int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); + return 0; err_set_queues: @@ -7658,6 +7710,10 @@ skip_sriov: IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, true); +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ + return 0; 
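/*
 * Editor's sketch, not part of the netmap release or of the patches above:
 * every ixgbe hunk in this patch series follows the same interrupt hand-off
 * pattern, outlined here for a hypothetical driver "foo".  The foo_* names
 * and struct foo_ring are invented for illustration; netmap_tx_irq() and
 * netmap_rx_irq() are the netmap entry points the patches actually call,
 * with prototypes provided by the netmap glue header included at the top of
 * the patched driver.
 */
#include <linux/netdevice.h>

#ifdef DEV_NETMAP
struct foo_ring {			/* minimal stand-in for a HW ring */
	struct net_device *netdev;
	u16 queue_index;
};

static bool foo_clean_tx_irq(struct foo_ring *tx_ring)
{
	/*
	 * In netmap mode the client thread reclaims completed TX slots,
	 * so the interrupt handler only has to wake it up and return.
	 */
	if (netmap_tx_irq(tx_ring->netdev, tx_ring->queue_index))
		return true;
	/* ... the driver's normal skb-based TX reclaim would run here ... */
	return true;
}

static bool foo_clean_rx_irq(struct foo_ring *rx_ring)
{
	int work_done = 0;

	/* Same idea on RX: wake the netmap client and skip the NAPI loop. */
	if (netmap_rx_irq(rx_ring->netdev, rx_ring->queue_index, &work_done))
		return true;
	/* ... the driver's normal NAPI receive loop would run here ... */
	return false;
}
#endif /* DEV_NETMAP */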
err_register: netmap-release/LINUX/final-patches/diff--r8169.c--20620--20625000644 000765 000024 00000005567 12230530510 023052 0ustar00luigistaff000000 000000 diff --git a/r8169.c b/r8169.c index 0fe2fc9..efee0a4 100644 --- a/r8169.c +++ b/r8169.c @@ -537,6 +537,10 @@ static int rtl8169_poll(struct napi_struct *napi, int budget); static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + static void mdio_write(void __iomem *ioaddr, int reg_addr, int value) { int i; @@ -3210,6 +3214,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); +#ifdef DEV_NETMAP + re_netmap_attach(tp); +#endif /* DEV_NETMAP */ + out: return rc; @@ -3236,6 +3244,10 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) unregister_netdev(dev); +#ifdef DEV_NETMAP + netmap_detach(dev); +#endif /* DEV_NETMAP */ + /* restore original MAC address */ rtl_rar_set(tp, dev->perm_addr); @@ -3291,6 +3303,10 @@ static int rtl8169_open(struct net_device *dev) napi_enable(&tp->napi); +#ifdef DEV_NETMAP + netmap_enable_all_rings(dev); +#endif /* DEV_NETMAP */ + rtl_hw_start(dev); rtl8169_request_timer(dev); @@ -3993,6 +4009,11 @@ err_out: static void rtl8169_rx_clear(struct rtl8169_private *tp) { unsigned int i; +#ifdef DEV_NETMAP + re_netmap_tx_init(tp); + if (re_netmap_rx_init(tp)) + return 0; // success +#endif /* DEV_NETMAP */ for (i = 0; i < NUM_RX_DESC; i++) { if (tp->Rx_skbuff[i]) { @@ -4112,11 +4133,19 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) /* Wait for any pending NAPI task to complete */ napi_disable(&tp->napi); +#ifdef DEV_NETMAP + netmap_disable_all_rings(dev); +#endif /* DEV_NETMAP */ + rtl8169_irq_mask_and_ack(ioaddr); tp->intr_mask = 0xffff; RTL_W16(IntrMask, tp->intr_event); napi_enable(&tp->napi); + +#ifdef DEV_NETMAP + netmap_enable_all_rings(dev); +#endif /* DEV_NETMAP */ } static void rtl8169_reinit_task(struct work_struct *work) @@ -4372,6 +4401,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev, { unsigned int dirty_tx, tx_left; +#ifdef DEV_NETMAP + if (netmap_tx_irq(dev, 0)) + return; +#endif /* DEV_NETMAP */ + dirty_tx = tp->dirty_tx; smp_rmb(); tx_left = tp->cur_tx - dirty_tx; @@ -4468,6 +4502,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev, unsigned int cur_rx, rx_left; unsigned int delta, count; +#ifdef DEV_NETMAP + if (netmap_rx_irq(dev, 0, &count)) + return count; +#endif /* DEV_NETMAP */ + cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; rx_left = min(rx_left, budget); @@ -4687,7 +4726,12 @@ static void rtl8169_down(struct net_device *dev) napi_disable(&tp->napi); +#ifdef DEV_NETMAP + netmap_disable_all_rings(dev); +#endif /* DEV_NETMAP */ + core_down: + spin_lock_irq(&tp->lock); rtl8169_asic_down(ioaddr); netmap-release/LINUX/final-patches/diff--r8169.c--20625--20626000644 000765 000024 00000005704 12230530510 023051 0ustar00luigistaff000000 000000 diff --git a/r8169.c b/r8169.c index 53b13de..745a59d 100644 --- a/r8169.c +++ b/r8169.c @@ -535,6 +535,10 @@ static int rtl8169_poll(struct napi_struct *napi, int budget); static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + static void mdio_write(void __iomem *ioaddr, int reg_addr, int value) { int i; @@ 
-3229,6 +3233,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); +#ifdef DEV_NETMAP + re_netmap_attach(tp); +#endif /* DEV_NETMAP */ + out: return rc; @@ -3257,6 +3265,10 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); +#ifdef DEV_NETMAP + netmap_detach(dev); +#endif /* DEV_NETMAP */ + /* restore original MAC address */ rtl_rar_set(tp, dev->perm_addr); @@ -3303,6 +3315,10 @@ static int rtl8169_open(struct net_device *dev) napi_enable(&tp->napi); +#ifdef DEV_NETMAP + netmap_enable_all_rings(dev); +#endif /* DEV_NETMAP */ + rtl_hw_start(dev); rtl8169_request_timer(dev); @@ -4018,6 +4034,11 @@ static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) static int rtl8169_rx_fill(struct rtl8169_private *tp) { unsigned int i; +#ifdef DEV_NETMAP + re_netmap_tx_init(tp); + if (re_netmap_rx_init(tp)) + return 0; // success +#endif /* DEV_NETMAP */ for (i = 0; i < NUM_RX_DESC; i++) { void *data; @@ -4119,11 +4140,19 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) /* Wait for any pending NAPI task to complete */ napi_disable(&tp->napi); +#ifdef DEV_NETMAP + netmap_disable_all_rings(dev); +#endif /* DEV_NETMAP */ + rtl8169_irq_mask_and_ack(ioaddr); tp->intr_mask = 0xffff; RTL_W16(IntrMask, tp->intr_event); napi_enable(&tp->napi); + +#ifdef DEV_NETMAP + netmap_enable_all_rings(dev); +#endif /* DEV_NETMAP */ } static void rtl8169_reinit_task(struct work_struct *work) @@ -4395,6 +4424,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev, { unsigned int dirty_tx, tx_left; +#ifdef DEV_NETMAP + if (netmap_tx_irq(dev, 0)) + return; +#endif /* DEV_NETMAP */ + dirty_tx = tp->dirty_tx; smp_rmb(); tx_left = tp->cur_tx - dirty_tx; @@ -4490,6 +4524,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev, unsigned int count; int polling = (budget != ~(u32)0) ? 
1 : 0; +#ifdef DEV_NETMAP + if (netmap_rx_irq(dev, 0, &count)) + return count; +#endif /* DEV_NETMAP */ + cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; rx_left = min(rx_left, budget); @@ -4691,6 +4730,10 @@ static void rtl8169_down(struct net_device *dev) napi_disable(&tp->napi); +#ifdef DEV_NETMAP + netmap_disable_all_rings(dev); +#endif /* DEV_NETMAP */ + spin_lock_irq(&tp->lock); rtl8169_asic_down(ioaddr); netmap-release/LINUX/final-patches/diff--r8169.c--20626--30200000644 000765 000024 00000005651 12230530510 023040 0ustar00luigistaff000000 000000 diff --git a/r8169.c b/r8169.c index 7ffdb80..6bae7e6 100644 --- a/r8169.c +++ b/r8169.c @@ -590,6 +590,10 @@ static int rtl8169_poll(struct napi_struct *napi, int budget); static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) { void __iomem *ioaddr = tp->mmio_addr; @@ -3207,6 +3211,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); +#ifdef DEV_NETMAP + re_netmap_attach(tp); +#endif /* DEV_NETMAP */ + netif_carrier_off(dev); out: @@ -3238,6 +3246,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) cancel_delayed_work_sync(&tp->task); rtl_release_firmware(tp); +#ifdef DEV_NETMAP + netmap_detach(dev); +#endif /* DEV_NETMAP */ unregister_netdev(dev); @@ -3291,6 +3302,10 @@ static int rtl8169_open(struct net_device *dev) napi_enable(&tp->napi); +#ifdef DEV_NETMAP + netmap_enable_all_rings(dev); +#endif /* DEV_NETMAP */ + rtl8169_init_phy(dev, tp); /* @@ -4074,6 +4089,11 @@ static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) static int rtl8169_rx_fill(struct rtl8169_private *tp) { unsigned int i; +#ifdef DEV_NETMAP + re_netmap_tx_init(tp); + if (re_netmap_rx_init(tp)) + return 0; // success +#endif /* DEV_NETMAP */ for (i = 0; i < NUM_RX_DESC; i++) { void *data; @@ -4175,11 +4195,19 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) /* Wait for any pending NAPI task to complete */ napi_disable(&tp->napi); +#ifdef DEV_NETMAP + netmap_disable_all_rings(dev); +#endif /* DEV_NETMAP */ + rtl8169_irq_mask_and_ack(ioaddr); tp->intr_mask = 0xffff; RTL_W16(IntrMask, tp->intr_event); napi_enable(&tp->napi); + +#ifdef DEV_NETMAP + netmap_enable_all_rings(dev); +#endif /* DEV_NETMAP */ } static void rtl8169_reinit_task(struct work_struct *work) @@ -4452,6 +4480,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev, { unsigned int dirty_tx, tx_left; +#ifdef DEV_NETMAP + if (netmap_tx_irq(dev, 0)) + return; +#endif /* DEV_NETMAP */ + dirty_tx = tp->dirty_tx; smp_rmb(); tx_left = tp->cur_tx - dirty_tx; @@ -4547,6 +4580,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev, unsigned int count; int polling = (budget != ~(u32)0) ? 
1 : 0; +#ifdef DEV_NETMAP + if (netmap_rx_irq(dev, 0, &count)) + return count; +#endif /* DEV_NETMAP */ + cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; rx_left = min(rx_left, budget); @@ -4769,6 +4807,10 @@ static void rtl8169_down(struct net_device *dev) napi_disable(&tp->napi); +#ifdef DEV_NETMAP + netmap_disable_all_rings(dev); +#endif /* DEV_NETMAP */ + spin_lock_irq(&tp->lock); rtl8169_asic_down(ioaddr); netmap-release/LINUX/final-patches/diff--r8169.c--30200--30400000644 000765 000024 00000005556 12230530510 023033 0ustar00luigistaff000000 000000 diff --git a/r8169.c b/r8169.c index c8f47f1..a41e878 100644 --- a/r8169.c +++ b/r8169.c @@ -787,6 +787,10 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) } } +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +#include +#endif + static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) { void __iomem *ioaddr = tp->mmio_addr; @@ -4167,6 +4171,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); +#ifdef DEV_NETMAP + re_netmap_attach(tp); +#endif /* DEV_NETMAP */ + netif_carrier_off(dev); out: @@ -4201,6 +4209,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) unregister_netdev(dev); rtl_release_firmware(tp); +#ifdef DEV_NETMAP + netmap_detach(dev); +#endif /* DEV_NETMAP */ if (pci_dev_run_wake(pdev)) pm_runtime_get_noresume(&pdev->dev); @@ -4298,6 +4309,10 @@ static int rtl8169_open(struct net_device *dev) napi_enable(&tp->napi); +#ifdef DEV_NETMAP + netmap_enable_all_rings(dev); +#endif /* DEV_NETMAP */ + rtl8169_init_phy(dev, tp); rtl8169_set_features(dev, dev->features); @@ -5252,6 +5267,11 @@ static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) static int rtl8169_rx_fill(struct rtl8169_private *tp) { unsigned int i; +#ifdef DEV_NETMAP + re_netmap_tx_init(tp); + if (re_netmap_rx_init(tp)) + return 0; // success +#endif /* DEV_NETMAP */ for (i = 0; i < NUM_RX_DESC; i++) { void *data; @@ -5348,11 +5368,19 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) /* Wait for any pending NAPI task to complete */ napi_disable(&tp->napi); +#ifdef DEV_NETMAP + netmap_disable_all_rings(dev); +#endif /* DEV_NETMAP */ + rtl8169_irq_mask_and_ack(tp); tp->intr_mask = 0xffff; RTL_W16(IntrMask, tp->intr_event); napi_enable(&tp->napi); + +#ifdef DEV_NETMAP + netmap_enable_all_rings(dev); +#endif /* DEV_NETMAP */ } static void rtl8169_reinit_task(struct work_struct *work) @@ -5627,6 +5655,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev, { unsigned int dirty_tx, tx_left; +#ifdef DEV_NETMAP + if (netmap_tx_irq(dev, 0)) + return; +#endif /* DEV_NETMAP */ + dirty_tx = tp->dirty_tx; smp_rmb(); tx_left = tp->cur_tx - dirty_tx; @@ -5714,6 +5747,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev, unsigned int cur_rx, rx_left; unsigned int count; +#ifdef DEV_NETMAP + if (netmap_rx_irq(dev, 0, &count)) + return count; +#endif /* DEV_NETMAP */ + cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; rx_left = min(rx_left, budget); @@ -5920,6 +5958,10 @@ static void rtl8169_down(struct net_device *dev) napi_disable(&tp->napi); +#ifdef DEV_NETMAP + netmap_disable_all_rings(dev); +#endif /* DEV_NETMAP */ + spin_lock_irq(&tp->lock); rtl8169_hw_reset(tp);
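/*
 * Editor's sketch, not taken from the netmap release: besides the interrupt
 * hooks, the ixgbe and r8169 patches above all add the same attach/detach
 * and enable/disable lifecycle calls.  A minimal outline for a hypothetical
 * driver "foo" follows; foo_netmap_attach(), struct foo_adapter and the
 * foo_netmap_linux.h glue header are invented names, while netmap_detach(),
 * netmap_enable_all_rings() and netmap_disable_all_rings() are the real
 * netmap calls used by the patches.
 */
#include <linux/netdevice.h>

#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE)
#include <foo_netmap_linux.h>	/* hypothetical glue header: defines DEV_NETMAP
				 * and foo_netmap_attach(), in the same role as
				 * the ixgbe/r8169 headers in the patches above */
#endif

struct foo_adapter {			/* minimal stand-in for the driver softc */
	struct net_device *netdev;
};

static int foo_probe_done(struct foo_adapter *adapter)
{
#ifdef DEV_NETMAP
	/* register with netmap once the net_device is fully set up;
	 * the glue code typically ends up calling netmap_attach() */
	foo_netmap_attach(adapter);
#endif
	return 0;
}

static void foo_up_complete(struct foo_adapter *adapter)
{
#ifdef DEV_NETMAP
	netmap_enable_all_rings(adapter->netdev);	/* rings usable by clients */
#endif
}

static void foo_down(struct foo_adapter *adapter)
{
#ifdef DEV_NETMAP
	netmap_disable_all_rings(adapter->netdev);	/* park clients first */
#endif
	/* ... stop queues, reset the NIC ... */
}

static void foo_remove(struct foo_adapter *adapter)
{
#ifdef DEV_NETMAP
	netmap_detach(adapter->netdev);	/* before free_netdev() */
#endif
	/* ... unregister_netdev(), free_netdev() ... */
}
/*
 * Usage note (also an editor's addition): the r8169 patches additionally
 * bracket rtl8169_wait_for_quiescence() with netmap_disable_all_rings() /
 * netmap_enable_all_rings(), so netmap clients are parked while the NIC is
 * being reset and woken up again afterwards.
 */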