@@ -1145,16 +1145,24 @@ nm_may_forward_up(struct netmap_kring *kring)
 }
 
 static inline int
-nm_may_forward_down(struct netmap_kring *kring)
+nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
 {
 	return _nm_may_forward(kring) &&
+		(sync_flags & NAF_CAN_FORWARD_DOWN) &&
 		kring->ring_id == kring->na->num_rx_rings;
 }
 
 /*
  * Send to the NIC rings packets marked NS_FORWARD between
- * kring->nr_hwcur and kring->rhead
- * Called under kring->rx_queue.lock on the sw rx ring,
+ * kring->nr_hwcur and kring->rhead.
+ * Called under kring->rx_queue.lock on the sw rx ring.
+ *
+ * It can only be called if the user opened all the TX hw rings,
+ * see NAF_CAN_FORWARD_DOWN flag.
+ * We can touch the TX netmap rings (slots, head and cur) since
+ * we are in poll/ioctl system call context, and the application
+ * is not supposed to touch the ring (using a different thread)
+ * during the execution of the system call.
  */
static u_int
netmap_sw_to_nic(struct netmap_adapter *na)
@@ -1168,8 +1176,6 @@ netmap_sw_to_nic(struct netmap_adapter *na)
 
 	/* scan rings to find space, then fill as much as possible */
 	for (i = 0; i < na->num_tx_rings; i++) {
-		/* XXX some krings may not be in netmap mode,
-		 * buffers may not be there */
 		struct netmap_kring *kdst = &na->tx_rings[i];
 		struct netmap_ring *rdst = kdst->ring;
 		u_int const dst_lim = kdst->nkr_num_slots - 1;
@@ -1197,11 +1203,9 @@ netmap_sw_to_nic(struct netmap_adapter *na)
 			dst->len = tmp.len;
 			dst->flags = NS_BUF_CHANGED;
 
-			/* XXX is it safe to write head/cur concurrently to
-			 * the userspace application? */
 			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
 		}
-		/* if (sent) XXX txsync ? */
+		/* if (sent) XXX txsync ? it would be just an optimization */
 	}
 	return sent;
 }
@@ -1291,7 +1295,7 @@ netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
 	 */
 	nm_i = kring->nr_hwcur;
 	if (nm_i != head) { /* something was released */
-		if (nm_may_forward_down(kring)) {
+		if (nm_may_forward_down(kring, flags)) {
			ret = netmap_sw_to_nic(na);
			if (ret > 0) {
				kring->nr_kflags |= NR_FORWARD;
@@ -1806,6 +1810,13 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags
 	}
 	priv->np_flags = (flags & ~NR_REG_MASK) | reg;
 
+	/* Allow transparent forwarding mode in the host --> nic
+	 * direction only if all the TX hw rings have been opened. */
+	if (priv->np_qfirst[NR_TX] == 0 &&
+			priv->np_qlast[NR_TX] >= na->num_tx_rings) {
+		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
+	}
+
 	if (netmap_verbose) {
 		D("%s: tx [%d,%d) rx [%d,%d) id %d",
			na->name,
@@ -2069,7 +2080,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
 		goto err_rel_excl;
 
 	/* in all cases, create a new netmap if */
-	nifp = netmap_mem_if_new(na);
+	nifp = netmap_mem_if_new(na, priv);
 	if (nifp == NULL) {
 		error = ENOMEM;
 		goto err_del_rings;
@@ -2177,6 +2188,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread
 		u_int i, qfirst, qlast;
 		struct netmap_if *nifp;
 		struct netmap_kring *krings;
+		int sync_flags;
 		enum txrx t;
 
 		if (cmd == NIOCGINFO || cmd == NIOCREGIF) {
@@ -2387,6 +2399,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread
 		krings = NMR(na, t);
 		qfirst = priv->np_qfirst[t];
 		qlast = priv->np_qlast[t];
+		sync_flags = priv->np_sync_flags;
 
 		for (i = qfirst; i < qlast; i++) {
			struct netmap_kring *kring = krings + i;
@@ -2404,7 +2417,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread
 					kring->nr_hwcur);
 			if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
				netmap_ring_reinit(kring);
-			} else if (kring->nm_sync(kring, NAF_FORCE_RECLAIM) == 0) {
+			} else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
				nm_sync_finalize(kring);
			}
			if (netmap_verbose & NM_VERB_TXSYNC)
@@ -2419,7 +2432,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data, struct thread
 				/* transparent forwarding, see netmap_poll() */
 				netmap_grab_packets(kring, &q, netmap_fwd);
			}
-			if (kring->nm_sync(kring, NAF_FORCE_READ) == 0) {
+			if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
				nm_sync_finalize(kring);
			}
			ring_timestamp_set(ring);
@@ -2518,6 +2531,7 @@ netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
 	 * file descriptor.
 	 */
 	int send_down = 0;
+	int sync_flags = priv->np_sync_flags;
 
 	mbq_init(&q);
 
@@ -2627,7 +2641,7 @@ netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
 				netmap_ring_reinit(kring);
				revents |= POLLERR;
			} else {
-				if (kring->nm_sync(kring, 0))
+				if (kring->nm_sync(kring, sync_flags))
					revents |= POLLERR;
				else
					nm_sync_finalize(kring);
@@ -2691,7 +2705,7 @@ netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
 		 * the nm_sync() below only on for the host RX ring (see
		 * netmap_rxsync_from_host()). */
		kring->nr_kflags &= ~NR_FORWARD;
-		if (kring->nm_sync(kring, 0))
+		if (kring->nm_sync(kring, sync_flags))
			revents |= POLLERR;
		else
			nm_sync_finalize(kring);
0 commit comments