@@ -40,8 +40,8 @@ get_next_co(struct gk_co *this_co)
 	return list_next_entry(this_co, co_list);
 }
 
-static void
-yield_next(struct gk_co *this_co)
+void
+gk_yield_next(struct gk_co *this_co)
 {
 	struct gk_co *next_co = get_next_co(this_co);
 	if (unlikely(this_co == next_co))
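The hunk above cuts off mid-function, so for readers following the rename, here is a minimal sketch of the whole routine. It assumes a libcoro-style coro_transfer() primitive and a coro field on struct gk_co; only get_next_co() and the self-yield early exit are confirmed by this diff.

void
gk_yield_next(struct gk_co *this_co)
{
	struct gk_co *next_co = get_next_co(this_co);

	/* This coroutine is the only runnable one; nothing to yield to. */
	if (unlikely(this_co == next_co))
		return;

	/* Assumed primitive: switch stacks to the next coroutine in the ring. */
	coro_transfer(&this_co->coro, &next_co->coro);
}

Dropping static is the point of the hunk: it lets code outside this translation unit, such as the gk_bpf_decide_pkt() path changed below, yield as well.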
@@ -156,7 +156,7 @@ parse_front_pkt(struct gk_co *this_co,
 
 	/* TODO Does this prefetch improve performance?
 	rte_mbuf_prefetch_part1_non_temporal(pkt);
-	yield_next(this_co);
+	gk_yield_next(this_co);
 	*/
 	/*
 	 * This prefetch is enough to load Ethernet header (14 bytes),
@@ -167,7 +167,7 @@ parse_front_pkt(struct gk_co *this_co,
 	 * IPv6: 14 + 8 + 40 = 62
 	 */
 	rte_prefetch_non_temporal(rte_pktmbuf_mtod_offset(pkt, void *, 0));
-	yield_next(this_co);
+	gk_yield_next(this_co);
 
 	ret = extract_packet_info(pkt, packet);
 	if (ret < 0) {
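This hunk shows the idiom the rest of the patch leans on: issue a non-temporal prefetch, yield so sibling coroutines run while the cache line is in flight, and only touch the data after control comes back around. Schematically, with consume() standing in for whatever dereferences the data (illustrative, not code from this patch):

	rte_prefetch_non_temporal(obj);	/* start loading the cache line */
	gk_yield_next(this_co);		/* siblings run; the miss is overlapped */
	consume(obj);			/* data is likely resident by now */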
@@ -335,6 +335,10 @@ gk_process_request(struct gk_co *this_co, struct flow_entry *fe,
 
 	/* The assigned priority is @priority. */
 
+	/* Prepare packet for transmission. */
+	if (likely(rte_mbuf_prefetch_part2_non_temporal(pkt)))
+		gk_yield_next(this_co);
+
 	/* Encapsulate the packet as a request. */
 	ret = encapsulate(pkt, priority, back, &fib->u.grantor.gt_addr);
 	if (ret < 0)
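rte_mbuf_prefetch_part2_non_temporal() is a Gatekeeper helper rather than stock DPDK; DPDK's rte_mbuf_prefetch_part2() returns void. The likely() above implies the non-temporal variant reports whether a prefetch was actually issued, so the yield is skipped when there is nothing to wait for. A plausible shape, mirroring DPDK's helper (an inference, not the definition in this tree):

static inline bool
rte_mbuf_prefetch_part2_non_temporal(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	/* Second cache line of the mbuf: pool, next, tx_offload, etc. */
	rte_prefetch_non_temporal(&m->cacheline1);
	return true;	/* Prefetch issued; yielding buys it time. */
#else
	/* The mbuf metadata fits in one cache line; nothing more to load. */
	return false;
#endif
}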
@@ -398,6 +402,10 @@ gk_process_granted(struct gk_co *this_co, struct flow_entry *fe,
 		priority = PRIORITY_RENEW_CAP;
 	}
 
+	/* Prepare packet for transmission. */
+	if (likely(rte_mbuf_prefetch_part2_non_temporal(pkt)))
+		gk_yield_next(this_co);
+
 	/*
 	 * Encapsulate packet as a granted packet,
 	 * mark it as a capability renewal request if @renew_cap is true,
@@ -447,7 +455,6 @@ gk_process_bpf(struct gk_co *this_co, struct flow_entry *fe,
 {
 	struct rte_mbuf *pkt = packet->pkt;
 	struct gk_co_work *work = this_co->work;
-	struct gk_config *gk_conf = work->gk_conf;
 	struct gk_measurement_metrics *stats;
 	uint64_t bpf_ret;
 	int program_index, rc;
@@ -457,7 +464,7 @@ gk_process_bpf(struct gk_co *this_co, struct flow_entry *fe,
 		goto expired;
 
 	program_index = fe->program_index;
-	rc = gk_bpf_decide_pkt(gk_conf, program_index, fe, packet, now,
+	rc = gk_bpf_decide_pkt(this_co, program_index, fe, packet, now,
 		&bpf_ret);
 	if (unlikely(rc != 0)) {
 		GK_LOG(WARNING,
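The first argument changes from the configuration to the coroutine itself. Nothing is lost: as the next hunk shows with work->gk_conf, the configuration is still reachable from the coroutine, and gk_bpf_decide_pkt() additionally gains the ability to call gk_yield_next() around its own prefetches. A hypothetical accessor for the callee side (illustrative, not from this diff):

static inline struct gk_config *
co_gk_conf(struct gk_co *this_co)
{
	return this_co->work->gk_conf;
}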
@@ -477,7 +484,7 @@ gk_process_bpf(struct gk_co *this_co, struct flow_entry *fe,
 		 * packet header space.
 		 */
 		if (pkt_copy_cached_eth_header(pkt, eth_cache,
-				gk_conf->net->back.l2_len_out))
+				work->gk_conf->net->back.l2_len_out))
 			goto drop_pkt;
 
 		stats->pkts_num_granted++;
@@ -890,7 +897,7 @@ static void
 prefetch_and_yield(void *addr, void *this_co)
 {
 	rte_prefetch_non_temporal(addr);
-	yield_next(this_co);
+	gk_yield_next(this_co);
 }
 
 static void
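Note that prefetch_and_yield() keeps its generic (void *addr, void *this_co) signature: it can be handed to lookup code as an opaque callback, with the coroutine smuggled through the second void * and implicitly converted back at the gk_yield_next() call.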
@@ -922,7 +929,7 @@ gk_co_process_front_pkt_final(struct gk_co *this_co, struct gk_co_task *task)
 	fe = &work->instance->ip_flow_entry_table[ret];
 	/* TODO Break this prefetch into part1 and part2. */
 	prefetch_flow_entry(fe);
-	yield_next(this_co);
+	gk_yield_next(this_co);
 	process_flow_entry(this_co, fe, packet);
 	save_fe_leftover(work, fe);
 	return;
@@ -1039,7 +1046,7 @@ gk_co_scan_flow_table(struct gk_co *this_co, struct gk_co_task *task)
 	 * check if it's expired.
 	 */
 	rte_prefetch_non_temporal(fe);
-	yield_next(this_co);
+	gk_yield_next(this_co);
 
 	if (!fe->in_use || !is_flow_expired(fe, rte_rdtsc()))
 		return;