@@ -40,8 +40,8 @@ get_next_co(struct gk_co *this_co)
4040 return list_next_entry (this_co , cos_list );
4141}
4242
43- static void
44- yield_next (struct gk_co * this_co )
43+ void
44+ gk_yield_next (struct gk_co * this_co )
4545{
4646 struct gk_co * next_co = get_next_co (this_co );
4747 if (unlikely (this_co == next_co ))
@@ -177,7 +177,7 @@ parse_front_pkt(struct gk_co *this_co,
177177 * IPv6: 14 + 8 + 40 = 62
178178 */
179179 rte_prefetch0 (rte_pktmbuf_mtod_offset (pkt , void * , 0 ));
180- yield_next (this_co );
180+ gk_yield_next (this_co );
181181
182182 ret = extract_packet_info (pkt , packet );
183183 if (ret < 0 ) {
@@ -349,6 +349,10 @@ gk_process_request(struct gk_co *this_co, struct flow_entry *fe,
349349
350350 /* The assigned priority is @priority. */
351351
352+ /* Prepare packet for transmission. */
353+ if (likely (rte_mbuf_prefetch_part2_non_temporal (pkt )))
354+ gk_yield_next (this_co );
355+
352356 /* Encapsulate the packet as a request. */
353357 ret = encapsulate (pkt , priority , back , & fib -> u .grantor .gt_addr );
354358 if (ret < 0 )
@@ -421,6 +425,10 @@ gk_process_granted(struct gk_co *this_co, struct flow_entry *fe,
421425 priority = PRIORITY_RENEW_CAP ;
422426 }
423427
428+ /* Prepare packet for transmission. */
429+ if (likely (rte_mbuf_prefetch_part2_non_temporal (pkt )))
430+ gk_yield_next (this_co );
431+
424432 /*
425433 * Encapsulate packet as a granted packet,
426434 * mark it as a capability renewal request if @renew_cap is true,
@@ -487,7 +495,6 @@ gk_process_bpf(struct gk_co *this_co, struct flow_entry *fe,
487495{
488496 struct rte_mbuf * pkt = packet -> pkt ;
489497 struct gk_co_work * work = this_co -> work ;
490- struct gk_config * gk_conf = work -> gk_conf ;
491498 struct gk_measurement_metrics * stats ;
492499 uint64_t bpf_ret ;
493500 int program_index , rc ;
@@ -497,7 +504,7 @@ gk_process_bpf(struct gk_co *this_co, struct flow_entry *fe,
497504 goto expired ;
498505
499506 program_index = fe -> program_index ;
500- rc = gk_bpf_decide_pkt (gk_conf , program_index , fe , packet , now ,
507+ rc = gk_bpf_decide_pkt (this_co , program_index , fe , packet , now ,
501508 & bpf_ret );
502509 if (unlikely (rc != 0 )) {
503510 GK_LOG (WARNING ,
@@ -517,7 +524,7 @@ gk_process_bpf(struct gk_co *this_co, struct flow_entry *fe,
517524 * packet header space.
518525 */
519526 if (pkt_copy_cached_eth_header (pkt , eth_cache ,
520- gk_conf -> net -> back .l2_len_out ))
527+ work -> gk_conf -> net -> back .l2_len_out ))
521528 goto drop_pkt ;
522529
523530 stats -> pkts_num_granted ++ ;
@@ -996,14 +1003,14 @@ gk_co_process_front_pkt_final(struct gk_co *this_co,
9961003 /* Look up flow entry. */
9971004 rte_hash_prefetch_buckets_non_temporal (
9981005 work -> instance -> ip_flow_hash_table , ip_flow_hash_val );
999- yield_next (this_co );
1006+ gk_yield_next (this_co );
10001007 ret = rte_hash_lookup_with_hash (work -> instance -> ip_flow_hash_table ,
10011008 & packet -> flow , ip_flow_hash_val );
10021009 if (ret >= 0 ) {
10031010 fe = & work -> instance -> ip_flow_entry_table [ret ];
10041011 /* TODO Break this prefetch into part1 and part2. */
10051012 prefetch_flow_entry (fe );
1006- yield_next (this_co );
1013+ gk_yield_next (this_co );
10071014 process_flow_entry (this_co , fe , packet );
10081015 set_leftover_fe (leftover , fe );
10091016 return ;
@@ -1079,7 +1086,7 @@ gk_co_scan_flow_table_final(struct gk_co *this_co,
10791086
10801087 rte_hash_prefetch_buckets_non_temporal (instance -> ip_flow_hash_table ,
10811088 task -> task_hash );
1082- yield_next (this_co );
1089+ gk_yield_next (this_co );
10831090
10841091 gk_del_flow_entry_from_hash (instance -> ip_flow_hash_table , fe );
10851092 if (leftover -> fe == fe )
@@ -1136,7 +1143,7 @@ gk_co_scan_flow_table(struct gk_co *this_co,
11361143 * check if it's expired.
11371144 */
11381145 rte_prefetch_non_temporal (fe );
1139- yield_next (this_co );
1146+ gk_yield_next (this_co );
11401147
11411148 if (!fe -> in_use || !is_flow_expired (fe , rte_rdtsc ()))
11421149 return ;