/*
 * NOTE(review): garbled extract -- original line numbers are fused into the
 * text and interior lines are missing. Visible intent: compile-time checks
 * that the EM undefined-event handle matches ODP_EVENT_INVALID (so EM/ODP
 * handles can be cast between each other), plus a (partially elided) check
 * whose message requires EM_TMO_TYPE_NONE to be 0. Do not edit without the
 * complete file.
 */
45 COMPILE_TIME_ASSERT((uintptr_t)
EM_EVENT_UNDEF == (uintptr_t)ODP_EVENT_INVALID,
46 EM_EVENT_NOT_EQUAL_TO_ODP_EVENT);
48 "EM_TMO_TYPE_NONE must be 0");
/*
 * NOTE(review): garbled extract (lines elided). Visible intent: forward
 * declarations for event debug printing, packet cloning, output-queue
 * tracking/draining, and event-vector table/max-size accessors. The
 * event_vector_max_size() prototype appears cut mid-parameter-list.
 */
52 void print_event_info(
void);
53 em_event_t
pkt_clone_odp(odp_packet_t pkt, odp_pool_t pkt_pool,
54 uint32_t offset, uint32_t size,
bool is_clone_part);
55 void output_queue_track(
queue_elem_t *
const output_q_elem);
56 void output_queue_drain(
const queue_elem_t *output_q_elem);
57 void output_queue_buffering_drain(
void);
59 uint32_t event_vector_tbl(em_event_t vector_event, em_event_t **event_tbl);
60 em_status_t event_vector_max_size(em_event_t vector_event, uint32_t *max_size ,
/*
 * NOTE(review): garbled extract -- most of the body (branches, ESV state
 * transitions, returns) is elided. Visible intent: initialize the EM event
 * header of an ODP packet. Reads the packet user flag (presumably marks an
 * already-initialized event -- confirm against full source) and the ESV
 * enable state; on the init path it zeroes ev_hdr->flags and stores the EM
 * handle, and looks up the EM pool from the ODP pool. Do not edit without
 * the complete file.
 */
66 static inline em_event_t
67 evhdr_init_pkt(
event_hdr_t *ev_hdr, em_event_t event,
68 odp_packet_t odp_pkt,
bool is_extev)
70 const int user_flag_set = odp_packet_user_flag(odp_pkt);
71 const bool esv_ena = esv_enabled();
76 event = ev_hdr->
event;
93 ev_hdr->
flags.all = 0;
94 ev_hdr->
event = event;
106 odp_pool_t odp_pool = odp_packet_pool(odp_pkt);
107 em_pool_t pool = pool_odp2em(odp_pool);
117 ev_hdr->
flags.all = 0;
/*
 * NOTE(review): garbled extract -- the function signature line itself is
 * elided; from the call at event_init_pkt_multi() this is presumably
 * evhdr_init_pkt_multi (TODO confirm). Visible intent: batch version of the
 * packet event-header init. First pass collects indices of packets whose
 * user flag indicates they still need init (needs_init_idx[]/needs_init_num,
 * VLA sized by num); early-out when none need init; later passes zero
 * ev_hdr flags, store EM handles, and -- when ESV is enabled -- call
 * evstate_init()/evstate_update() depending on prealloc_pools config, pool
 * lookup, and the refs_used flag. Most control flow is elided; do not edit
 * without the complete file.
 */
128 const odp_packet_t odp_pkts[],
129 const int num,
bool is_extev)
131 const bool esv_ena = esv_enabled();
134 int needs_init_idx[num];
135 int needs_init_num = 0;
138 for (
int i = 0; i < num; i++) {
139 user_flag_set = odp_packet_user_flag(odp_pkts[i]);
143 events[i] = ev_hdrs[i]->
event;
146 EVSTATE__DISPATCH_MULTI);
151 needs_init_idx[needs_init_num] = i;
156 if (needs_init_num == 0)
164 for (
int i = 0; i < needs_init_num; i++) {
165 idx = needs_init_idx[i];
166 ev_hdrs[idx]->
flags.all = 0;
168 ev_hdrs[idx]->
event = events[idx];
179 if (!
em_shm->opt.esv.prealloc_pools) {
180 for (
int i = 0; i < needs_init_num; i++) {
181 idx = needs_init_idx[i];
182 events[idx] =
evstate_init(events[idx], ev_hdrs[idx], is_extev);
186 for (
int i = 0; i < needs_init_num; i++) {
187 idx = needs_init_idx[i];
189 odp_pool_t odp_pool = odp_packet_pool(odp_pkts[idx]);
190 em_pool_t pool = pool_odp2em(odp_pool);
192 if (pool ==
EM_POOL_UNDEF || ev_hdrs[idx]->flags.refs_used) {
197 events[idx] =
evstate_init(events[idx], ev_hdrs[idx], is_extev);
200 events[idx] =
evstate_update(events[idx], ev_hdrs[idx], is_extev);
205 for (
int i = 0; i < needs_init_num; i++) {
206 idx = needs_init_idx[i];
207 ev_hdrs[idx]->
flags.all = 0;
/*
 * NOTE(review): garbled extract, interior lines missing. Visible intent:
 * initialize the EM event header of an ODP packet vector, mirroring
 * evhdr_init_pkt(): reads the vector user flag and ESV enable state, zeroes
 * header flags, stores the EM handle, and on the ESV path consults
 * prealloc_pools config and the vector's pool. Elided branches and return
 * unknown; do not edit without the complete file.
 */
217 static inline em_event_t
218 evhdr_init_pktvec(
event_hdr_t *ev_hdr, em_event_t event,
219 odp_packet_vector_t odp_pktvec,
bool is_extev)
221 const int user_flag = odp_packet_vector_user_flag(odp_pktvec);
222 const bool esv_ena = esv_enabled();
227 event = ev_hdr->
event;
240 ev_hdr->
flags.all = 0;
246 ev_hdr->
event = event;
253 if (!
em_shm->opt.esv.prealloc_pools) {
257 odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvec);
258 em_pool_t pool = pool_odp2em(odp_pool);
/*
 * NOTE(review): garbled extract -- signature line elided; from the call at
 * event_init_pktvec_multi() this is presumably evhdr_init_pktvec_multi
 * (TODO confirm). Visible intent: batch event-header init for packet
 * vectors: collect vectors whose user flag says they need init, set
 * USER_FLAG_SET on them, zero header flags, store EM handles, then run the
 * ESV init/update passes (evstate_init()/evstate_update()) gated on
 * prealloc_pools and per-vector pool lookup. Most control flow elided; do
 * not edit without the complete file.
 */
280 const odp_packet_vector_t odp_pktvecs[],
281 const int num,
bool is_extev)
283 const bool esv_ena = esv_enabled();
285 int needs_init_idx[num];
286 int needs_init_num = 0;
289 for (
int i = 0; i < num; i++) {
290 int user_flag = odp_packet_vector_user_flag(odp_pktvecs[i]);
295 events[i] = ev_hdrs[i]->
event;
298 EVSTATE__DISPATCH_MULTI);
302 odp_packet_vector_user_flag_set(odp_pktvecs[i],
USER_FLAG_SET);
303 needs_init_idx[needs_init_num] = i;
308 if (needs_init_num == 0)
316 for (
int i = 0; i < needs_init_num; i++) {
317 idx = needs_init_idx[i];
318 ev_hdrs[idx]->
flags.all = 0;
320 ev_hdrs[idx]->
event = events[idx];
331 for (
int i = 0; i < needs_init_num; i++) {
332 idx = needs_init_idx[i];
334 ev_hdrs[idx]->
flags.all = 0;
340 if (!
em_shm->opt.esv.prealloc_pools) {
341 for (
int i = 0; i < needs_init_num; i++) {
342 idx = needs_init_idx[i];
343 events[idx] =
evstate_init(events[idx], ev_hdrs[idx], is_extev);
352 for (
int i = 0; i < needs_init_num; i++) {
353 idx = needs_init_idx[i];
355 odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvecs[idx]);
356 em_pool_t pool = pool_odp2em(odp_pool);
360 events[idx] =
evstate_init(events[idx], ev_hdrs[idx], is_extev);
363 events[idx] =
evstate_update(events[idx], ev_hdrs[idx], is_extev);
/*
 * NOTE(review): garbled extract -- the switch statement head, case bodies'
 * tails, break/return statements and the error call before
 * __builtin_unreachable() are elided. Visible intent: convert an ODP event
 * to an initialized EM event, dispatching on odp_event_type():
 *  - PACKET: header from packet user area, init via evhdr_init_pkt().
 *  - BUFFER: header from buffer user area; fast path returns early when the
 *    caller wants no header and ESV is off.
 *  - PACKET_VECTOR: init via evhdr_init_pktvec().
 *  - TIMEOUT: patches the handle's generation (evgen) from the stored
 *    header handle (evhdl_t bitfield manipulation).
 * On an unexpected type an (elided) error is reported with
 * EM_ESCOPE_EVENT_INIT_ODP, then __builtin_unreachable(). ev_hdr__out is
 * optional output of the header pointer. Do not edit without the full file.
 */
376 static inline em_event_t
377 event_init_odp(odp_event_t odp_event,
bool is_extev,
event_hdr_t **ev_hdr__out)
379 const odp_event_type_t odp_type = odp_event_type(odp_event);
380 em_event_t
event = event_odp2em(odp_event);
383 case ODP_EVENT_PACKET: {
384 odp_packet_t odp_pkt = odp_packet_from_event(odp_event);
385 event_hdr_t *ev_hdr = odp_packet_user_area(odp_pkt);
388 event = evhdr_init_pkt(ev_hdr, event, odp_pkt, is_extev);
390 *ev_hdr__out = ev_hdr;
393 case ODP_EVENT_BUFFER: {
394 const bool esv_ena = esv_enabled();
396 if (!ev_hdr__out && !esv_ena)
399 odp_buffer_t odp_buf = odp_buffer_from_event(odp_event);
400 event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf);
403 event = ev_hdr->
event;
408 *ev_hdr__out = ev_hdr;
411 case ODP_EVENT_PACKET_VECTOR: {
412 odp_packet_vector_t odp_pktvec = odp_packet_vector_from_event(odp_event);
413 event_hdr_t *ev_hdr = odp_packet_vector_user_area(odp_pktvec);
416 event = evhdr_init_pktvec(ev_hdr, event, odp_pktvec, is_extev);
418 *ev_hdr__out = ev_hdr;
421 case ODP_EVENT_TIMEOUT: {
422 odp_timeout_t odp_tmo = odp_timeout_from_event(odp_event);
423 event_hdr_t *ev_hdr = odp_timeout_user_area(odp_tmo);
424 const bool esv_ena = esv_enabled();
432 evhdl_t evhdl = {.event =
event};
435 evhdl.evgen = evhdr_hdl.evgen;
436 ev_hdr->
event = evhdl.event;
441 *ev_hdr__out = ev_hdr;
446 EM_ESCOPE_EVENT_INIT_ODP,
447 "Unexpected odp event type:%u", odp_type);
448 __builtin_unreachable();
/*
 * NOTE(review): garbled extract (return type and some params elided).
 * Visible intent: resolve each packet's event header from its user area,
 * then delegate batch initialization to evhdr_init_pkt_multi().
 */
456 event_init_pkt_multi(
const odp_packet_t odp_pkts[],
458 const int num,
bool is_extev)
460 for (
int i = 0; i < num; i++)
461 ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]);
463 evhdr_init_pkt_multi(ev_hdrs, events, odp_pkts, num, is_extev);
/*
 * NOTE(review): garbled extract (return type, ESV branch and tail elided).
 * Visible intent: resolve buffer event headers from user areas, read back
 * the stored EM handles, and (elided) perform an ESV dispatch-multi state
 * update (EVSTATE__DISPATCH_MULTI visible).
 */
468 event_init_buf_multi(
const odp_buffer_t odp_bufs[],
470 const int num,
bool is_extev)
472 for (
int i = 0; i < num; i++)
473 ev_hdrs[i] = odp_buffer_user_area(odp_bufs[i]);
477 for (
int i = 0; i < num; i++)
478 events[i] = ev_hdrs[i]->event;
482 EVSTATE__DISPATCH_MULTI);
/*
 * NOTE(review): garbled extract (return type, params and most of body
 * elided). Visible intent: resolve timeout event headers from the ODP
 * timeout user areas for a batch of timeouts.
 */
488 event_init_tmo_multi(
const odp_timeout_t odp_tmos[],
492 for (
int i = 0; i < num; i++)
493 ev_hdrs[i] = odp_timeout_user_area(odp_tmos[i]);
/*
 * NOTE(review): garbled extract (return type and some params elided).
 * Visible intent: resolve packet-vector event headers from user areas, then
 * delegate batch initialization to evhdr_init_pktvec_multi().
 */
501 event_init_pktvec_multi(
const odp_packet_vector_t odp_pktvecs[],
503 const int num,
bool is_extev)
505 for (
int i = 0; i < num; i++)
506 ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);
508 evhdr_init_pktvec_multi(ev_hdrs, events, odp_pktvecs, num, is_extev);
/*
 * NOTE(review): garbled extract (return type and some params elided).
 * Visible intent: initialize a batch of ODP events one-by-one via
 * event_init_odp(), collecting both EM handles and header pointers.
 */
520 event_init_odp_multi(
const odp_event_t odp_events[],
522 const int num,
bool is_extev)
524 for (
int i = 0; i < num; i++)
525 events[i] = event_init_odp(odp_events[i], is_extev, &ev_hdrs[i]);
/*
 * NOTE(review): garbled extract -- the subpool retry loop, error returns
 * and final return are elided. Visible intent: allocate an ODP buffer for
 * an EM event of 'size' bytes from the pool element: find a fitting
 * subpool, try odp_buffer_alloc() (elided retry/fallback across subpools
 * suggested by the success-check at 553), then initialize the event header
 * in the buffer user area with the EM handle.
 */
532 event_alloc_buf(
const mpool_elem_t *
const pool_elem, uint32_t size)
534 odp_buffer_t odp_buf = ODP_BUFFER_INVALID;
541 subpool = pool_find_subpool(pool_elem, size);
542 if (unlikely(subpool < 0))
546 odp_pool_t odp_pool = pool_elem->
odp_pool[subpool];
549 unlikely(odp_pool == ODP_POOL_INVALID))
552 odp_buf = odp_buffer_alloc(odp_pool);
553 if (likely(odp_buf != ODP_BUFFER_INVALID))
557 if (unlikely(odp_buf == ODP_BUFFER_INVALID))
564 event_hdr_t *
const ev_hdr = odp_buffer_user_area(odp_buf);
565 odp_event_t odp_event = odp_buffer_to_event(odp_buf);
566 em_event_t
event = event_odp2em(odp_event);
568 ev_hdr->
event = event;
/*
 * NOTE(review): garbled extract -- loop heads, num_bufs bookkeeping and
 * returns are elided. Visible intent: allocate up to 'num' buffers,
 * apparently accumulating across subpools (num_bufs offset visible), using
 * odp_buffer_alloc_multi(); convert to EM handles, resolve headers from
 * user areas, zero header flags and store handles; returns (elided) the
 * count, with a fast path when all 'num' were obtained.
 */
580 event_alloc_buf_multi(em_event_t events[],
const int num,
584 odp_buffer_t odp_bufs[num];
585 odp_event_t odp_event;
588 const bool esv_ena = esv_enabled();
594 subpool = pool_find_subpool(pool_elem, size);
595 if (unlikely(subpool < 0))
603 odp_pool_t odp_pool = pool_elem->
odp_pool[subpool];
606 unlikely(odp_pool == ODP_POOL_INVALID))
609 int ret = odp_buffer_alloc_multi(odp_pool, &odp_bufs[num_bufs],
611 if (unlikely(ret <= 0))
615 for (i = num_bufs; i < num_bufs + ret; i++) {
616 odp_event = odp_buffer_to_event(odp_bufs[i]);
617 events[i] = event_odp2em(odp_event);
621 for (i = num_bufs; i < num_bufs + ret; i++)
622 ev_hdrs[i] = odp_buffer_user_area(odp_bufs[i]);
627 &ev_hdrs[num_bufs], ret);
630 for (i = num_bufs; i < num_bufs + ret; i++) {
631 ev_hdrs[i]->
flags.all = 0;
634 ev_hdrs[i]->
event = events[i];
646 if (likely(num_bufs == num))
/*
 * NOTE(review): garbled extract -- push_len/pull_len/alloc_size
 * declarations, error paths and return are elided. Visible intent:
 * allocate an ODP packet sized for an EM event, adjusting for a headroom
 * push (push_len) and tail pull (pull_len) so the packet data area matches
 * 'size'; find subpool, odp_packet_alloc(), push/pull head/tail, then
 * initialize the event header in the packet user area; frees the packet on
 * the (elided) failure path.
 */
658 event_alloc_pkt(
const mpool_elem_t *pool_elem, uint32_t size)
663 odp_packet_t odp_pkt = ODP_PACKET_INVALID;
666 if (size > push_len) {
667 alloc_size = size - push_len;
671 pull_len = push_len + 1 - size;
678 subpool = pool_find_subpool(pool_elem, size);
679 if (unlikely(subpool < 0))
683 odp_pool_t odp_pool = pool_elem->
odp_pool[subpool];
686 unlikely(odp_pool == ODP_POOL_INVALID))
689 odp_pkt = odp_packet_alloc(odp_pool, alloc_size);
690 if (likely(odp_pkt != ODP_PACKET_INVALID))
694 if (unlikely(odp_pkt == ODP_PACKET_INVALID))
706 ptr = odp_packet_push_head(odp_pkt, push_len);
711 ptr = odp_packet_pull_tail(odp_pkt, pull_len);
723 event_hdr_t *
const ev_hdr = odp_packet_user_area(odp_pkt);
724 odp_event_t odp_event = odp_packet_to_event(odp_pkt);
725 em_event_t
event = event_odp2em(odp_event);
731 ev_hdr->
event = event;
738 odp_packet_free(odp_pkt);
/*
 * NOTE(review): garbled extract -- return type, NULL-checks on push/pull
 * results and the success return are elided. Visible intent: helper that
 * allocates up to 'num' packets via odp_packet_alloc_multi(), then applies
 * the headroom push and tail pull to each; on a (elided) push/pull failure
 * it jumps to err_pktalloc_multi which frees all allocated packets.
 */
746 pktalloc_multi(odp_packet_t odp_pkts[],
int num,
747 odp_pool_t odp_pool, uint32_t size,
748 uint32_t push_len, uint32_t pull_len)
750 int ret = odp_packet_alloc_multi(odp_pool, size, odp_pkts, num);
752 if (unlikely(ret <= 0))
755 const int num_pkts = ret;
756 const void *ptr = NULL;
761 for (i = 0; i < num_pkts; i++) {
762 ptr = odp_packet_push_head(odp_pkts[i], push_len);
764 goto err_pktalloc_multi;
768 for (i = 0; i < num_pkts; i++) {
769 ptr = odp_packet_pull_tail(odp_pkts[i], pull_len);
771 goto err_pktalloc_multi;
780 for (i = 0; i < num_pkts; i++)
786 odp_packet_free_multi(odp_pkts, num_pkts);
/*
 * NOTE(review): garbled extract -- size-adjust declarations, subpool loop
 * and returns are elided. Visible intent: batch packet-event allocation:
 * compute alloc_size/pull_len from push_len, allocate via pktalloc_multi()
 * (accumulating at offset num_pkts), convert packets to events in-place
 * (events[] reinterpreted as odp_event_t[] -- relies on the compile-time
 * handle-compatibility assert), resolve headers, zero flags and store EM
 * handles; fast path when all 'num' packets were obtained.
 */
794 event_alloc_pkt_multi(em_event_t events[],
const int num,
800 odp_packet_t odp_pkts[num];
802 odp_event_t *
const odp_events = (odp_event_t *)events;
806 const bool esv_ena = esv_enabled();
808 if (size > push_len) {
809 alloc_size = size - push_len;
813 pull_len = push_len + 1 - size;
820 subpool = pool_find_subpool(pool_elem, size);
821 if (unlikely(subpool < 0))
829 odp_pool_t odp_pool = pool_elem->
odp_pool[subpool];
832 unlikely(odp_pool == ODP_POOL_INVALID))
835 int ret = pktalloc_multi(&odp_pkts[num_pkts], num_req,
836 odp_pool, alloc_size,
838 if (unlikely(ret <= 0))
845 odp_packet_to_event_multi(&odp_pkts[num_pkts],
846 &odp_events[num_pkts], ret);
848 for (i = num_pkts; i < num_pkts + ret; i++)
849 ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]);
858 &ev_hdrs[num_pkts], ret);
861 for (i = num_pkts; i < num_pkts + ret; i++) {
862 ev_hdrs[i]->
flags.all = 0;
865 ev_hdrs[i]->
event = events[i];
876 if (likely(num_pkts == num))
/*
 * NOTE(review): garbled extract -- error paths and return elided. Visible
 * intent: allocate an ODP packet vector for an EM vector event: find
 * subpool, odp_packet_vector_alloc(), then initialize the event header in
 * the vector user area with the EM handle; frees the vector on the
 * (elided) failure path.
 */
885 event_alloc_vector(
const mpool_elem_t *pool_elem, uint32_t size)
887 odp_packet_vector_t odp_pktvec = ODP_PACKET_VECTOR_INVALID;
894 subpool = pool_find_subpool(pool_elem, size);
895 if (unlikely(subpool < 0))
899 odp_pool_t odp_pool = pool_elem->
odp_pool[subpool];
902 unlikely(odp_pool == ODP_POOL_INVALID))
905 odp_pktvec = odp_packet_vector_alloc(odp_pool);
906 if (likely(odp_pktvec != ODP_PACKET_VECTOR_INVALID))
910 if (unlikely(odp_pktvec == ODP_PACKET_VECTOR_INVALID))
925 event_hdr_t *
const ev_hdr = odp_packet_vector_user_area(odp_pktvec);
926 odp_event_t odp_event = odp_packet_vector_to_event(odp_pktvec);
927 em_event_t
event = event_odp2em(odp_event);
932 ev_hdr->
event = event;
939 odp_packet_vector_free(odp_pktvec);
/*
 * NOTE(review): garbled extract -- return type, pool parameter and return
 * statement elided. Visible intent: helper that allocates up to 'num'
 * packet vectors one-by-one, stopping at the first failure; marks each
 * successfully allocated vector's user flag with USER_FLAG_SET (presumably
 * "header initialized" -- confirm against full source) and returns
 * (elided) the count num_vecs.
 */
947 vecalloc_multi(odp_packet_vector_t odp_pktvecs[],
int num,
952 for (i = 0; i < num; i++) {
953 odp_pktvecs[i] = odp_packet_vector_alloc(odp_pool);
954 if (unlikely(odp_pktvecs[i] == ODP_PACKET_VECTOR_INVALID))
958 const int num_vecs = i;
960 if (unlikely(num_vecs == 0))
968 for (i = 0; i < num_vecs; i++)
969 odp_packet_vector_user_flag_set(odp_pktvecs[i],
USER_FLAG_SET);
/*
 * NOTE(review): garbled extract -- subpool loop, num_vecs bookkeeping and
 * returns elided. Visible intent: batch vector-event allocation via
 * vecalloc_multi(); converts vectors to events in-place (events[] aliased
 * as odp_event_t[]), resolves headers from user areas, zeroes flags and
 * stores EM handles; fast path when all 'num' vectors were obtained.
 */
978 event_alloc_vector_multi(em_event_t events[],
const int num,
982 odp_packet_vector_t odp_pktvecs[num];
984 odp_event_t *
const odp_events = (odp_event_t *)events;
987 const bool esv_ena = esv_enabled();
993 subpool = pool_find_subpool(pool_elem, size);
994 if (unlikely(subpool < 0))
1002 odp_pool_t odp_pool = pool_elem->
odp_pool[subpool];
1005 unlikely(odp_pool == ODP_POOL_INVALID))
1008 int ret = vecalloc_multi(&odp_pktvecs[num_vecs], num_req,
1010 if (unlikely(ret <= 0))
1017 for (i = num_vecs; i < num_vecs + ret; i++) {
1018 odp_events[i] = odp_packet_vector_to_event(odp_pktvecs[i]);
1019 ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);
1029 &ev_hdrs[num_vecs], ret);
1032 for (i = num_vecs; i < num_vecs + ret; i++) {
1033 ev_hdrs[i]->
flags.all = 0;
1036 ev_hdrs[i]->
event = events[i];
1047 if (likely(num_vecs == num))
/*
 * NOTE(review): garbled extract -- the function name line itself is elided
 * (presumably the central event_alloc() dispatcher -- TODO confirm).
 * Visible intent: dispatch to event_alloc_pkt()/event_alloc_buf()/
 * event_alloc_vector() based on (elided) pool event type, fail on NULL
 * header, zero header flags and return the EM handle from the header.
 * 'api_op' is presumably an ESV API-operation id for state tracking.
 */
1058 static inline em_event_t
1060 const uint16_t api_op)
1073 ev_hdr = event_alloc_pkt(pool_elem, size);
1075 ev_hdr = event_alloc_buf(pool_elem, size);
1077 ev_hdr = event_alloc_vector(pool_elem, size);
1079 if (unlikely(!ev_hdr))
1090 ev_hdr->
flags.all = 0;
1100 return ev_hdr->
event;
/*
 * NOTE(review): garbled extract -- return type, the pkt/buf/vector
 * selection conditions, ESV call and prealloc_hdr derivation are elided.
 * Visible intent: allocate an event for ESV pool preallocation: pick the
 * allocator per (elided) pool type, bail out on NULL, run an (elided) ESV
 * prealloc state transition when ESV is enabled, zero header flags and
 * return a preallocation header.
 */
1107 event_prealloc(
const mpool_elem_t *pool_elem, uint32_t size)
1120 ev_hdr = event_alloc_pkt(pool_elem, size);
1122 ev_hdr = event_alloc_buf(pool_elem, size);
1124 ev_hdr = event_alloc_vector(pool_elem, size);
1126 if (unlikely(ev_hdr == NULL))
1131 if (esv_enabled()) {
1132 em_event_t
event = ev_hdr->
event;
1136 ev_hdr->
flags.all = 0;
1141 return prealloc_hdr;
/*
 * NOTE(review): garbled extract -- the container-of style derivation of
 * ev_hdr from the list node is elided. Visible intent: map a free-list
 * node back to its preallocated event header, returning NULL for a NULL
 * node.
 */
1145 list_node_to_prealloc_hdr(
list_node_t *
const list_node)
1150 return likely(list_node != NULL) ? ev_hdr : NULL;
/*
 * NOTE(review): garbled extract -- return type and closing lines elided.
 * Visible intent: before enqueueing a packet-vector event to ODP, convert
 * the EM event handles stored in the vector table back into raw ODP packet
 * handles in place (events_em2pkt_inplace), reinterpreting the packet
 * table as an EM handle table.
 */
1160 vector_tbl2odp(odp_event_t odp_event_pktvec)
1162 odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec);
1163 odp_packet_t *pkt_tbl = NULL;
1164 const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);
1166 if (likely(pkts > 0)) {
1168 em_event_t *event_tbl = (em_event_t *)pkt_tbl;
1171 (void)events_em2pkt_inplace(event_tbl, pkts);
/*
 * NOTE(review): garbled extract -- return type, ev_hdr_tbl declaration and
 * closing lines elided. Visible intent: inverse of vector_tbl2odp(): after
 * a failed/partial enqueue, restore the vector table's entries to
 * initialized EM events by converting handles to packets (events_em2pkt
 * into a VLA) and re-running event_init_pkt_multi() with is_extev=false.
 */
1184 vector_tbl2em(odp_event_t odp_event_pktvec)
1186 odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec);
1187 odp_packet_t *pkt_tbl = NULL;
1188 const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);
1190 if (likely(pkts > 0)) {
1191 em_event_t *
const ev_tbl = (em_event_t *
const)pkt_tbl;
1192 odp_packet_t odp_pkttbl[pkts];
1199 events_em2pkt(ev_tbl, odp_pkttbl, pkts);
1201 event_init_pkt_multi(odp_pkttbl , ev_tbl ,
1202 ev_hdr_tbl , pkts,
false);
/*
 * NOTE(review): garbled extract -- return type, check-level condition and
 * return statements elided. Visible intent: enqueue one EM event to the
 * queue element's ODP queue: validate handles (under an elided
 * EM_CHECK_LEVEL guard), convert a packet-vector's table to raw ODP
 * handles before enqueue, and convert it back to EM form if the enqueue
 * (elided check on ret) fails.
 */
1207 send_event(em_event_t event,
const queue_elem_t *q_elem)
1209 const bool esv_ena = esv_enabled();
1210 odp_event_t odp_event = event_em2odp(event);
1211 odp_queue_t odp_queue = q_elem->
odp_queue;
1215 (odp_event == ODP_EVENT_INVALID ||
1216 odp_queue == ODP_QUEUE_INVALID)))
1228 if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR)
1229 vector_tbl2odp(odp_event);
1232 ret = odp_queue_enq(odp_queue, odp_event);
1236 if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR)
1237 vector_tbl2em(odp_event);
/*
 * NOTE(review): garbled extract -- return type, esv_ena guard around the
 * vector loops and return statements elided. Visible intent: batch enqueue
 * to the queue element's ODP queue: convert EM handles to ODP events,
 * flip packet-vector tables to ODP form before enqueue, then for events
 * NOT accepted by odp_queue_enq_multi() (index >= enq) flip their vector
 * tables back to EM form so the caller still owns valid EM events.
 */
1246 send_event_multi(
const em_event_t events[],
const int num,
1249 const bool esv_ena = esv_enabled();
1250 odp_event_t odp_events[num];
1251 odp_queue_t odp_queue = q_elem->
odp_queue;
1253 if (unlikely(
EM_CHECK_LEVEL > 1 && odp_queue == ODP_QUEUE_INVALID))
1261 events_em2odp(events, odp_events, num);
1268 for (
int i = 0; i < num; i++) {
1269 if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR)
1270 vector_tbl2odp(odp_events[i]);
1275 int ret = odp_queue_enq_multi(odp_queue, odp_events, num);
1277 if (likely(ret == num))
1283 int enq = ret < 0 ? 0 : ret;
1287 for (
int i = enq; i < num; i++) {
1288 if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR)
1289 vector_tbl2em(odp_events[i]);
/*
 * NOTE(review): garbled extract -- locm/prio derivation, the stash entry
 * construction and return statements elided. Visible intent: enqueue one
 * event to a core-local queue: pack the queue index and event pointer
 * into a 64-bit stash entry and push it with odp_stash_put_u64() into the
 * per-priority local stash; success when ret == 1.
 */
1297 send_local(em_event_t event,
const queue_elem_t *q_elem)
1301 evhdl_t evhdl = {.event =
event};
1308 em_queue_t queue = (em_queue_t)(uintptr_t)q_elem->
queue;
1310 .evptr = evhdl.evptr};
1312 ret = odp_stash_put_u64(locm->
local_queues.prio[prio].stash,
1314 if (likely(ret == 1)) {
/*
 * NOTE(review): garbled extract -- entry_tbl/evhdl_tbl declarations, locm
 * and prio derivation, and returns elided. Visible intent: batch variant
 * of send_local(): build a table of 64-bit stash entries (queue index +
 * event pointer per event) and push them in one odp_stash_put_u64() call;
 * partial success (ret > 0) handled in an elided branch.
 */
1324 send_local_multi(
const em_event_t events[],
const int num,
1336 em_queue_t queue = (em_queue_t)(uintptr_t)q_elem->
queue;
1337 const uint16_t qidx = (uint16_t)queue_hdl2idx(queue);
1339 for (
int i = 0; i < num; i++) {
1340 entry_tbl[i].qidx = qidx;
1341 entry_tbl[i].evptr = evhdl_tbl[i].evptr;
1344 int ret = odp_stash_put_u64(locm->
local_queues.prio[prio].stash,
1345 &entry_tbl[0].u64, num);
1346 if (likely(ret > 0)) {
/*
 * NOTE(review): garbled extract -- the branch between the two output modes
 * and all returns are elided. Visible intent: send one event through an EM
 * output queue. Two visible paths: (a) ODP-queue-backed output -- validate
 * state/handles, track the queue, odp_queue_enq(), then opportunistically
 * drain buffered output under a trylock on output_q_elem->output.lock;
 * (b) user-callback output -- look up the output_fn and its args and call
 * it with a single event, expecting sent == 1.
 */
1359 send_output(em_event_t event,
queue_elem_t *
const output_q_elem)
1365 output_q_elem->
state != EM_QUEUE_STATE_UNSCHEDULED))
1375 const odp_queue_t odp_queue = output_q_elem->
odp_queue;
1376 odp_event_t odp_event = event_em2odp(event);
1380 (odp_event == ODP_EVENT_INVALID ||
1381 odp_queue == ODP_QUEUE_INVALID)))
1385 output_queue_track(output_q_elem);
1388 ret = odp_queue_enq(odp_queue, odp_event);
1389 if (unlikely(ret != 0))
1395 env_spinlock_t *
const lock =
1396 &output_q_elem->output.
lock;
1398 if (!env_spinlock_trylock(lock))
1400 output_queue_drain(output_q_elem);
1401 env_spinlock_unlock(lock);
1410 const em_queue_t output_queue = (em_queue_t)(uintptr_t)output_q_elem->
queue;
1413 void *
const output_fn_args =
1417 sent = output_fn(&event, 1, output_queue, output_fn_args);
1418 if (unlikely(sent != 1))
/*
 * NOTE(review): garbled extract -- mode branch, output_fn lookup and
 * returns elided. Visible intent: batch variant of send_output(): either
 * enqueue all events to the backing ODP queue (with tracking and a
 * trylock-guarded drain of buffered output), or pass the whole batch to
 * the user output callback.
 */
1428 send_output_multi(
const em_event_t events[],
const unsigned int num,
1436 output_q_elem->
state != EM_QUEUE_STATE_UNSCHEDULED))
1446 const odp_queue_t odp_queue = output_q_elem->
odp_queue;
1447 odp_event_t odp_events[num];
1450 odp_queue == ODP_QUEUE_INVALID))
1454 output_queue_track(output_q_elem);
1456 events_em2odp(events, odp_events, num);
1459 sent = odp_queue_enq_multi(odp_queue, odp_events, num);
1460 if (unlikely(sent <= 0))
1466 env_spinlock_t *
const lock =
1467 &output_q_elem->output.
lock;
1469 if (!env_spinlock_trylock(lock))
1471 output_queue_drain(output_q_elem);
1472 env_spinlock_unlock(lock);
1481 const em_queue_t output_queue = (em_queue_t)(uintptr_t)output_q_elem->
queue;
1485 sent = output_fn(events, num, output_queue, output_fn_args);
/*
 * NOTE(review): garbled extract -- the align_offset derivation, closing
 * braces and the return are elided. Visible intent: return a pointer to
 * the event payload: packet events -> odp_packet_data(); buffer events ->
 * odp_buffer_addr() adjusted by "+ 32 - align_offset" (the 32 presumably
 * matching a pool alignment constant -- confirm against full source).
 */
1494 static inline void *
1495 event_pointer(em_event_t event)
1497 const odp_event_t odp_event = event_em2odp(event);
1498 const odp_event_type_t odp_etype = odp_event_type(odp_event);
1499 void *ev_ptr = NULL;
1501 if (odp_etype == ODP_EVENT_PACKET) {
1502 const odp_packet_t odp_pkt = odp_packet_from_event(odp_event);
1504 ev_ptr = odp_packet_data(odp_pkt);
1505 }
else if (odp_etype == ODP_EVENT_BUFFER) {
1506 const odp_buffer_t odp_buf = odp_buffer_from_event(odp_event);
1507 const event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf);
1510 ev_ptr = odp_buffer_addr(odp_buf);
1513 ev_ptr = (
void *)((uintptr_t)ev_ptr + 32 - align_offset);
/*
 * NOTE(review): garbled extract -- return type line and the non-packet
 * return value are elided. Visible intent: report whether an event is a
 * packet with shared references: non-packet events (elided return,
 * presumably false), otherwise odp_packet_has_ref() mapped to bool.
 */
1520 event_has_ref(em_event_t event)
1522 odp_event_t odp_event = event_em2odp(event);
1523 odp_event_type_t odp_etype = odp_event_type(odp_event);
1525 if (odp_etype != ODP_EVENT_PACKET)
1528 odp_packet_t odp_pkt = odp_packet_from_event(odp_event);
1530 return odp_packet_has_ref(odp_pkt) ? true :
false;