COMPILE_TIME_ASSERT((uintptr_t)EM_EVENT_UNDEF == (uintptr_t)ODP_EVENT_INVALID,
		    EM_EVENT_NOT_EQUAL_TO_ODP_EVENT);
COMPILE_TIME_ASSERT(EM_TMO_TYPE_NONE == 0,
		    "EM_TMO_TYPE_NONE must be 0");
void print_event_info(void);

em_event_t pkt_clone_odp(odp_packet_t pkt, odp_pool_t pkt_pool,
			 uint32_t offset, uint32_t size,
			 bool clone_uarea, bool is_clone_part);

void output_queue_track(queue_elem_t *const output_q_elem);
void output_queue_drain(const queue_elem_t *output_q_elem);
void output_queue_buffering_drain(void);
uint32_t event_vector_tbl(em_event_t vector_event, em_event_t **event_tbl);
em_status_t event_vector_max_size(em_event_t vector_event, uint32_t *max_size,
				  em_escope_t escope);
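
/**
 * Initialize the event-header of an odp-pkt converted into an EM event.
 * The odp user-flag tells whether the pkt was allocated by EM and thus
 * already has a valid header: such events only get an ESV state update when
 * handed out to the user ('is_extev'). Pkts entering EM from the outside get
 * a full event-header (and ESV state) init.
 */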
static inline em_event_t
evhdr_init_pkt(event_hdr_t *ev_hdr, em_event_t event,
	       odp_packet_t odp_pkt, bool is_extev)
{
	const int user_flag_set = odp_packet_user_flag(odp_pkt);
	const bool esv_ena = esv_enabled();

	if (user_flag_set) {
		/* Event already initialized by EM */
		if (esv_ena) {
			event = ev_hdr->event;
			/* ... */
		}
		return event;
	}

	/* ODP pkt from outside of EM - needs event-hdr init */
	odp_packet_user_flag_set(odp_pkt, USER_FLAG_SET);
	/* ... */
	if (!esv_ena) {
		ev_hdr->flags.all = 0;
		ev_hdr->event = event;
		return event;
	}

	/* ESV enabled: */
	if (!em_shm->opt.esv.prealloc_pools) {
		event = evstate_init(event, ev_hdr, is_extev);
	} else {
		odp_pool_t odp_pool = odp_packet_pool(odp_pkt);
		em_pool_t pool = pool_odp2em(odp_pool);

		if (pool == EM_POOL_UNDEF || ev_hdr->flags.refs_used)
			event = evstate_init(event, ev_hdr, is_extev);
		else /* pkt from an EM-pool: update the existing state */
			event = evstate_update(event, ev_hdr, is_extev);
	}
	ev_hdr->flags.all = 0;

	return event;
}
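
/**
 * Multi-pkt variant of evhdr_init_pkt(): collect the indexes of the pkts
 * that need event-header init into needs_init_idx[] on a first pass and
 * initialize them on a second pass.
 */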
static inline void
evhdr_init_pkt_multi(event_hdr_t *ev_hdrs[], em_event_t events[],
		     const odp_packet_t odp_pkts[], const int num,
		     bool is_extev)
{
	const bool esv_ena = esv_enabled();
	int user_flag_set;
	int needs_init_idx[num];
	int needs_init_num = 0;
	int idx;

	for (int i = 0; i < num; i++) {
		user_flag_set = odp_packet_user_flag(odp_pkts[i]);
		if (user_flag_set) {
			/* Event already initialized by EM */
			if (esv_ena) {
				events[i] = ev_hdrs[i]->event;
				if (is_extev)
					events[i] = evstate_em2usr(events[i], ev_hdrs[i],
								   EVSTATE__DISPATCH_MULTI);
			}
		} else {
			odp_packet_user_flag_set(odp_pkts[i], USER_FLAG_SET);
			needs_init_idx[needs_init_num] = i;
			needs_init_num++;
		}
	}

	if (needs_init_num == 0)
		return;

	/* Init the event-hdrs of the pkts that entered EM from the outside */
	if (!esv_ena) {
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];
			ev_hdrs[idx]->flags.all = 0;
			/* ... */
			ev_hdrs[idx]->event = events[idx];
		}
		return;
	}

	/* ESV enabled: */
	if (!em_shm->opt.esv.prealloc_pools) {
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];
			events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
		}
	} else {
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];
			odp_pool_t odp_pool = odp_packet_pool(odp_pkts[idx]);
			em_pool_t pool = pool_odp2em(odp_pool);

			if (pool == EM_POOL_UNDEF || ev_hdrs[idx]->flags.refs_used) {
				/* pkt from an ODP-pool (or has refs): init state */
				events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
			} else {
				/* pkt from an EM-pool: update the existing state */
				events[idx] = evstate_update(events[idx], ev_hdrs[idx], is_extev);
			}
		}
	}

	for (int i = 0; i < needs_init_num; i++) {
		idx = needs_init_idx[i];
		ev_hdrs[idx]->flags.all = 0;
		/* ... */
	}
}
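
/**
 * Initialize the event-header of an odp-pkt-vector converted into an EM
 * event, see evhdr_init_pkt() for the corresponding pkt handling.
 */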
static inline em_event_t
evhdr_init_pktvec(event_hdr_t *ev_hdr, em_event_t event,
		  odp_packet_vector_t odp_pktvec, bool is_extev)
{
	const int user_flag = odp_packet_vector_user_flag(odp_pktvec);
	const bool esv_ena = esv_enabled();

	if (user_flag) {
		/* Vector-event already initialized by EM */
		if (esv_ena) {
			event = ev_hdr->event;
			/* ... */
		}
		return event;
	}

	/* ODP pkt-vector from outside of EM - needs event-hdr init */
	odp_packet_vector_user_flag_set(odp_pktvec, USER_FLAG_SET);
	/* ... */
	ev_hdr->flags.all = 0;

	if (!esv_ena) {
		ev_hdr->event = event;
		return event;
	}

	/* ESV enabled: */
	if (!em_shm->opt.esv.prealloc_pools) {
		event = evstate_init(event, ev_hdr, is_extev);
	} else {
		odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvec);
		em_pool_t pool = pool_odp2em(odp_pool);

		if (pool == EM_POOL_UNDEF) /* vector from an ODP-pool */
			event = evstate_init(event, ev_hdr, is_extev);
		else /* vector from an EM-pool */
			event = evstate_update(event, ev_hdr, is_extev);
	}

	return event;
}
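
/**
 * Multi-vector variant of evhdr_init_pktvec().
 */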
static inline void
evhdr_init_pktvec_multi(event_hdr_t *ev_hdrs[], em_event_t events[],
			const odp_packet_vector_t odp_pktvecs[], const int num,
			bool is_extev)
{
	const bool esv_ena = esv_enabled();
	int needs_init_idx[num];
	int needs_init_num = 0;
	int idx;

	for (int i = 0; i < num; i++) {
		int user_flag = odp_packet_vector_user_flag(odp_pktvecs[i]);

		if (user_flag) {
			/* Vector-event already initialized by EM */
			if (esv_ena) {
				events[i] = ev_hdrs[i]->event;
				if (is_extev)
					events[i] = evstate_em2usr(events[i], ev_hdrs[i],
								   EVSTATE__DISPATCH_MULTI);
			}
		} else {
			odp_packet_vector_user_flag_set(odp_pktvecs[i],
							USER_FLAG_SET);
			needs_init_idx[needs_init_num] = i;
			needs_init_num++;
		}
	}

	if (needs_init_num == 0)
		return;

	/* Init the event-hdrs of the vectors that entered EM from the outside */
	if (!esv_ena) {
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];
			ev_hdrs[idx]->flags.all = 0;
			/* ... */
			ev_hdrs[idx]->event = events[idx];
		}
		return;
	}

	/* ESV enabled: */
	for (int i = 0; i < needs_init_num; i++) {
		idx = needs_init_idx[i];
		/* ... */
		ev_hdrs[idx]->flags.all = 0;
	}

	if (!em_shm->opt.esv.prealloc_pools) {
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];
			events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
		}
		return;
	}

	for (int i = 0; i < needs_init_num; i++) {
		idx = needs_init_idx[i];
		odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvecs[idx]);
		em_pool_t pool = pool_odp2em(odp_pool);

		if (pool == EM_POOL_UNDEF) /* vector from an ODP-pool */
			events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
		else /* vector from an EM-pool */
			events[idx] = evstate_update(events[idx], ev_hdrs[idx], is_extev);
	}
}
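
/**
 * Convert an odp-event into an EM event and initialize the event-header as
 * needed for the odp-event type (pkt, buf, pkt-vector or timeout).
 * Optionally outputs the event-header via 'ev_hdr__out'.
 */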
static inline em_event_t
event_init_odp(odp_event_t odp_event, bool is_extev, event_hdr_t **ev_hdr__out)
{
	const odp_event_type_t odp_type = odp_event_type(odp_event);
	em_event_t event = event_odp2em(odp_event);

	switch (odp_type) {
	case ODP_EVENT_PACKET: {
		odp_packet_t odp_pkt = odp_packet_from_event(odp_event);
		event_hdr_t *ev_hdr = odp_packet_user_area(odp_pkt);

		/* init event-hdr if needed (also ESV state if enabled) */
		event = evhdr_init_pkt(ev_hdr, event, odp_pkt, is_extev);
		if (ev_hdr__out)
			*ev_hdr__out = ev_hdr;
		return event;
	}
	case ODP_EVENT_BUFFER: {
		const bool esv_ena = esv_enabled();

		if (!ev_hdr__out && !esv_ena)
			return event; /* no need to look up the ev_hdr */

		odp_buffer_t odp_buf = odp_buffer_from_event(odp_event);
		event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf);

		if (esv_ena) /* use the event handle (w/ evgen) from the hdr */
			event = ev_hdr->event;
		if (ev_hdr__out)
			*ev_hdr__out = ev_hdr;
		return event;
	}
	case ODP_EVENT_PACKET_VECTOR: {
		odp_packet_vector_t odp_pktvec = odp_packet_vector_from_event(odp_event);
		event_hdr_t *ev_hdr = odp_packet_vector_user_area(odp_pktvec);

		/* init event-hdr if needed (also ESV state if enabled) */
		event = evhdr_init_pktvec(ev_hdr, event, odp_pktvec, is_extev);
		if (ev_hdr__out)
			*ev_hdr__out = ev_hdr;
		return event;
	}
	case ODP_EVENT_TIMEOUT: {
		odp_timeout_t odp_tmo = odp_timeout_from_event(odp_event);
		event_hdr_t *ev_hdr = odp_timeout_user_area(odp_tmo);
		const bool esv_ena = esv_enabled();

		if (esv_ena) {
			/* update the event handle with evgen from the hdr */
			evhdl_t evhdl = {.event = event};
			const evhdl_t evhdr_hdl = {.event = ev_hdr->event};

			evhdl.evgen = evhdr_hdl.evgen;
			ev_hdr->event = evhdl.event;
			event = evhdl.event;
		}
		if (ev_hdr__out)
			*ev_hdr__out = ev_hdr;
		return event;
	}
	default:
		INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED),
			       EM_ESCOPE_EVENT_INIT_ODP,
			       "Unexpected odp event type:%u", odp_type);
		__builtin_unreachable();
	}
}
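
/*
 * The event_init_<type>_multi() functions below convert odp-events of an
 * already known type into EM events: look up the event-headers from the odp
 * user-areas and run the matching header/ESV initialization.
 */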
static inline void
event_init_pkt_multi(const odp_packet_t odp_pkts[], em_event_t events[],
		     event_hdr_t *ev_hdrs[], const int num, bool is_extev)
{
	for (int i = 0; i < num; i++)
		ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]);

	evhdr_init_pkt_multi(ev_hdrs, events, odp_pkts, num, is_extev);
}
static inline void
event_init_buf_multi(const odp_buffer_t odp_bufs[], em_event_t events[],
		     event_hdr_t *ev_hdrs[], const int num, bool is_extev)
{
	for (int i = 0; i < num; i++)
		ev_hdrs[i] = odp_buffer_user_area(odp_bufs[i]);

	if (esv_enabled()) {
		/* use the event handles (w/ evgen) from the hdrs */
		for (int i = 0; i < num; i++)
			events[i] = ev_hdrs[i]->event;

		if (is_extev)
			evstate_em2usr_multi(events, ev_hdrs, num,
					     EVSTATE__DISPATCH_MULTI);
	}
}
static inline void
event_init_tmo_multi(const odp_timeout_t odp_tmos[], em_event_t events[],
		     event_hdr_t *ev_hdrs[], const int num)
{
	for (int i = 0; i < num; i++)
		ev_hdrs[i] = odp_timeout_user_area(odp_tmos[i]);
	/* ... */
}
static inline void
event_init_pktvec_multi(const odp_packet_vector_t odp_pktvecs[], em_event_t events[],
			event_hdr_t *ev_hdrs[], const int num, bool is_extev)
{
	for (int i = 0; i < num; i++)
		ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);

	evhdr_init_pktvec_multi(ev_hdrs, events, odp_pktvecs, num, is_extev);
}
static inline void
event_init_odp_multi(const odp_event_t odp_events[], em_event_t events[],
		     event_hdr_t *ev_hdrs[], const int num, bool is_extev)
{
	for (int i = 0; i < num; i++)
		events[i] = event_init_odp(odp_events[i], is_extev, &ev_hdrs[i]);
}
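
/**
 * Allocate an EM event based on an odp-buffer.
 * @return the event-header of the allocated event or NULL on failure.
 */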
static inline event_hdr_t *
event_alloc_buf(const mpool_elem_t *const pool_elem, uint32_t size)
{
	odp_buffer_t odp_buf = ODP_BUFFER_INVALID;
	int subpool;

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has buffers available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return NULL;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL > 1 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return NULL;

		odp_buf = odp_buffer_alloc(odp_pool);
		if (likely(odp_buf != ODP_BUFFER_INVALID))
			break;
	}

	if (unlikely(odp_buf == ODP_BUFFER_INVALID))
		return NULL;

	/* Buffer now allocated - init the event-hdr in the odp-buf user-area */
	event_hdr_t *const ev_hdr = odp_buffer_user_area(odp_buf);
	odp_event_t odp_event = odp_buffer_to_event(odp_buf);
	em_event_t event = event_odp2em(odp_event);

	/* ... */
	ev_hdr->event = event; /* store this event as the 'original' event */

	return ev_hdr;
}
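
/**
 * Allocate multiple EM events based on odp-buffers.
 * @return the number of events allocated (0 ... num).
 */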
static inline int
event_alloc_buf_multi(em_event_t events[], const int num,
		      const mpool_elem_t *pool_elem, uint32_t size,
		      em_event_type_t type)
{
	odp_buffer_t odp_bufs[num];
	odp_event_t odp_event;
	event_hdr_t *ev_hdrs[num];
	int subpool;
	const bool esv_ena = esv_enabled();

	/* Allocate from the 'best fit' subpool onwards */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return 0;

	int num_bufs = 0;
	int i;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL > 1 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return 0;

		int ret = odp_buffer_alloc_multi(odp_pool, &odp_bufs[num_bufs],
						 num - num_bufs);
		if (unlikely(ret <= 0))
			continue; /* try the next subpool */

		for (i = num_bufs; i < num_bufs + ret; i++) {
			odp_event = odp_buffer_to_event(odp_bufs[i]);
			events[i] = event_odp2em(odp_event);
		}

		for (i = num_bufs; i < num_bufs + ret; i++)
			ev_hdrs[i] = odp_buffer_user_area(odp_bufs[i]);

		if (esv_ena)
			evstate_alloc_multi(&events[num_bufs],
					    &ev_hdrs[num_bufs], ret);

		for (i = num_bufs; i < num_bufs + ret; i++) {
			ev_hdrs[i]->flags.all = 0;
			ev_hdrs[i]->event_type = type;
			/* ... */
			ev_hdrs[i]->event = events[i];
		}

		num_bufs += ret;
		if (likely(num_bufs == num))
			break; /* all allocated */
	}

	return num_bufs;
}
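
/**
 * Allocate an EM event based on an odp-packet.
 * The pool's align-offset is compensated for by pushing the pkt head (or
 * pulling the tail) so that the event payload starts where expected.
 * @return the event-header of the allocated event or NULL on failure.
 */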
static inline event_hdr_t *
event_alloc_pkt(const mpool_elem_t *pool_elem, uint32_t size)
{
	const uint32_t push_len = pool_elem->align_offset;
	uint32_t pull_len;
	uint32_t alloc_size;
	odp_packet_t odp_pkt = ODP_PACKET_INVALID;
	int subpool;

	if (size > push_len) {
		alloc_size = size - push_len;
		pull_len = 0;
	} else {
		alloc_size = 1; /* min allowed */
		pull_len = push_len + 1 - size;
	}

	/* Allocate from the 'best fit' subpool onwards */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return NULL;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL > 1 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return NULL;

		odp_pkt = odp_packet_alloc(odp_pool, alloc_size);
		if (likely(odp_pkt != ODP_PACKET_INVALID))
			break;
	}

	if (unlikely(odp_pkt == ODP_PACKET_INVALID))
		return NULL;

	/* Adjust the payload start to account for the pool's align-offset */
	void *ptr;

	if (push_len) {
		ptr = odp_packet_push_head(odp_pkt, push_len);
		if (unlikely(!ptr))
			goto err_pktalloc;
	}
	if (pull_len) {
		ptr = odp_packet_pull_tail(odp_pkt, pull_len);
		if (unlikely(!ptr))
			goto err_pktalloc;
	}
	/* ... */

	/* Pkt now allocated - init the event-hdr in the odp-pkt user-area */
	event_hdr_t *const ev_hdr = odp_packet_user_area(odp_pkt);
	odp_event_t odp_event = odp_packet_to_event(odp_pkt);
	em_event_t event = event_odp2em(odp_event);

	/* ... */
	ev_hdr->event = event; /* store this event as the 'original' event */

	return ev_hdr;

err_pktalloc:
	odp_packet_free(odp_pkt);
	return NULL;
}
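
/**
 * Helper for event_alloc_pkt_multi(): allocate up to 'num' odp-pkts from one
 * odp-pool and adjust their head/tail for the pool's align-offset.
 * @return the number of pkts allocated (0 on failure).
 */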
static inline int
pktalloc_multi(odp_packet_t odp_pkts[], int num,
	       odp_pool_t odp_pool, uint32_t size,
	       uint32_t push_len, uint32_t pull_len)
{
	int ret = odp_packet_alloc_multi(odp_pool, size, odp_pkts, num);

	if (unlikely(ret <= 0))
		return 0;

	const int num_pkts = ret; /* return value > 0 */
	const void *ptr = NULL;
	int i;

	/* Adjust the payload start to account for the pool's align-offset */
	if (push_len) {
		for (i = 0; i < num_pkts; i++) {
			ptr = odp_packet_push_head(odp_pkts[i], push_len);
			if (unlikely(!ptr))
				goto err_pktalloc_multi;
		}
	}
	if (pull_len) {
		for (i = 0; i < num_pkts; i++) {
			ptr = odp_packet_pull_tail(odp_pkts[i], pull_len);
			if (unlikely(!ptr))
				goto err_pktalloc_multi;
		}
	}

	/* Mark the pkts as EM events: set the user-flag */
	for (i = 0; i < num_pkts; i++)
		odp_packet_user_flag_set(odp_pkts[i], USER_FLAG_SET);

	return num_pkts;

err_pktalloc_multi:
	odp_packet_free_multi(odp_pkts, num_pkts);
	return 0;
}
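
/**
 * Allocate multiple EM events based on odp-packets, filling events[] from
 * the 'best fit' subpool onwards until 'num' events are allocated or all
 * subpools have been tried.
 * @return the number of events allocated (0 ... num).
 */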
static inline int
event_alloc_pkt_multi(em_event_t events[], const int num,
		      const mpool_elem_t *pool_elem, uint32_t size,
		      em_event_type_t type)
{
	const uint32_t push_len = pool_elem->align_offset;
	uint32_t pull_len;
	uint32_t alloc_size;
	odp_packet_t odp_pkts[num];
	/* Cast: no separate odp-event table needed */
	odp_event_t *const odp_events = (odp_event_t *)events;
	event_hdr_t *ev_hdrs[num];
	int subpool;
	const bool esv_ena = esv_enabled();

	if (size > push_len) {
		alloc_size = size - push_len;
		pull_len = 0;
	} else {
		alloc_size = 1; /* min allowed */
		pull_len = push_len + 1 - size;
	}

	/* Allocate from the 'best fit' subpool onwards */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return 0;

	int num_pkts = 0;
	int num_req;
	int i;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL > 1 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return 0;

		num_req = num - num_pkts;
		int ret = pktalloc_multi(&odp_pkts[num_pkts], num_req,
					 odp_pool, alloc_size,
					 push_len, pull_len);
		if (unlikely(ret <= 0))
			continue; /* try the next subpool */

		odp_packet_to_event_multi(&odp_pkts[num_pkts],
					  &odp_events[num_pkts], ret);

		for (i = num_pkts; i < num_pkts + ret; i++)
			ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]);

		if (esv_ena)
			evstate_alloc_multi(&events[num_pkts],
					    &ev_hdrs[num_pkts], ret);

		for (i = num_pkts; i < num_pkts + ret; i++) {
			ev_hdrs[i]->flags.all = 0;
			ev_hdrs[i]->event_type = type;
			/* ... */
			ev_hdrs[i]->event = events[i];
		}

		num_pkts += ret;
		if (likely(num_pkts == num))
			break; /* all allocated */
	}

	return num_pkts;
}
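
/**
 * Allocate an EM vector event based on an odp-pkt-vector.
 * @return the event-header of the allocated event or NULL on failure.
 */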
static inline event_hdr_t *
event_alloc_vector(const mpool_elem_t *pool_elem, uint32_t size)
{
	odp_packet_vector_t odp_pktvec = ODP_PACKET_VECTOR_INVALID;
	int subpool;

	/* Allocate from the 'best fit' subpool onwards */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return NULL;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL > 1 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return NULL;

		odp_pktvec = odp_packet_vector_alloc(odp_pool);
		if (likely(odp_pktvec != ODP_PACKET_VECTOR_INVALID))
			break;
	}

	if (unlikely(odp_pktvec == ODP_PACKET_VECTOR_INVALID))
		return NULL;

	/* Vector now allocated - init the event-hdr in the vector user-area */
	event_hdr_t *const ev_hdr = odp_packet_vector_user_area(odp_pktvec);
	odp_event_t odp_event = odp_packet_vector_to_event(odp_pktvec);
	em_event_t event = event_odp2em(odp_event);

	/* ... */
	ev_hdr->event = event; /* store this event as the 'original' event */

	return ev_hdr;

err_vecalloc:
	odp_packet_vector_free(odp_pktvec);
	return NULL;
}
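
/**
 * Helper for event_alloc_vector_multi(): allocate up to 'num'
 * odp-pkt-vectors from one odp-pool and mark them as EM events.
 * @return the number of vectors allocated (0 on failure).
 */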
static inline int
vecalloc_multi(odp_packet_vector_t odp_pktvecs[], int num,
	       odp_pool_t odp_pool)
{
	int i;

	for (i = 0; i < num; i++) {
		odp_pktvecs[i] = odp_packet_vector_alloc(odp_pool);
		if (unlikely(odp_pktvecs[i] == ODP_PACKET_VECTOR_INVALID))
			break;
	}

	const int num_vecs = i;

	if (unlikely(num_vecs == 0))
		return 0;

	/* Mark the vectors as EM events: set the user-flag */
	for (i = 0; i < num_vecs; i++)
		odp_packet_vector_user_flag_set(odp_pktvecs[i],
						USER_FLAG_SET);

	return num_vecs;
}
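
/**
 * Allocate multiple EM vector events based on odp-pkt-vectors.
 * @return the number of events allocated (0 ... num).
 */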
static inline int
event_alloc_vector_multi(em_event_t events[], const int num,
			 const mpool_elem_t *pool_elem, uint32_t size,
			 em_event_type_t type)
{
	odp_packet_vector_t odp_pktvecs[num];
	/* Cast: no separate odp-event table needed */
	odp_event_t *const odp_events = (odp_event_t *)events;
	event_hdr_t *ev_hdrs[num];
	int subpool;
	const bool esv_ena = esv_enabled();

	/* Allocate from the 'best fit' subpool onwards */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return 0;

	int num_vecs = 0;
	int num_req;
	int i;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL > 1 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return 0;

		num_req = num - num_vecs;
		int ret = vecalloc_multi(&odp_pktvecs[num_vecs], num_req,
					 odp_pool);
		if (unlikely(ret <= 0))
			continue; /* try the next subpool */

		for (i = num_vecs; i < num_vecs + ret; i++) {
			odp_events[i] = odp_packet_vector_to_event(odp_pktvecs[i]);
			ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);
		}

		if (esv_ena)
			evstate_alloc_multi(&events[num_vecs],
					    &ev_hdrs[num_vecs], ret);

		for (i = num_vecs; i < num_vecs + ret; i++) {
			ev_hdrs[i]->flags.all = 0;
			ev_hdrs[i]->event_type = type;
			/* ... */
			ev_hdrs[i]->event = events[i];
		}

		num_vecs += ret;
		if (likely(num_vecs == num))
			break; /* all allocated */
	}

	return num_vecs;
}
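
/**
 * Allocate an event of 'size' bytes from the given EM pool, dispatching to
 * event_alloc_pkt()/buf()/vector() based on the pool's event type.
 * 'api_op' identifies the calling API operation for ESV state tracking.
 * @return the allocated event or EM_EVENT_UNDEF on failure.
 *
 * A minimal usage sketch (hypothetical caller; 'pool_elem_get()' and the
 * EVSTATE__ALLOC api_op constant are assumed here, not defined in this file):
 *
 *	const mpool_elem_t *pool_elem = pool_elem_get(pool);
 *	em_event_t event = event_alloc(pool_elem, size, EM_EVENT_TYPE_SW,
 *				       EVSTATE__ALLOC);
 *	if (unlikely(event == EM_EVENT_UNDEF))
 *		return EM_EVENT_UNDEF;
 */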
static inline em_event_t
event_alloc(const mpool_elem_t *pool_elem, uint32_t size, em_event_type_t type,
	    const uint16_t api_op)
{
	event_hdr_t *ev_hdr = NULL;

	if (pool_elem->event_type == EM_EVENT_TYPE_PACKET)
		ev_hdr = event_alloc_pkt(pool_elem, size);
	else if (pool_elem->event_type == EM_EVENT_TYPE_SW)
		ev_hdr = event_alloc_buf(pool_elem, size);
	else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR)
		ev_hdr = event_alloc_vector(pool_elem, size);

	if (unlikely(!ev_hdr))
		return EM_EVENT_UNDEF;

	/* Event now allocated - complete the event-hdr init */
	ev_hdr->flags.all = 0;
	ev_hdr->event_type = type;
	/* ... */

	if (esv_enabled())
		return evstate_alloc(ev_hdr->event, ev_hdr, api_op);

	return ev_hdr->event;
}
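
/**
 * Allocate & initialize an event for ESV preallocation purposes
 * (used when the 'esv.prealloc_pools' option is enabled).
 * @return the event-header of the preallocated event or NULL on failure.
 */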
static inline event_hdr_t *
event_prealloc(const mpool_elem_t *pool_elem, uint32_t size)
{
	event_hdr_t *ev_hdr = NULL;

	if (pool_elem->event_type == EM_EVENT_TYPE_PACKET)
		ev_hdr = event_alloc_pkt(pool_elem, size);
	else if (pool_elem->event_type == EM_EVENT_TYPE_SW)
		ev_hdr = event_alloc_buf(pool_elem, size);
	else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR)
		ev_hdr = event_alloc_vector(pool_elem, size);

	if (unlikely(ev_hdr == NULL))
		return NULL;

	/* Event now allocated - set the initial ESV state */
	if (esv_enabled()) {
		em_event_t event = ev_hdr->event;

		(void)evstate_prealloc(event, ev_hdr);
	}
	ev_hdr->flags.all = 0;
	/* ... */

	event_hdr_t *const prealloc_hdr = ev_hdr;

	return prealloc_hdr;
}
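
/**
 * Convert a list-node from the pool's preallocated-events list back into
 * the event-header that contains it.
 */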
static inline event_hdr_t *
list_node_to_prealloc_hdr(list_node_t *const list_node)
{
	/* container-of: the list-node is embedded in the event-hdr */
	event_hdr_t *const ev_hdr = (event_hdr_t *)((uintptr_t)list_node -
				     offsetof(event_hdr_t, start_node));

	return likely(list_node != NULL) ? ev_hdr : NULL;
}
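
/**
 * Convert the event-table of a vector event from EM events into plain
 * odp-pkts, in place, before the vector is given to ODP: the table of an
 * enqueued vector must not contain EM event handles (ESV evgen-info).
 */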
static inline void
vector_tbl2odp(odp_event_t odp_event_pktvec)
{
	odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec);
	odp_packet_t *pkt_tbl = NULL;
	const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);

	if (likely(pkts > 0)) {
		/* careful: the event-table is the same memory as the pkt-table */
		em_event_t *event_tbl = (em_event_t *)pkt_tbl;

		/* drop the ESV evgen-info from the event handles */
		(void)events_em2pkt_inplace(event_tbl, pkts);
	}
}
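
/**
 * Restore the event-table of a vector event from odp-pkts back into EM
 * events, in place, initializing the pkt event-headers as needed.
 * Inverse of vector_tbl2odp().
 */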
static inline void
vector_tbl2em(odp_event_t odp_event_pktvec)
{
	odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec);
	odp_packet_t *pkt_tbl = NULL;
	const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);

	if (likely(pkts > 0)) {
		em_event_t *const ev_tbl = (em_event_t *const)pkt_tbl;
		odp_packet_t odp_pkttbl[pkts];
		event_hdr_t *ev_hdr_tbl[pkts];

		/*
		 * Copy the pkts from the vector's pkt-table, dropping any
		 * ESV evgen-info from the handles in the process.
		 */
		events_em2pkt(ev_tbl, odp_pkttbl, pkts);

		event_init_pkt_multi(odp_pkttbl, ev_tbl, ev_hdr_tbl,
				     pkts, false /* !is_extev */);
	}
}
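
/**
 * Enqueue an event onto the odp-queue of the given queue-element.
 * Vector event-tables are converted into odp-format for the enqueue and
 * restored if the enqueue fails.
 * @return EM_OK on success.
 */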
static inline em_status_t
send_event(em_event_t event, const queue_elem_t *q_elem)
{
	const bool esv_ena = esv_enabled();
	odp_event_t odp_event = event_em2odp(event);
	odp_queue_t odp_queue = q_elem->odp_queue;
	int ret;

	if (unlikely(EM_CHECK_LEVEL > 1 &&
		     (odp_event == ODP_EVENT_INVALID ||
		      odp_queue == ODP_QUEUE_INVALID)))
		return EM_ERR_NOT_FOUND;
	/* ... */

	/*
	 * Vector: convert the event-table to odp-format,
	 * the enqueued vector must not contain EM events.
	 */
	if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR)
		vector_tbl2odp(odp_event);

	/* Enqueue the event */
	ret = odp_queue_enq(odp_queue, odp_event);

	if (unlikely(ret != 0)) {
		/* Restore the EM event-table on enqueue failure */
		if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR)
			vector_tbl2em(odp_event);

		return EM_ERR_LIB_FAILED;
	}

	return EM_OK;
}
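
/**
 * Multi-event variant of send_event().
 * @return the number of events successfully enqueued (0 ... num).
 */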
static inline int
send_event_multi(const em_event_t events[], const int num,
		 const queue_elem_t *q_elem)
{
	const bool esv_ena = esv_enabled();
	odp_event_t odp_events[num];
	odp_queue_t odp_queue = q_elem->odp_queue;

	if (unlikely(EM_CHECK_LEVEL > 1 && odp_queue == ODP_QUEUE_INVALID))
		return 0;
	/* ... */

	events_em2odp(events, odp_events, num);

	/*
	 * Vector: convert the event-tables to odp-format,
	 * the enqueued vectors must not contain EM events.
	 */
	if (esv_ena) {
		for (int i = 0; i < num; i++) {
			if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR)
				vector_tbl2odp(odp_events[i]);
		}
	}

	/* Enqueue the events */
	int ret = odp_queue_enq_multi(odp_queue, odp_events, num);

	if (likely(ret == num))
		return num;

	/* Restore the EM event-tables of the events not enqueued */
	int enq = ret < 0 ? 0 : ret;

	if (esv_ena) {
		for (int i = enq; i < num; i++) {
			if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR)
				vector_tbl2em(odp_events[i]);
		}
	}

	return enq;
}
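
/**
 * Send an event to a core-local queue: store the event-pointer and the
 * destination queue-index into this core's per-priority stash.
 * @return EM_OK on success.
 */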
static inline em_status_t
send_local(em_event_t event, const queue_elem_t *q_elem)
{
	em_locm_t *const locm = &em_locm;
	evhdl_t evhdl = {.event = event};
	const em_queue_prio_t prio = q_elem->priority;
	em_queue_t queue = (em_queue_t)(uintptr_t)q_elem->queue;
	stash_entry_t entry = {.qidx = (uint16_t)queue_hdl2idx(queue),
			       .evptr = evhdl.evptr};
	int ret;

	ret = odp_stash_put_u64(locm->local_queues.prio[prio].stash,
				&entry.u64, 1);
	if (likely(ret == 1)) {
		locm->local_queues.empty = 0;
		locm->local_queues.prio[prio].empty_prio = 0;
		return EM_OK;
	}

	return EM_ERR_LIB_FAILED;
}
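
/**
 * Multi-event variant of send_local().
 * @return the number of events successfully stashed (0 ... num).
 */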
static inline int
send_local_multi(const em_event_t events[], const int num,
		 const queue_elem_t *q_elem)
{
	em_locm_t *const locm = &em_locm;
	const evhdl_t *const evhdl_tbl = (const evhdl_t *)events;
	const em_queue_prio_t prio = q_elem->priority;
	stash_entry_t entry_tbl[num];
	em_queue_t queue = (em_queue_t)(uintptr_t)q_elem->queue;
	const uint16_t qidx = (uint16_t)queue_hdl2idx(queue);

	for (int i = 0; i < num; i++) {
		entry_tbl[i].qidx = qidx;
		entry_tbl[i].evptr = evhdl_tbl[i].evptr;
	}

	int ret = odp_stash_put_u64(locm->local_queues.prio[prio].stash,
				    &entry_tbl[0].u64, num);
	if (likely(ret > 0)) {
		locm->local_queues.empty = 0;
		locm->local_queues.prio[prio].empty_prio = 0;
		return ret;
	}

	return 0;
}
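
/**
 * Send an event out of EM via an output queue.
 * From an ordered scheduling context the event is first buffered in an
 * odp-queue to retain event order and the user-provided output-function is
 * run only when the odp-queue is drained; otherwise the output-function is
 * called directly.
 */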
static inline em_status_t
send_output(em_event_t event, queue_elem_t *const output_q_elem)
{
	const em_sched_context_type_t sched_ctx_type =
		em_locm.current.sched_context_type;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED))
		return EM_ERR_BAD_STATE;

	/*
	 * An event sent from an ordered context needs to be 're-ordered'
	 * before calling the user provided output-function: buffer it in an
	 * odp-queue and call the output-function only when the odp-queue is
	 * drained (i.e. the events are in order).
	 */
	if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) {
		const odp_queue_t odp_queue = output_q_elem->odp_queue;
		odp_event_t odp_event = event_em2odp(event);
		int ret;

		if (unlikely(EM_CHECK_LEVEL > 1 &&
			     (odp_event == ODP_EVENT_INVALID ||
			      odp_queue == ODP_QUEUE_INVALID)))
			return EM_ERR_NOT_FOUND;

		if (!EM_OUTPUT_QUEUE_IMMEDIATE)
			output_queue_track(output_q_elem);

		/* Enqueue to the odp-queue to retain event order */
		ret = odp_queue_enq(odp_queue, odp_event);
		if (unlikely(ret != 0))
			return EM_ERR_LIB_FAILED;

		if (EM_OUTPUT_QUEUE_IMMEDIATE) {
			env_spinlock_t *const lock =
				&output_q_elem->output.lock;

			if (!env_spinlock_trylock(lock))
				return EM_OK;
			output_queue_drain(output_q_elem);
			env_spinlock_unlock(lock);
		}
		return EM_OK;
	}

	/* Not an ordered context - call the output-function directly */
	const em_queue_t output_queue = (em_queue_t)(uintptr_t)output_q_elem->queue;
	const em_output_func_t output_fn =
		output_q_elem->output.output_conf.output_fn;
	void *const output_fn_args =
		output_q_elem->output.output_conf.output_fn_args;
	int sent;

	sent = output_fn(&event, 1, output_queue, output_fn_args);
	if (unlikely(sent != 1))
		return EM_ERR_OPERATION_FAILED;

	return EM_OK;
}
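
/**
 * Multi-event variant of send_output().
 * @return the number of events successfully sent (0 ... num).
 */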
static inline int
send_output_multi(const em_event_t events[], const unsigned int num,
		  queue_elem_t *const output_q_elem)
{
	const em_sched_context_type_t sched_ctx_type =
		em_locm.current.sched_context_type;
	int sent;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED))
		return 0;

	/*
	 * Events sent from an ordered context need to be 're-ordered' before
	 * calling the user provided output-function, see send_output().
	 */
	if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) {
		const odp_queue_t odp_queue = output_q_elem->odp_queue;
		odp_event_t odp_events[num];

		if (unlikely(EM_CHECK_LEVEL > 1 &&
			     odp_queue == ODP_QUEUE_INVALID))
			return 0;

		if (!EM_OUTPUT_QUEUE_IMMEDIATE)
			output_queue_track(output_q_elem);

		events_em2odp(events, odp_events, num);

		/* Enqueue to the odp-queue to retain event order */
		sent = odp_queue_enq_multi(odp_queue, odp_events, num);
		if (unlikely(sent <= 0))
			return 0;

		if (EM_OUTPUT_QUEUE_IMMEDIATE) {
			env_spinlock_t *const lock =
				&output_q_elem->output.lock;

			if (!env_spinlock_trylock(lock))
				return sent;
			output_queue_drain(output_q_elem);
			env_spinlock_unlock(lock);
		}
		return sent;
	}

	/* Not an ordered context - call the output-function directly */
	const em_queue_t output_queue = (em_queue_t)(uintptr_t)output_q_elem->queue;
	const em_output_func_t output_fn =
		output_q_elem->output.output_conf.output_fn;
	void *const output_fn_args =
		output_q_elem->output.output_conf.output_fn_args;

	sent = output_fn(events, num, output_queue, output_fn_args);

	return sent;
}
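
/**
 * Return a pointer to the event payload: the pkt data start for pkt events,
 * the buffer address (adjusted by the pool's align-offset) for buf events
 * and NULL for event types without a payload pointer (e.g. vectors).
 */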
static inline void *
event_pointer(em_event_t event)
{
	const odp_event_t odp_event = event_em2odp(event);
	const odp_event_type_t odp_etype = odp_event_type(odp_event);
	void *ev_ptr = NULL; /* return value */

	if (odp_etype == ODP_EVENT_PACKET) {
		const odp_packet_t odp_pkt = odp_packet_from_event(odp_event);

		ev_ptr = odp_packet_data(odp_pkt);
	} else if (odp_etype == ODP_EVENT_BUFFER) {
		const odp_buffer_t odp_buf = odp_buffer_from_event(odp_event);
		const event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf);
		const uint32_t align_offset = ev_hdr->align_offset;

		ev_ptr = odp_buffer_addr(odp_buf);

		if (align_offset)
			ev_ptr = (void *)((uintptr_t)ev_ptr + 32 - align_offset);
	}

	return ev_ptr; /* NULL for other event types */
}
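
/**
 * @return 'true' if the event is a pkt event with references,
 *	   otherwise 'false'.
 */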
static inline bool
event_has_ref(em_event_t event)
{
	odp_event_t odp_event = event_em2odp(event);
	odp_event_type_t odp_etype = odp_event_type(odp_event);

	if (odp_etype != ODP_EVENT_PACKET)
		return false;

	odp_packet_t odp_pkt = odp_packet_from_event(odp_event);

	return odp_packet_has_ref(odp_pkt) ? true : false;
}