EM-ODP  3.7.0
Event Machine on ODP
em_event.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015-2023, Nokia Solutions and Networks
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * * Redistributions of source code must retain the above copyright
10  * notice, this list of conditions and the following disclaimer.
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in the
13  * documentation and/or other materials provided with the distribution.
14  * * Neither the name of the copyright holder nor the names of its
15  * contributors may be used to endorse or promote products derived
16  * from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31  /**
32  * @file
33  * EM internal event functions
34  *
35  */
36 
37 #ifndef EM_EVENT_H_
38 #define EM_EVENT_H_
39 
40 #ifdef __cplusplus
41 extern "C" {
42 #endif
43 
44 #ifndef __clang__
45 COMPILE_TIME_ASSERT((uintptr_t)EM_EVENT_UNDEF == (uintptr_t)ODP_EVENT_INVALID,
46  EM_EVENT_NOT_EQUAL_TO_ODP_EVENT);
47 COMPILE_TIME_ASSERT(EM_TMO_TYPE_NONE == 0,
48  "EM_TMO_TYPE_NONE must be 0");
49 #endif
50 
51 em_status_t event_init(void);
52 void print_event_info(void);
53 em_event_t pkt_clone_odp(odp_packet_t pkt, odp_pool_t pkt_pool,
54  uint32_t offset, uint32_t size, bool is_clone_part);
55 void output_queue_track(queue_elem_t *const output_q_elem);
56 void output_queue_drain(const queue_elem_t *output_q_elem);
57 void output_queue_buffering_drain(void);
58 
59 uint32_t event_vector_tbl(em_event_t vector_event, em_event_t **event_tbl/*out*/);
60 em_status_t event_vector_max_size(em_event_t vector_event, uint32_t *max_size /*out*/,
61  em_escope_t escope);
62 
63 /**
64  * Initialize the event header of a packet allocated outside of EM.
65  */
66 static inline em_event_t
67 evhdr_init_pkt(event_hdr_t *ev_hdr, em_event_t event,
68  odp_packet_t odp_pkt, bool is_extev)
69 {
70  const int user_flag_set = odp_packet_user_flag(odp_pkt);
71  const bool esv_ena = esv_enabled();
72 
73  if (user_flag_set) {
74  /* Event already initialized by EM */
75  if (esv_ena) {
76  event = ev_hdr->event;
77  if (is_extev)
78  event = evstate_em2usr(event, ev_hdr, EVSTATE__DISPATCH);
79  }
80 
81  return event;
82  }
83 
84  /*
85  * ODP pkt from outside of EM - not allocated by EM & needs init
86  */
87  odp_packet_user_flag_set(odp_pkt, USER_FLAG_SET);
89  ev_hdr->egrp = EM_EVENT_GROUP_UNDEF;
90  ev_hdr->user_area.all = 0; /* uarea fields init when used */
91 
92  if (!esv_ena) {
93  ev_hdr->flags.all = 0;
94  ev_hdr->event = event;
95  return event;
96  }
97 
98  /*
99  * ESV enabled:
100  */
101  if (!em_shm->opt.esv.prealloc_pools || ev_hdr->flags.refs_used) {
102  /* No prealloc OR pkt was a ref before being freed into the pool */
103  event = evstate_init(event, ev_hdr, is_extev);
104  } else {
105  /* esv.prealloc_pools == true: */
106  odp_pool_t odp_pool = odp_packet_pool(odp_pkt);
107  em_pool_t pool = pool_odp2em(odp_pool);
108 
109  if (pool == EM_POOL_UNDEF) {
110  /* External odp pkt originates from an ODP-pool */
111  event = evstate_init(event, ev_hdr, is_extev);
112  } else {
113  /* External odp pkt originates from an EM-pool */
114  event = evstate_update(event, ev_hdr, is_extev);
115  }
116  }
117  ev_hdr->flags.all = 0; /* clear only after evstate_...() */
118 
119  return event;
120 }
121 
122 /**
123  * Initialize the event headers of packets allocated outside of EM.
124  */
125 static inline void
126 evhdr_init_pkt_multi(event_hdr_t *const ev_hdrs[],
127  em_event_t events[/*in,out*/],
128  const odp_packet_t odp_pkts[/*in*/],
129  const int num, bool is_extev)
130 {
131  const bool esv_ena = esv_enabled();
132  int user_flag_set;
133 
134  int needs_init_idx[num];
135  int needs_init_num = 0;
136  int idx;
137 
138  for (int i = 0; i < num; i++) {
139  user_flag_set = odp_packet_user_flag(odp_pkts[i]);
140  if (user_flag_set) {
141  /* Event already initialized by EM */
142  if (esv_ena) {
143  events[i] = ev_hdrs[i]->event;
144  if (is_extev)
145  events[i] = evstate_em2usr(events[i], ev_hdrs[i],
146  EVSTATE__DISPATCH_MULTI);
147  }
148  /* else events[i] = events[i] */
149  } else {
150  odp_packet_user_flag_set(odp_pkts[i], USER_FLAG_SET);
151  needs_init_idx[needs_init_num] = i;
152  needs_init_num++;
153  }
154  }
155 
156  if (needs_init_num == 0)
157  return;
158 
159  /*
160  * ODP pkt from outside of EM - not allocated by EM & needs init
161  */
162 
163  if (!esv_ena) {
164  for (int i = 0; i < needs_init_num; i++) {
165  idx = needs_init_idx[i];
166  ev_hdrs[idx]->flags.all = 0;
167  ev_hdrs[idx]->event_type = EM_EVENT_TYPE_PACKET;
168  ev_hdrs[idx]->event = events[idx];
169  ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF;
170  ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */
171  }
172 
173  return;
174  }
175 
176  /*
177  * ESV enabled:
178  */
179  if (!em_shm->opt.esv.prealloc_pools) {
180  for (int i = 0; i < needs_init_num; i++) {
181  idx = needs_init_idx[i];
182  events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
183  }
184  } else {
185  /* em_shm->opt.esv.prealloc_pools == true */
186  for (int i = 0; i < needs_init_num; i++) {
187  idx = needs_init_idx[i];
188 
189  odp_pool_t odp_pool = odp_packet_pool(odp_pkts[idx]);
190  em_pool_t pool = pool_odp2em(odp_pool);
191 
192  if (pool == EM_POOL_UNDEF || ev_hdrs[idx]->flags.refs_used) {
193  /*
194  * External odp pkt originates from an ODP-pool,
195  * or pkt was a ref before being freed into the pool.
196  */
197  events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
198  } else {
199  /* External odp pkt originates from an EM-pool */
200  events[idx] = evstate_update(events[idx], ev_hdrs[idx], is_extev);
201  }
202  }
203  }
204 
205  for (int i = 0; i < needs_init_num; i++) {
206  idx = needs_init_idx[i];
207  ev_hdrs[idx]->flags.all = 0; /* clear only after evstate_...() */
208  ev_hdrs[idx]->event_type = EM_EVENT_TYPE_PACKET;
209  ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF;
210  ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */
211  }
212 }
213 
214 /**
215  * Initialize the event header of a packet vector allocated outside of EM.
216  */
217 static inline em_event_t
218 evhdr_init_pktvec(event_hdr_t *ev_hdr, em_event_t event,
219  odp_packet_vector_t odp_pktvec, bool is_extev)
220 {
221  const int user_flag = odp_packet_vector_user_flag(odp_pktvec);
222  const bool esv_ena = esv_enabled();
223 
224  if (user_flag == USER_FLAG_SET) {
225  /* Event already initialized by EM */
226  if (esv_ena) {
227  event = ev_hdr->event;
228  if (is_extev)
229  event = evstate_em2usr(event, ev_hdr, EVSTATE__DISPATCH);
230  }
231 
232  return event;
233  }
234 
235  /*
236  * ODP pkt from outside of EM - not allocated by EM & needs init
237  */
238  odp_packet_vector_user_flag_set(odp_pktvec, USER_FLAG_SET);
239  /* can clear flags before evstate_...(), flags.refs_used not set for vecs */
240  ev_hdr->flags.all = 0;
242  ev_hdr->egrp = EM_EVENT_GROUP_UNDEF;
243  ev_hdr->user_area.all = 0; /* uarea fields init when used */
244 
245  if (!esv_ena) {
246  ev_hdr->event = event;
247  return event;
248  }
249 
250  /*
251  * ESV enabled:
252  */
253  if (!em_shm->opt.esv.prealloc_pools) {
254  event = evstate_init(event, ev_hdr, is_extev);
255  } else {
256  /* esv.prealloc_pools == true: */
257  odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvec);
258  em_pool_t pool = pool_odp2em(odp_pool);
259 
260  if (pool == EM_POOL_UNDEF) {
261  /* External odp pkt originates from an ODP-pool */
262  event = evstate_init(event, ev_hdr, is_extev);
263  } else {
264  /* External odp pkt originates from an EM-pool */
265  event = evstate_update(event, ev_hdr, is_extev);
266  if (is_extev)
267  return event;
268  }
269  }
270 
271  return event;
272 }
273 
274 /**
275  * Initialize the event headers of packet vectors allocated outside of EM.
276  */
277 static inline void
278 evhdr_init_pktvec_multi(event_hdr_t *ev_hdrs[/*out*/],
279  em_event_t events[/*in,out*/],
280  const odp_packet_vector_t odp_pktvecs[/*in*/],
281  const int num, bool is_extev)
282 {
283  const bool esv_ena = esv_enabled();
284 
285  int needs_init_idx[num];
286  int needs_init_num = 0;
287  int idx;
288 
289  for (int i = 0; i < num; i++) {
290  int user_flag = odp_packet_vector_user_flag(odp_pktvecs[i]);
291 
292  if (user_flag == USER_FLAG_SET) {
293  /* Event already initialized by EM */
294  if (esv_ena) {
295  events[i] = ev_hdrs[i]->event;
296  if (is_extev)
297  events[i] = evstate_em2usr(events[i], ev_hdrs[i],
298  EVSTATE__DISPATCH_MULTI);
299  }
300  /* else events[i] = events[i] */
301  } else {
302  odp_packet_vector_user_flag_set(odp_pktvecs[i], USER_FLAG_SET);
303  needs_init_idx[needs_init_num] = i;
304  needs_init_num++;
305  }
306  }
307 
308  if (needs_init_num == 0)
309  return;
310 
311  /*
312  * ODP pkt vector from outside of EM - not allocated by EM & needs init
313  */
314 
315  if (!esv_ena) {
316  for (int i = 0; i < needs_init_num; i++) {
317  idx = needs_init_idx[i];
318  ev_hdrs[idx]->flags.all = 0;
319  ev_hdrs[idx]->event_type = EM_EVENT_TYPE_VECTOR;
320  ev_hdrs[idx]->event = events[idx];
321  ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF;
322  ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */
323  }
324 
325  return;
326  }
327 
328  /*
329  * ESV enabled:
330  */
331  for (int i = 0; i < needs_init_num; i++) {
332  idx = needs_init_idx[i];
333  /* can clear flags before evstate_...(), flags.refs_used not set for vecs */
334  ev_hdrs[idx]->flags.all = 0;
335  ev_hdrs[idx]->event_type = EM_EVENT_TYPE_VECTOR;
336  ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF;
337  ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */
338  }
339 
340  if (!em_shm->opt.esv.prealloc_pools) {
341  for (int i = 0; i < needs_init_num; i++) {
342  idx = needs_init_idx[i];
343  events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
344  }
345 
346  return;
347  }
348 
349  /*
350  * em_shm->opt.esv.prealloc_pools == true
351  */
352  for (int i = 0; i < needs_init_num; i++) {
353  idx = needs_init_idx[i];
354 
355  odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvecs[idx]);
356  em_pool_t pool = pool_odp2em(odp_pool);
357 
358  if (pool == EM_POOL_UNDEF) {
359  /* External odp pkt originates from an ODP-pool */
360  events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
361  } else {
362  /* External odp pkt originates from an EM-pool */
363  events[idx] = evstate_update(events[idx], ev_hdrs[idx], is_extev);
364  }
365  }
366 }
367 
368 /**
369  * Initialize an external ODP event that have been input into EM.
370  *
371  * Initialize the event header if needed, i.e. if event originated from outside
372  * of EM from pktio or other input and was not allocated by EM via em_alloc().
373  * The odp pkt-user-ptr is used to determine whether the header has been
374  * initialized or not.
375  */
376 static inline em_event_t
377 event_init_odp(odp_event_t odp_event, bool is_extev, event_hdr_t **ev_hdr__out)
378 {
379  const odp_event_type_t odp_type = odp_event_type(odp_event);
380  em_event_t event = event_odp2em(odp_event); /* return value, updated by ESV */
381 
382  switch (odp_type) {
383  case ODP_EVENT_PACKET: {
384  odp_packet_t odp_pkt = odp_packet_from_event(odp_event);
385  event_hdr_t *ev_hdr = odp_packet_user_area(odp_pkt);
386 
387  /* init event-hdr if needed (also ESV-state if used) */
388  event = evhdr_init_pkt(ev_hdr, event, odp_pkt, is_extev);
389  if (ev_hdr__out)
390  *ev_hdr__out = ev_hdr;
391  return event;
392  }
393  case ODP_EVENT_BUFFER: {
394  const bool esv_ena = esv_enabled();
395 
396  if (!ev_hdr__out && !esv_ena)
397  return event;
398 
399  odp_buffer_t odp_buf = odp_buffer_from_event(odp_event);
400  event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf);
401 
402  if (esv_ena) { /* update event handle (ESV) */
403  event = ev_hdr->event;
404  if (is_extev)
405  event = evstate_em2usr(event, ev_hdr, EVSTATE__DISPATCH);
406  }
407  if (ev_hdr__out)
408  *ev_hdr__out = ev_hdr;
409  return event;
410  }
411  case ODP_EVENT_PACKET_VECTOR: {
412  odp_packet_vector_t odp_pktvec = odp_packet_vector_from_event(odp_event);
413  event_hdr_t *ev_hdr = odp_packet_vector_user_area(odp_pktvec);
414 
415  /* init event-hdr if needed (also ESV-state if used) */
416  event = evhdr_init_pktvec(ev_hdr, event, odp_pktvec, is_extev);
417  if (ev_hdr__out)
418  *ev_hdr__out = ev_hdr;
419  return event;
420  }
421  case ODP_EVENT_TIMEOUT: {
422  odp_timeout_t odp_tmo = odp_timeout_from_event(odp_event);
423  event_hdr_t *ev_hdr = odp_timeout_user_area(odp_tmo);
424  const bool esv_ena = esv_enabled();
425 
426  if (esv_ena) {
427  /*
428  * Update event handle, no other ESV checks done.
429  * Some timers might send a copy of the original event
430  * in tear-down, thus keep ptr but update evgen.
431  */
432  evhdl_t evhdl = {.event = event}; /* .evptr from here */
433  evhdl_t evhdr_hdl = {.event = ev_hdr->event}; /* .evgen from here */
434 
435  evhdl.evgen = evhdr_hdl.evgen; /* update .evgen */
436  ev_hdr->event = evhdl.event; /* store updated hdl in hdr */
437  event = evhdl.event; /* return updated event */
438  }
439 
440  if (ev_hdr__out)
441  *ev_hdr__out = ev_hdr;
442  return event;
443  }
444  default:
446  EM_ESCOPE_EVENT_INIT_ODP,
447  "Unexpected odp event type:%u", odp_type);
448  __builtin_unreachable();
449  /* never reached */
450  return EM_EVENT_UNDEF;
451  }
452 }
453 
454 /* Helper to event_init_odp_multi() */
455 static inline void
456 event_init_pkt_multi(const odp_packet_t odp_pkts[/*in*/],
457  em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
458  const int num, bool is_extev)
459 {
460  for (int i = 0; i < num; i++)
461  ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]);
462 
463  evhdr_init_pkt_multi(ev_hdrs, events, odp_pkts, num, is_extev);
464 }
465 
466 /* Helper to event_init_odp_multi() */
467 static inline void
468 event_init_buf_multi(const odp_buffer_t odp_bufs[/*in*/],
469  em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
470  const int num, bool is_extev)
471 {
472  for (int i = 0; i < num; i++)
473  ev_hdrs[i] = odp_buffer_user_area(odp_bufs[i]);
474 
475  if (esv_enabled()) {
476  /* update event handle (ESV) */
477  for (int i = 0; i < num; i++)
478  events[i] = ev_hdrs[i]->event;
479 
480  if (is_extev)
481  evstate_em2usr_multi(events, ev_hdrs, num,
482  EVSTATE__DISPATCH_MULTI);
483  }
484 }
485 
486 /* Helper to event_init_odp_multi() */
487 static inline void
488 event_init_tmo_multi(const odp_timeout_t odp_tmos[/*in*/],
489  em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
490  const int num)
491 {
492  for (int i = 0; i < num; i++)
493  ev_hdrs[i] = odp_timeout_user_area(odp_tmos[i]);
494 
495  /* ignore ESV */
496  (void)events;
497 }
498 
499 /* Helper to event_init_odp_multi() */
500 static inline void
501 event_init_pktvec_multi(const odp_packet_vector_t odp_pktvecs[/*in*/],
502  em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
503  const int num, bool is_extev)
504 {
505  for (int i = 0; i < num; i++)
506  ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);
507 
508  evhdr_init_pktvec_multi(ev_hdrs, events, odp_pktvecs, num, is_extev);
509 }
510 
511 /**
512  * Convert from EM events to event headers and initialize the headers as needed.
513  *
514  * Initialize the event header if needed, i.e. if event originated from outside
515  * of EM from pktio or other input and was not allocated by EM via em_alloc().
516  * The odp pkt-user-ptr is used to determine whether the header has been
517  * initialized or not.
518  */
519 static inline void
520 event_init_odp_multi(const odp_event_t odp_events[/*in*/],
521  em_event_t events[/*out*/], event_hdr_t *ev_hdrs[/*out*/],
522  const int num, bool is_extev)
523 {
524  for (int i = 0; i < num; i++)
525  events[i] = event_init_odp(odp_events[i], is_extev, &ev_hdrs[i]);
526 }
527 
528 /**
529  * Allocate an event based on an odp-buf.
530  */
531 static inline event_hdr_t *
532 event_alloc_buf(const mpool_elem_t *const pool_elem, uint32_t size)
533 {
534  odp_buffer_t odp_buf = ODP_BUFFER_INVALID;
535  int subpool;
536 
537  /*
538  * Allocate from the 'best fit' subpool, or if that is full, from the
539  * next subpool that has buffers available of a bigger size.
540  */
541  subpool = pool_find_subpool(pool_elem, size);
542  if (unlikely(subpool < 0))
543  return NULL;
544 
545  for (; subpool < pool_elem->num_subpools; subpool++) {
546  odp_pool_t odp_pool = pool_elem->odp_pool[subpool];
547 
548  if (EM_CHECK_LEVEL >= 3 &&
549  unlikely(odp_pool == ODP_POOL_INVALID))
550  return NULL;
551 
552  odp_buf = odp_buffer_alloc(odp_pool);
553  if (likely(odp_buf != ODP_BUFFER_INVALID))
554  break;
555  }
556 
557  if (unlikely(odp_buf == ODP_BUFFER_INVALID))
558  return NULL;
559 
560  /*
561  * odp buffer now allocated - init the EM event header
562  * in the odp user area.
563  */
564  event_hdr_t *const ev_hdr = odp_buffer_user_area(odp_buf);
565  odp_event_t odp_event = odp_buffer_to_event(odp_buf);
566  em_event_t event = event_odp2em(odp_event);
567 
568  ev_hdr->event = event; /* store this event handle */
569  ev_hdr->align_offset = pool_elem->align_offset;
570 
571  /* init common ev_hdr fields in the caller */
572 
573  return ev_hdr;
574 }
575 
/**
 * Allocate & initialize multiple events based on odp-bufs.
 *
 * Allocates from the 'best fit' subpool first, then walks the remaining
 * (bigger) subpools until 'num' buffers are allocated or the subpools are
 * exhausted.
 *
 * @param[out] events    Output array for the allocated event handles
 * @param      num       Number of events requested
 * @param      pool_elem EM pool to allocate from
 * @param      size      Requested event payload size
 * @param      type      Event type to store in each header
 *
 * @return The number of allocated & initialized events (0 ... num).
 */
static inline int
event_alloc_buf_multi(em_event_t events[/*out*/], const int num,
		      const mpool_elem_t *pool_elem, uint32_t size,
		      em_event_type_t type)
{
	odp_buffer_t odp_bufs[num];
	odp_event_t odp_event;
	event_hdr_t *ev_hdrs[num];
	int subpool;
	const bool esv_ena = esv_enabled();

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has buffers available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return 0;

	int num_req = num;   /* bufs still to be allocated */
	int num_bufs = 0;    /* bufs allocated so far */
	int i;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return 0;

		int ret = odp_buffer_alloc_multi(odp_pool, &odp_bufs[num_bufs],
						 num_req);
		if (unlikely(ret <= 0))
			continue; /* try next subpool */

		/* store the allocated events[] */
		for (i = num_bufs; i < num_bufs + ret; i++) {
			odp_event = odp_buffer_to_event(odp_bufs[i]);
			events[i] = event_odp2em(odp_event);
		}

		/* Init 'ret' ev-hdrs from this 'subpool'=='odp-pool' */
		for (i = num_bufs; i < num_bufs + ret; i++)
			ev_hdrs[i] = odp_buffer_user_area(odp_bufs[i]);

		if (esv_ena) {
			/* reads ev_hdrs[i]->flags if prealloc_pools used */
			evstate_alloc_multi(&events[num_bufs] /*in/out*/,
					    &ev_hdrs[num_bufs], ret);
		}

		for (i = num_bufs; i < num_bufs + ret; i++) {
			/* flags cleared only after evstate_alloc_multi() above */
			ev_hdrs[i]->flags.all = 0;
			ev_hdrs[i]->event_type = type;
			if (!esv_ena)
				ev_hdrs[i]->event = events[i];
			ev_hdrs[i]->event_size = size;
			ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF;

			ev_hdrs[i]->user_area.all = 0;
			ev_hdrs[i]->user_area.size = pool_elem->user_area.size;
			ev_hdrs[i]->user_area.isinit = 1;

			ev_hdrs[i]->align_offset = pool_elem->align_offset;
		}

		num_bufs += ret;
		if (likely(num_bufs == num))
			break; /* all allocated */
		num_req -= ret;
	}

	return num_bufs; /* number of allocated bufs (0 ... num) */
}
653 
/**
 * Allocate & initialize an event based on an odp-pkt.
 *
 * Adjusts the pkt payload start address according to the pool's alignment
 * config ('align_offset') and stores the event handle in the EM event header
 * located in the odp-pkt user area. The common ev_hdr fields are initialized
 * by the caller.
 *
 * @return Ptr to the event header, or NULL on allocation failure.
 */
static inline event_hdr_t *
event_alloc_pkt(const mpool_elem_t *pool_elem, uint32_t size)
{
	const uint32_t push_len = pool_elem->align_offset;
	uint32_t pull_len;
	uint32_t alloc_size;
	odp_packet_t odp_pkt = ODP_PACKET_INVALID;
	int subpool;

	/* Compensate the alloc size for the head-push done below */
	if (size > push_len) {
		alloc_size = size - push_len;
		pull_len = 0;
	} else {
		alloc_size = 1; /* min allowed */
		pull_len = push_len + 1 - size;
	}

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has pkts available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return NULL;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return NULL;

		odp_pkt = odp_packet_alloc(odp_pool, alloc_size);
		if (likely(odp_pkt != ODP_PACKET_INVALID))
			break;
	}

	if (unlikely(odp_pkt == ODP_PACKET_INVALID))
		return NULL;

	/*
	 * odp packet now allocated - adjust the payload start address and
	 * init the EM event header in the odp-pkt user-area
	 */

	/* Adjust event payload start-address based on alignment config */
	const void *ptr;

	if (push_len) {
		ptr = odp_packet_push_head(odp_pkt, push_len);
		if (EM_CHECK_LEVEL >= 3 && unlikely(!ptr))
			goto err_pktalloc;
	}
	if (pull_len) {
		ptr = odp_packet_pull_tail(odp_pkt, pull_len);
		if (EM_CHECK_LEVEL >= 3 && unlikely(!ptr))
			goto err_pktalloc;
	}

	/*
	 * Set the pkt user ptr to be able to recognize pkt-events that
	 * EM has created vs pkts from pkt-input that needs their
	 * ev-hdrs to be initialized.
	 */
	odp_packet_user_flag_set(odp_pkt, USER_FLAG_SET);

	event_hdr_t *const ev_hdr = odp_packet_user_area(odp_pkt);
	odp_event_t odp_event = odp_packet_to_event(odp_pkt);
	em_event_t event = event_odp2em(odp_event);

	if (EM_CHECK_LEVEL >= 3 && unlikely(ev_hdr == NULL))
		goto err_pktalloc;

	/* store this event handle */
	ev_hdr->event = event;

	/* init common ev_hdr fields in the caller */

	return ev_hdr;

err_pktalloc:
	/* single cleanup path: release the pkt on any adjust/hdr failure */
	odp_packet_free(odp_pkt);
	return NULL;
}
741 
742 /*
743  * Helper for event_alloc_pkt_multi()
744  */
745 static inline int
746 pktalloc_multi(odp_packet_t odp_pkts[/*out*/], int num,
747  odp_pool_t odp_pool, uint32_t size,
748  uint32_t push_len, uint32_t pull_len)
749 {
750  int ret = odp_packet_alloc_multi(odp_pool, size, odp_pkts, num);
751 
752  if (unlikely(ret <= 0))
753  return 0;
754 
755  const int num_pkts = ret; /* return value > 0 */
756  const void *ptr = NULL;
757  int i;
758 
759  /* Adjust payload start-address based on alignment config */
760  if (push_len) {
761  for (i = 0; i < num_pkts; i++) {
762  ptr = odp_packet_push_head(odp_pkts[i], push_len);
763  if (EM_CHECK_LEVEL >= 3 && unlikely(!ptr))
764  goto err_pktalloc_multi;
765  }
766  }
767  if (pull_len) {
768  for (i = 0; i < num_pkts; i++) {
769  ptr = odp_packet_pull_tail(odp_pkts[i], pull_len);
770  if (EM_CHECK_LEVEL >= 3 && unlikely(!ptr))
771  goto err_pktalloc_multi; /* only before esv */
772  }
773  }
774 
775  /*
776  * Set the pkt user ptr to be able to recognize pkt-events that
777  * EM has created vs pkts from pkt-input that needs their
778  * ev-hdrs to be initialized.
779  */
780  for (i = 0; i < num_pkts; i++)
781  odp_packet_user_flag_set(odp_pkts[i], USER_FLAG_SET);
782 
783  return num_pkts;
784 
785 err_pktalloc_multi:
786  odp_packet_free_multi(odp_pkts, num_pkts);
787  return 0;
788 }
789 
/**
 * Allocate & initialize multiple events based on odp-pkts.
 *
 * Allocates from the 'best fit' subpool first, then walks the remaining
 * (bigger) subpools until 'num' pkts are allocated or the subpools are
 * exhausted.
 *
 * @param[out] events    Output array for the allocated event handles
 * @param      num       Number of events requested
 * @param      pool_elem EM pool to allocate from
 * @param      size      Requested event payload size
 * @param      type      Event type to store in each header
 *
 * @return The number of allocated & initialized events (0 ... num).
 */
static inline int
event_alloc_pkt_multi(em_event_t events[/*out*/], const int num,
		      const mpool_elem_t *pool_elem, uint32_t size,
		      em_event_type_t type)
{
	const uint32_t push_len = pool_elem->align_offset;
	uint32_t pull_len;
	odp_packet_t odp_pkts[num];
	/* use same output-array: odp_events[] = events[] */
	odp_event_t *const odp_events = (odp_event_t *)events;
	event_hdr_t *ev_hdrs[num];
	uint32_t alloc_size;
	int subpool;
	const bool esv_ena = esv_enabled();

	/* Compensate the alloc size for the head-push done in the helper */
	if (size > push_len) {
		alloc_size = size - push_len;
		pull_len = 0;
	} else {
		alloc_size = 1; /* min allowed */
		pull_len = push_len + 1 - size;
	}

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has pkts available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return 0;

	int num_req = num;   /* pkts still to be allocated */
	int num_pkts = 0;    /* pkts allocated so far */
	int i;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return 0;

		int ret = pktalloc_multi(&odp_pkts[num_pkts], num_req,
					 odp_pool, alloc_size,
					 push_len, pull_len);
		if (unlikely(ret <= 0))
			continue; /* try next subpool */

		/*
		 * Init 'ret' ev-hdrs from this 'subpool'=='odp-pool'.
		 * Note: odp_events[] points&writes into events[out]
		 */
		odp_packet_to_event_multi(&odp_pkts[num_pkts],
					  &odp_events[num_pkts], ret);

		for (i = num_pkts; i < num_pkts + ret; i++)
			ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]);

		/*
		 * Note: events[] == odp_events[] before ESV init.
		 * Don't touch odp_events[] during this loop-round anymore.
		 */
		if (esv_ena) {
			/* reads ev_hdrs[i]->flags if prealloc_pools used */
			evstate_alloc_multi(&events[num_pkts] /*in/out*/,
					    &ev_hdrs[num_pkts], ret);
		}

		for (i = num_pkts; i < num_pkts + ret; i++) {
			/* flags cleared only after evstate_alloc_multi() above */
			ev_hdrs[i]->flags.all = 0;
			ev_hdrs[i]->event_type = type;
			if (!esv_ena)
				ev_hdrs[i]->event = events[i];
			ev_hdrs[i]->event_size = size; /* original size */
			ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF;

			ev_hdrs[i]->user_area.all = 0;
			ev_hdrs[i]->user_area.size = pool_elem->user_area.size;
			ev_hdrs[i]->user_area.isinit = 1;
			/*ev_hdrs[i]->align_offset = needed by odp bufs only*/
		}

		num_pkts += ret;
		if (likely(num_pkts == num))
			break; /* all allocated */
		num_req -= ret;
	}

	return num_pkts; /* number of allocated pkts */
}
883 
/**
 * Allocate an event based on an odp packet vector.
 *
 * Tries the 'best fit' subpool first and falls back to bigger subpools when
 * that one is full. The common ev_hdr fields are initialized by the caller.
 *
 * @return Ptr to the event header in the odp pkt-vector user area,
 *         NULL on allocation failure.
 */
static inline event_hdr_t *
event_alloc_vector(const mpool_elem_t *pool_elem, uint32_t size)
{
	odp_packet_vector_t odp_pktvec = ODP_PACKET_VECTOR_INVALID;
	int subpool;

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has pkts available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return NULL;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return NULL;

		odp_pktvec = odp_packet_vector_alloc(odp_pool);
		if (likely(odp_pktvec != ODP_PACKET_VECTOR_INVALID))
			break;
	}

	if (unlikely(odp_pktvec == ODP_PACKET_VECTOR_INVALID))
		return NULL;

	/*
	 * Packet vector now allocated:
	 * Init the EM event header in the odp-pkt-vector user-area.
	 */

	/*
	 * Set the pktvec user flag to be able to recognize vectors that
	 * EM has created vs. vectors from pkt-input that needs their
	 * ev-hdrs to be initialized.
	 */
	odp_packet_vector_user_flag_set(odp_pktvec, USER_FLAG_SET);

	event_hdr_t *const ev_hdr = odp_packet_vector_user_area(odp_pktvec);
	odp_event_t odp_event = odp_packet_vector_to_event(odp_pktvec);
	em_event_t event = event_odp2em(odp_event);

	if (EM_CHECK_LEVEL >= 3 && unlikely(ev_hdr == NULL))
		goto err_vecalloc;

	ev_hdr->event = event; /* store this event handle */

	/* init common ev_hdr fields in the caller */

	return ev_hdr;

err_vecalloc:
	/* cleanup path: release the vector if no usable user area */
	odp_packet_vector_free(odp_pktvec);
	return NULL;
}
942 
943 /*
944  * Helper for event_alloc_vec_multi()
945  */
946 static inline int
947 vecalloc_multi(odp_packet_vector_t odp_pktvecs[/*out*/], int num,
948  odp_pool_t odp_pool)
949 {
950  int i;
951 
952  for (i = 0; i < num; i++) {
953  odp_pktvecs[i] = odp_packet_vector_alloc(odp_pool);
954  if (unlikely(odp_pktvecs[i] == ODP_PACKET_VECTOR_INVALID))
955  break;
956  }
957 
958  const int num_vecs = i;
959 
960  if (unlikely(num_vecs == 0))
961  return 0;
962 
963  /*
964  * Set the pkt vector user ptr to be able to recognize vector-events
965  * that EM has created vs vectors from pkt-input that needs their
966  * ev-hdrs to be initialized.
967  */
968  for (i = 0; i < num_vecs; i++)
969  odp_packet_vector_user_flag_set(odp_pktvecs[i], USER_FLAG_SET);
970 
971  return num_vecs;
972 }
973 
/**
 * Allocate & initialize multiple events based on odp-pkt-vectors.
 *
 * Allocates from the 'best fit' subpool first, then walks the remaining
 * (bigger) subpools until 'num' vectors are allocated or the subpools are
 * exhausted.
 *
 * @param[out] events    Output array for the allocated event handles
 * @param      num       Number of events requested
 * @param      pool_elem EM pool to allocate from
 * @param      size      Requested vector size
 * @param      type      Event type to store in each header
 *
 * @return The number of allocated & initialized events (0 ... num).
 */
static inline int
event_alloc_vector_multi(em_event_t events[/*out*/], const int num,
			 const mpool_elem_t *pool_elem, uint32_t size,
			 em_event_type_t type)
{
	odp_packet_vector_t odp_pktvecs[num];
	/* use same output-array: odp_events[] = events[] */
	odp_event_t *const odp_events = (odp_event_t *)events;
	event_hdr_t *ev_hdrs[num];
	int subpool;
	const bool esv_ena = esv_enabled();

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has pkts available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return 0;

	int num_req = num;   /* vectors still to be allocated */
	int num_vecs = 0;    /* vectors allocated so far */
	int i;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return 0;

		int ret = vecalloc_multi(&odp_pktvecs[num_vecs], num_req,
					 odp_pool);
		if (unlikely(ret <= 0))
			continue; /* try next subpool */

		/*
		 * Init 'ret' ev-hdrs from this 'subpool'=='odp-pool'.
		 * Note: odp_events[] points&writes into events[out]
		 */
		for (i = num_vecs; i < num_vecs + ret; i++) {
			odp_events[i] = odp_packet_vector_to_event(odp_pktvecs[i]);
			ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);
		}

		/*
		 * Note: events[] == odp_events[] before ESV init.
		 * Don't touch odp_events[] during this loop-round anymore.
		 */
		if (esv_ena) {
			/* reads ev_hdrs[i]->flags if prealloc_pools used */
			evstate_alloc_multi(&events[num_vecs] /*in/out*/,
					    &ev_hdrs[num_vecs], ret);
		}

		for (i = num_vecs; i < num_vecs + ret; i++) {
			/* flags cleared only after evstate_alloc_multi() above */
			ev_hdrs[i]->flags.all = 0;
			ev_hdrs[i]->event_type = type;
			if (!esv_ena)
				ev_hdrs[i]->event = events[i];
			ev_hdrs[i]->event_size = size; /* original vec size */
			ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF;

			ev_hdrs[i]->user_area.all = 0;
			ev_hdrs[i]->user_area.size = pool_elem->user_area.size;
			ev_hdrs[i]->user_area.isinit = 1;
			/*ev_hdrs[i]->align_offset = needed by odp bufs only*/
		}

		num_vecs += ret;
		if (likely(num_vecs == num))
			break; /* all allocated */
		num_req -= ret;
	}

	return num_vecs; /* number of allocated pkts */
}
1054 
1055 /**
1056  * Helper for em_alloc() and em_event_clone()
1057  */
1058 static inline em_event_t
1059 event_alloc(const mpool_elem_t *pool_elem, uint32_t size, em_event_type_t type,
1060  const uint16_t api_op)
1061 {
1062  /*
1063  * EM event pools created with type=PKT can support:
1064  * - SW events (bufs)
1065  * - pkt events.
1066  *
1067  * EM event pools created with type=SW can support:
1068  * - SW events (bufs) only
1069  */
1070  event_hdr_t *ev_hdr = NULL;
1071 
1072  if (pool_elem->event_type == EM_EVENT_TYPE_PACKET)
1073  ev_hdr = event_alloc_pkt(pool_elem, size);
1074  else if (pool_elem->event_type == EM_EVENT_TYPE_SW)
1075  ev_hdr = event_alloc_buf(pool_elem, size);
1076  else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR)
1077  ev_hdr = event_alloc_vector(pool_elem, size);
1078 
1079  if (unlikely(!ev_hdr))
1080  return EM_EVENT_UNDEF;
1081 
1082  /*
1083  * event now allocated:
1084  * ev_hdr->event = stored by event_alloc_pkt/buf/vector()
1085  */
1086  /* Update event ESV state for alloc */
1087  if (esv_enabled())
1088  (void)evstate_alloc(ev_hdr->event, ev_hdr, api_op);
1089 
1090  ev_hdr->flags.all = 0; /* clear only after evstate_alloc() */
1091  ev_hdr->event_type = type; /* store the event type */
1092  ev_hdr->event_size = size; /* store requested size */
1093  ev_hdr->egrp = EM_EVENT_GROUP_UNDEF;
1094 
1095  ev_hdr->user_area.all = 0;
1096  ev_hdr->user_area.size = pool_elem->user_area.size;
1097  ev_hdr->user_area.isinit = 1;
1098  /* ev_hdr->align_offset = init by event_alloc_buf() when needed */
1099 
1100  return ev_hdr->event;
1101 }
1102 
1103 /**
1104  * Start-up helper for pool preallocation
1105  */
1106 static inline event_prealloc_hdr_t *
1107 event_prealloc(const mpool_elem_t *pool_elem, uint32_t size)
1108 {
1109  /*
1110  * EM event pools created with type=PKT can support:
1111  * - SW events (bufs)
1112  * - pkt events.
1113  *
1114  * EM event pools created with type=SW can support:
1115  * - SW events (bufs) only
1116  */
1117  event_hdr_t *ev_hdr = NULL;
1118 
1119  if (pool_elem->event_type == EM_EVENT_TYPE_PACKET)
1120  ev_hdr = event_alloc_pkt(pool_elem, size);
1121  else if (pool_elem->event_type == EM_EVENT_TYPE_SW)
1122  ev_hdr = event_alloc_buf(pool_elem, size);
1123  else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR)
1124  ev_hdr = event_alloc_vector(pool_elem, size);
1125 
1126  if (unlikely(ev_hdr == NULL))
1127  return NULL;
1128 
1129  /* event now allocated */
1130 
1131  if (esv_enabled()) {
1132  em_event_t event = ev_hdr->event;
1133 
1134  (void)evstate_prealloc(event, ev_hdr);
1135  }
1136  ev_hdr->flags.all = 0; /* clear only after evstate_alloc() */
1137  ev_hdr->user_area.all = 0;
1138 
1139  event_prealloc_hdr_t *prealloc_hdr = (event_prealloc_hdr_t *)ev_hdr;
1140 
1141  return prealloc_hdr;
1142 }
1143 
1144 static inline event_prealloc_hdr_t *
1145 list_node_to_prealloc_hdr(list_node_t *const list_node)
1146 {
1147  event_prealloc_hdr_t *const ev_hdr = (event_prealloc_hdr_t *)(uintptr_t)
1148  ((uint8_t *)list_node - offsetof(event_prealloc_hdr_t, list_node));
1149 
1150  return likely(list_node != NULL) ? ev_hdr : NULL;
1151 }
1152 
1153 /**
1154  * @brief Convert event vector table content to odp packets in-place.
1155  *
1156  * Convert an EM event vector table, containing em_event_t:s with
1157  * esv-info (evgen), to a table of odp packets (remove handles' evgen in-place).
1158  */
1159 static inline void
1160 vector_tbl2odp(odp_event_t odp_event_pktvec)
1161 {
1162  odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec);
1163  odp_packet_t *pkt_tbl = NULL;
1164  const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl/*out*/);
1165 
1166  if (likely(pkts > 0)) {
1167  /* Careful! Points to same table */
1168  em_event_t *event_tbl = (em_event_t *)pkt_tbl;
1169 
1170  /* Drop ESV event generation (evgen) from event handle */
1171  (void)events_em2pkt_inplace(event_tbl, pkts);
1172  }
1173 }
1174 
1175 /**
1176  * @brief Convert ODP packet vector table content to EM events.
1177  *
1178  * Convert an ODP packet vector table to a table of EM events.
1179  * The content must be known to be raw odp packets.
1180  *
1181  * For recovery purposes only.
1182  */
static inline void
vector_tbl2em(odp_event_t odp_event_pktvec)
{
	odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec);
	odp_packet_t *pkt_tbl = NULL;
	const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl/*out*/);

	if (likely(pkts > 0)) {
		/* Careful! ev_tbl aliases the vector's own pkt-table */
		em_event_t *const ev_tbl = (em_event_t *const)pkt_tbl;
		odp_packet_t odp_pkttbl[pkts]; /* VLA, 'pkts' > 0 here */
		event_hdr_t *ev_hdr_tbl[pkts];

		/*
		 * Copy pkts from vector's pkt-table using events_em2pkt() that
		 * also drops any evgen-info from the handles if present.
		 */
		events_em2pkt(ev_tbl/*in*/, odp_pkttbl/*out*/, pkts);

		/* Re-initialize the contained packets as EM events,
		 * writing the recovered handles back into the vector table */
		event_init_pkt_multi(odp_pkttbl /*in*/, ev_tbl /*in,out*/,
				     ev_hdr_tbl /*out*/, pkts, false);
	}
}
1205 
/**
 * Enqueue one event onto the given queue's underlying odp-queue for
 * scheduling.
 *
 * @return EM_OK on success, otherwise an EM error code.
 */
static inline em_status_t
send_event(em_event_t event, const queue_elem_t *q_elem)
{
	const bool esv_ena = esv_enabled();
	odp_event_t odp_event = event_em2odp(event);
	odp_queue_t odp_queue = q_elem->odp_queue;
	int ret;

	/* Sanity checks, active only at higher EM_CHECK_LEVELs */
	if (unlikely(EM_CHECK_LEVEL > 1 &&
		     (odp_event == ODP_EVENT_INVALID ||
		      odp_queue == ODP_QUEUE_INVALID)))
		return EM_ERR_NOT_FOUND;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     q_elem->state != EM_QUEUE_STATE_READY)) {
		return EM_ERR_BAD_STATE;
	}

	/*
	 * Vector: convert the event vector table to a table of odp packets
	 * (in-place) before passing the vector and contents to the scheduler.
	 */
	if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR)
		vector_tbl2odp(odp_event);

	/* Enqueue event for scheduling */
	ret = odp_queue_enq(odp_queue, odp_event);

	if (unlikely(EM_CHECK_LEVEL > 0 && ret != 0)) {
		/* Restore EM vector event-table before returning vector to user */
		if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR)
			vector_tbl2em(odp_event);

		return EM_ERR_LIB_FAILED;
	}

	return EM_OK;
}
1244 
/**
 * Enqueue multiple events onto the given queue's underlying odp-queue
 * for scheduling.
 *
 * @return The number of events successfully enqueued, in [0, num].
 */
static inline int
send_event_multi(const em_event_t events[], const int num,
		 const queue_elem_t *q_elem)
{
	const bool esv_ena = esv_enabled();
	odp_event_t odp_events[num];
	odp_queue_t odp_queue = q_elem->odp_queue;

	/* Sanity checks, active only at higher EM_CHECK_LEVELs */
	if (unlikely(EM_CHECK_LEVEL > 1 && odp_queue == ODP_QUEUE_INVALID))
		return 0;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     q_elem->state != EM_QUEUE_STATE_READY)) {
		return 0;
	}

	events_em2odp(events, odp_events/*out*/, num);

	/*
	 * Vector: convert the event vector table to a table of odp packets
	 * (in-place) before passing the vector and contents to the scheduler.
	 */
	if (esv_ena) {
		for (int i = 0; i < num; i++) {
			if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR)
				vector_tbl2odp(odp_events[i]);
		}
	}

	/* Enqueue events for scheduling */
	int ret = odp_queue_enq_multi(odp_queue, odp_events, num);

	if (likely(ret == num))
		return num; /* Success! */

	/*
	 * Fail: could not enqueue all events (ret != num)
	 * ret < 0 means none were enqueued.
	 */
	int enq = ret < 0 ? 0 : ret;

	/* Restore EM vector event-table before returning vector to user */
	if (esv_ena) {
		/* only the events NOT enqueued ([enq, num)) stay with the user */
		for (int i = enq; i < num; i++) {
			if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR)
				vector_tbl2em(odp_events[i]);
		}
	}

	return enq; /* enq < num */
}
1295 
1296 static inline em_status_t
1297 send_local(em_event_t event, const queue_elem_t *q_elem)
1298 {
1299  em_locm_t *const locm = &em_locm;
1300  const em_queue_prio_t prio = q_elem->priority;
1301  evhdl_t evhdl = {.event = event};
1302  int ret;
1303 
1304  if (unlikely(EM_CHECK_LEVEL > 0 &&
1305  q_elem->state != EM_QUEUE_STATE_READY))
1306  return EM_ERR_BAD_STATE;
1307 
1308  em_queue_t queue = (em_queue_t)(uintptr_t)q_elem->queue;
1309  stash_entry_t entry = {.qidx = queue_hdl2idx(queue),
1310  .evptr = evhdl.evptr};
1311 
1312  ret = odp_stash_put_u64(locm->local_queues.prio[prio].stash,
1313  &entry.u64, 1);
1314  if (likely(ret == 1)) {
1315  locm->local_queues.empty = 0;
1316  locm->local_queues.prio[prio].empty_prio = 0;
1317  return EM_OK;
1318  }
1319 
1320  return EM_ERR_LIB_FAILED;
1321 }
1322 
1323 static inline int
1324 send_local_multi(const em_event_t events[], const int num,
1325  const queue_elem_t *q_elem)
1326 {
1327  em_locm_t *const locm = &em_locm;
1328  const em_queue_prio_t prio = q_elem->priority;
1329  const evhdl_t *const evhdl_tbl = (const evhdl_t *const)events;
1330 
1331  if (unlikely(EM_CHECK_LEVEL > 0 &&
1332  q_elem->state != EM_QUEUE_STATE_READY))
1333  return 0;
1334 
1335  stash_entry_t entry_tbl[num];
1336  em_queue_t queue = (em_queue_t)(uintptr_t)q_elem->queue;
1337  const uint16_t qidx = (uint16_t)queue_hdl2idx(queue);
1338 
1339  for (int i = 0; i < num; i++) {
1340  entry_tbl[i].qidx = qidx;
1341  entry_tbl[i].evptr = evhdl_tbl[i].evptr;
1342  }
1343 
1344  int ret = odp_stash_put_u64(locm->local_queues.prio[prio].stash,
1345  &entry_tbl[0].u64, num);
1346  if (likely(ret > 0)) {
1347  locm->local_queues.empty = 0;
1348  locm->local_queues.prio[prio].empty_prio = 0;
1349  return ret;
1350  }
1351 
1352  return 0;
1353 }
1354 
1355 /**
1356  * Send one event to a queue of type EM_QUEUE_TYPE_OUTPUT
1357  */
1358 static inline em_status_t
1359 send_output(em_event_t event, queue_elem_t *const output_q_elem)
1360 {
1361  const em_sched_context_type_t sched_ctx_type =
1363 
1364  if (unlikely(EM_CHECK_LEVEL > 0 &&
1365  output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED))
1366  return EM_ERR_BAD_STATE;
1367 
1368  /*
1369  * An event sent to an output queue from an ordered context needs to
1370  * be 're-ordered' before calling the user provided output-function.
1371  * Order is maintained by enqueuing and dequeuing into an odp-queue
1372  * that takes care of order.
1373  */
1374  if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) {
1375  const odp_queue_t odp_queue = output_q_elem->odp_queue;
1376  odp_event_t odp_event = event_em2odp(event);
1377  int ret;
1378 
1379  if (unlikely(EM_CHECK_LEVEL > 1 &&
1380  (odp_event == ODP_EVENT_INVALID ||
1381  odp_queue == ODP_QUEUE_INVALID)))
1382  return EM_ERR_NOT_FOUND;
1383 
1385  output_queue_track(output_q_elem);
1386 
1387  /* enqueue to enforce odp to handle ordering */
1388  ret = odp_queue_enq(odp_queue, odp_event);
1389  if (unlikely(ret != 0))
1390  return EM_ERR_LIB_FAILED;
1391 
1392  /* return value must be EM_OK after this since event enqueued */
1393 
1395  env_spinlock_t *const lock =
1396  &output_q_elem->output.lock;
1397 
1398  if (!env_spinlock_trylock(lock))
1399  return EM_OK;
1400  output_queue_drain(output_q_elem);
1401  env_spinlock_unlock(lock);
1402  }
1403 
1404  return EM_OK;
1405  }
1406 
1407  /*
1408  * No ordered context - call output_fn() directly
1409  */
1410  const em_queue_t output_queue = (em_queue_t)(uintptr_t)output_q_elem->queue;
1411  const em_output_func_t output_fn =
1412  output_q_elem->output.output_conf.output_fn;
1413  void *const output_fn_args =
1414  output_q_elem->output.output_conf.output_fn_args;
1415  int sent;
1416 
1417  sent = output_fn(&event, 1, output_queue, output_fn_args);
1418  if (unlikely(sent != 1))
1419  return EM_ERR_OPERATION_FAILED;
1420 
1421  return EM_OK;
1422 }
1423 
1424 /**
1425  * Send events to a queue of type EM_QUEUE_TYPE_OUTPUT
1426  */
1427 static inline int
1428 send_output_multi(const em_event_t events[], const unsigned int num,
1429  queue_elem_t *const output_q_elem)
1430 {
1431  const em_sched_context_type_t sched_ctx_type =
1433  int sent;
1434 
1435  if (unlikely(EM_CHECK_LEVEL > 0 &&
1436  output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED))
1437  return 0;
1438 
1439  /*
1440  * Event sent to an output queue from an ordered context needs to
1441  * be 're-ordered' before calling the user provided output-function.
1442  * Order is maintained by enqueuing and dequeuing into an odp-queue
1443  * that takes care of order.
1444  */
1445  if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) {
1446  const odp_queue_t odp_queue = output_q_elem->odp_queue;
1447  odp_event_t odp_events[num];
1448 
1449  if (unlikely(EM_CHECK_LEVEL > 1 &&
1450  odp_queue == ODP_QUEUE_INVALID))
1451  return 0;
1452 
1454  output_queue_track(output_q_elem);
1455 
1456  events_em2odp(events, odp_events/*out*/, num);
1457 
1458  /* enqueue to enforce odp to handle ordering */
1459  sent = odp_queue_enq_multi(odp_queue, odp_events, num);
1460  if (unlikely(sent <= 0))
1461  return 0;
1462 
1463  /* the return value must be the number of enqueued events */
1464 
1466  env_spinlock_t *const lock =
1467  &output_q_elem->output.lock;
1468 
1469  if (!env_spinlock_trylock(lock))
1470  return sent;
1471  output_queue_drain(output_q_elem);
1472  env_spinlock_unlock(lock);
1473  }
1474 
1475  return sent;
1476  }
1477 
1478  /*
1479  * No ordered context - call output_fn() directly
1480  */
1481  const em_queue_t output_queue = (em_queue_t)(uintptr_t)output_q_elem->queue;
1482  const em_output_func_t output_fn = output_q_elem->output.output_conf.output_fn;
1483  void *const output_fn_args = output_q_elem->output.output_conf.output_fn_args;
1484 
1485  sent = output_fn(events, num, output_queue, output_fn_args);
1486 
1487  return sent;
1488 }
1489 
1490 /**
1491  * Return a pointer to the EM event user payload.
1492  * Helper to e.g. EM API em_event_pointer()
1493  */
1494 static inline void *
1495 event_pointer(em_event_t event)
1496 {
1497  const odp_event_t odp_event = event_em2odp(event);
1498  const odp_event_type_t odp_etype = odp_event_type(odp_event);
1499  void *ev_ptr = NULL; /* return value */
1500 
1501  if (odp_etype == ODP_EVENT_PACKET) {
1502  const odp_packet_t odp_pkt = odp_packet_from_event(odp_event);
1503 
1504  ev_ptr = odp_packet_data(odp_pkt);
1505  } else if (odp_etype == ODP_EVENT_BUFFER) {
1506  const odp_buffer_t odp_buf = odp_buffer_from_event(odp_event);
1507  const event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf);
1508  const uint32_t align_offset = ev_hdr->align_offset;
1509 
1510  ev_ptr = odp_buffer_addr(odp_buf);
1511 
1512  if (align_offset)
1513  ev_ptr = (void *)((uintptr_t)ev_ptr + 32 - align_offset);
1514  }
1515 
1516  return ev_ptr; /* NULL for unrecognized odp_etype, also for vectors */
1517 }
1518 
1519 static inline bool
1520 event_has_ref(em_event_t event)
1521 {
1522  odp_event_t odp_event = event_em2odp(event);
1523  odp_event_type_t odp_etype = odp_event_type(odp_event);
1524 
1525  if (odp_etype != ODP_EVENT_PACKET)
1526  return false;
1527 
1528  odp_packet_t odp_pkt = odp_packet_from_event(odp_event);
1529 
1530  return odp_packet_has_ref(odp_pkt) ? true : false;
1531 }
1532 
1533 #ifdef __cplusplus
1534 }
1535 #endif
1536 
1537 #endif /* EM_EVENT_H_ */
EM_TMO_TYPE_NONE
@ EM_TMO_TYPE_NONE
Definition: api/event_machine_timer.h:251
mpool_elem_t::size
uint16_t size
Definition: em_pool_types.h:55
EM_EVENT_TYPE_VECTOR
@ EM_EVENT_TYPE_VECTOR
Definition: event_machine_hw_types.h:84
EM_OK
#define EM_OK
Definition: event_machine_types.h:329
event_hdr::event_size
uint32_t event_size
Definition: em_event_types.h:255
EM_EVENT_TYPE_SW
@ EM_EVENT_TYPE_SW
Definition: event_machine_hw_types.h:72
ODP_PACKED::sched_context_type
em_sched_context_type_t sched_context_type
Definition: em_mem.h:170
em_locm_t::local_queues
local_queues_t local_queues
Definition: em_mem.h:222
mpool_elem_t::user_area
struct mpool_elem_t::@54 user_area
em_output_func_t
int(* em_output_func_t)(const em_event_t events[], const unsigned int num, const em_queue_t output_queue, void *output_fn_args)
Definition: event_machine_hw_types.h:387
EM_EVENT_UNDEF
#define EM_EVENT_UNDEF
Definition: event_machine_types.h:62
USER_FLAG_SET
#define USER_FLAG_SET
Definition: em_event_types.h:56
mpool_elem_t::odp_pool
odp_pool_t odp_pool[EM_MAX_SUBPOOLS]
Definition: em_pool_types.h:69
em_output_queue_conf_t::output_fn
em_output_func_t output_fn
Definition: event_machine_hw_types.h:402
event_hdr::event_type
em_event_type_t event_type
Definition: em_event_types.h:241
em_locm
ENV_LOCAL em_locm_t em_locm
em_output_queue_conf_t::output_fn_args
void * output_fn_args
Definition: event_machine_hw_types.h:406
stash_entry_t
Definition: em_event_types.h:86
em_locm_t::current
em_locm_current_t current
Definition: em_mem.h:190
EM_ERR_LIB_FAILED
@ EM_ERR_LIB_FAILED
Definition: event_machine_hw_types.h:291
event_hdr::refs_used
uint8_t refs_used
Definition: em_event_types.h:220
q_elem_output_::lock
env_spinlock_t lock
Definition: em_queue_types.h:174
evstate_init
em_event_t evstate_init(const em_event_t event, event_hdr_t *const ev_hdr, bool is_extev)
Definition: em_event_state.c:788
queue_elem_t::state
queue_state_t state
Definition: em_queue_types.h:210
evhdl_t
Definition: em_event_types.h:67
event_hdr::flags
union event_hdr::@34 flags
list_node_t
Definition: list.h:42
pkt_clone_odp
em_event_t pkt_clone_odp(odp_packet_t pkt, odp_pool_t pkt_pool, uint32_t offset, uint32_t size, bool is_clone_part)
Definition: em_event.c:105
event_hdr::event
em_event_t event
Definition: em_event_types.h:246
q_elem_output_::output_conf
em_output_queue_conf_t output_conf
Definition: em_queue_types.h:168
event_prealloc_hdr
Definition: em_event_types.h:301
event_hdr
Definition: em_event_types.h:184
evstate_update
em_event_t evstate_update(const em_event_t event, event_hdr_t *const ev_hdr, bool is_extev)
Definition: em_event_state.c:895
queue_elem_t::odp_queue
odp_queue_t odp_queue
Definition: em_queue_types.h:228
mpool_elem_t
Definition: em_pool_types.h:47
queue_elem_t::queue
uint32_t queue
Definition: em_queue_types.h:225
evstate_alloc
em_event_t evstate_alloc(const em_event_t event, event_hdr_t *const ev_hdr, const uint16_t api_op)
Definition: em_event_state.c:738
INTERNAL_ERROR
#define INTERNAL_ERROR(error, escope, fmt,...)
Definition: em_error.h:43
EM_OUTPUT_QUEUE_IMMEDIATE
#define EM_OUTPUT_QUEUE_IMMEDIATE
Definition: event_machine_hw_config.h:261
event_hdr::align_offset
uint8_t align_offset
Definition: em_event_types.h:236
mpool_elem_t::event_type
em_event_type_t event_type
Definition: em_pool_types.h:49
mpool_elem_t::align_offset
uint32_t align_offset
Definition: em_pool_types.h:51
mpool_elem_t::num_subpools
int num_subpools
Definition: em_pool_types.h:65
event_hdr::user_area
ev_hdr_user_area_t user_area
Definition: em_event_types.h:277
em_escope_t
uint32_t em_escope_t
Definition: event_machine_types.h:348
em_status_t
uint32_t em_status_t
Definition: event_machine_types.h:321
EM_QUEUE_STATE_READY
@ EM_QUEUE_STATE_READY
Definition: em_queue_types.h:137
EM_SCHED_CONTEXT_TYPE_ORDERED
@ EM_SCHED_CONTEXT_TYPE_ORDERED
Definition: event_machine_types.h:289
EM_CHECK_LEVEL
#define EM_CHECK_LEVEL
Definition: event_machine_config.h:253
EM_ERR_OPERATION_FAILED
@ EM_ERR_OPERATION_FAILED
Definition: event_machine_hw_types.h:289
event_hdr::egrp
em_event_group_t egrp
Definition: em_event_types.h:265
queue_elem_t::priority
uint8_t priority
Definition: em_queue_types.h:213
evstate_em2usr
em_event_t evstate_em2usr(em_event_t event, event_hdr_t *const ev_hdr, const uint16_t api_op)
Definition: em_event_state.c:952
evstate_prealloc
em_event_t evstate_prealloc(const em_event_t event, event_hdr_t *const ev_hdr)
Definition: em_event_state.c:733
em_event_type_t
uint32_t em_event_type_t
Definition: event_machine_types.h:85
em_shm
em_shm_t * em_shm
Definition: event_machine_init.c:41
evstate_alloc_multi
void evstate_alloc_multi(em_event_t ev_tbl[], event_hdr_t *const ev_hdr_tbl[], const int num)
Definition: em_event_state.c:755
EM_ERR_NOT_FOUND
@ EM_ERR_NOT_FOUND
Definition: event_machine_hw_types.h:278
evstate_em2usr_multi
void evstate_em2usr_multi(em_event_t ev_tbl[], event_hdr_t *const ev_hdr_tbl[], const int num, const uint16_t api_op)
Definition: em_event_state.c:970
EM_POOL_UNDEF
#define EM_POOL_UNDEF
Definition: event_machine_hw_types.h:60
EM_EVENT_TYPE_PACKET
@ EM_EVENT_TYPE_PACKET
Definition: event_machine_hw_types.h:75
em_queue_prio_t
uint32_t em_queue_prio_t
Definition: event_machine_types.h:186
EM_ERR_NOT_IMPLEMENTED
@ EM_ERR_NOT_IMPLEMENTED
Definition: event_machine_hw_types.h:282
em_locm_t
Definition: em_mem.h:188
ev_hdr_user_area_t::size
uint32_t size
Definition: em_event_types.h:170
EM_ERR_BAD_STATE
@ EM_ERR_BAD_STATE
Definition: event_machine_hw_types.h:263
EM_EVENT_GROUP_UNDEF
#define EM_EVENT_GROUP_UNDEF
Definition: event_machine_types.h:141
queue_elem_t
Definition: em_queue_types.h:180
em_sched_context_type_t
em_sched_context_type_t
Definition: event_machine_types.h:277
ev_hdr_user_area_t::isinit
uint32_t isinit
Definition: em_event_types.h:168