EM-ODP  3.8.0-1
Event Machine on ODP
em_event.h
/*
 * Copyright (c) 2015-2023, Nokia Solutions and Networks
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * * Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * EM internal event functions
 */

#ifndef EM_EVENT_H_
#define EM_EVENT_H_

#ifdef __cplusplus
extern "C" {
#endif

#ifndef __clang__
COMPILE_TIME_ASSERT((uintptr_t)EM_EVENT_UNDEF == (uintptr_t)ODP_EVENT_INVALID,
		    EM_EVENT_NOT_EQUAL_TO_ODP_EVENT);
COMPILE_TIME_ASSERT(EM_TMO_TYPE_NONE == 0,
		    "EM_TMO_TYPE_NONE must be 0");
#endif

em_status_t event_init(void);
void print_event_info(void);
em_event_t pkt_clone_odp(odp_packet_t pkt, odp_pool_t pkt_pool,
			 uint32_t offset, uint32_t size,
			 bool clone_uarea, bool is_clone_part);
void output_queue_track(queue_elem_t *const output_q_elem);
void output_queue_drain(const queue_elem_t *output_q_elem);
void output_queue_buffering_drain(void);

uint32_t event_vector_tbl(em_event_t vector_event, em_event_t **event_tbl/*out*/);
em_status_t event_vector_max_size(em_event_t vector_event, uint32_t *max_size /*out*/,
				  em_escope_t escope);

/**
 * Initialize the event header of a packet allocated outside of EM.
 */
static inline em_event_t
evhdr_init_pkt(event_hdr_t *ev_hdr, em_event_t event,
	       odp_packet_t odp_pkt, bool is_extev)
{
	const int user_flag_set = odp_packet_user_flag(odp_pkt);
	const bool esv_ena = esv_enabled();

	if (user_flag_set) {
		/* Event already initialized by EM */
		if (esv_ena) {
			event = ev_hdr->event;
			if (is_extev)
				event = evstate_em2usr(event, ev_hdr, EVSTATE__DISPATCH);
		}

		return event;
	}

	/*
	 * ODP pkt from outside of EM - not allocated by EM & needs init
	 */
	odp_packet_user_flag_set(odp_pkt, USER_FLAG_SET);
	ev_hdr->event_type = EM_EVENT_TYPE_PACKET;
	ev_hdr->egrp = EM_EVENT_GROUP_UNDEF;
	ev_hdr->user_area.all = 0; /* uarea fields init when used */

	if (!esv_ena) {
		ev_hdr->flags.all = 0;
		ev_hdr->event = event;
		return event;
	}

	/*
	 * ESV enabled:
	 */
	if (!em_shm->opt.esv.prealloc_pools || ev_hdr->flags.refs_used) {
		/* No prealloc OR pkt was a ref before being freed into the pool */
		event = evstate_init(event, ev_hdr, is_extev);
	} else {
		/* esv.prealloc_pools == true: */
		odp_pool_t odp_pool = odp_packet_pool(odp_pkt);
		em_pool_t pool = pool_odp2em(odp_pool);

		if (pool == EM_POOL_UNDEF) {
			/* External odp pkt originates from an ODP-pool */
			event = evstate_init(event, ev_hdr, is_extev);
		} else {
			/* External odp pkt originates from an EM-pool */
			event = evstate_update(event, ev_hdr, is_extev);
		}
	}
	ev_hdr->flags.all = 0; /* clear only after evstate_...() */

	return event;
}

/**
 * Initialize the event headers of packets allocated outside of EM.
 */
static inline void
evhdr_init_pkt_multi(event_hdr_t *const ev_hdrs[],
		     em_event_t events[/*in,out*/],
		     const odp_packet_t odp_pkts[/*in*/],
		     const int num, bool is_extev)
{
	const bool esv_ena = esv_enabled();
	int user_flag_set;

	int needs_init_idx[num];
	int needs_init_num = 0;
	int idx;

	for (int i = 0; i < num; i++) {
		user_flag_set = odp_packet_user_flag(odp_pkts[i]);
		if (user_flag_set) {
			/* Event already initialized by EM */
			if (esv_ena) {
				events[i] = ev_hdrs[i]->event;
				if (is_extev)
					events[i] = evstate_em2usr(events[i], ev_hdrs[i],
								   EVSTATE__DISPATCH_MULTI);
			}
			/* else events[i] = events[i] */
		} else {
			odp_packet_user_flag_set(odp_pkts[i], USER_FLAG_SET);
			needs_init_idx[needs_init_num] = i;
			needs_init_num++;
		}
	}

	if (needs_init_num == 0)
		return;

	/*
	 * ODP pkt from outside of EM - not allocated by EM & needs init
	 */

	if (!esv_ena) {
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];
			ev_hdrs[idx]->flags.all = 0;
			ev_hdrs[idx]->event_type = EM_EVENT_TYPE_PACKET;
			ev_hdrs[idx]->event = events[idx];
			ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF;
			ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */
		}

		return;
	}

	/*
	 * ESV enabled:
	 */
	if (!em_shm->opt.esv.prealloc_pools) {
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];
			events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
		}
	} else {
		/* em_shm->opt.esv.prealloc_pools == true */
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];

			odp_pool_t odp_pool = odp_packet_pool(odp_pkts[idx]);
			em_pool_t pool = pool_odp2em(odp_pool);

			if (pool == EM_POOL_UNDEF || ev_hdrs[idx]->flags.refs_used) {
				/*
				 * External odp pkt originates from an ODP-pool,
				 * or pkt was a ref before being freed into the pool.
				 */
				events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
			} else {
				/* External odp pkt originates from an EM-pool */
				events[idx] = evstate_update(events[idx], ev_hdrs[idx], is_extev);
			}
		}
	}

	for (int i = 0; i < needs_init_num; i++) {
		idx = needs_init_idx[i];
		ev_hdrs[idx]->flags.all = 0; /* clear only after evstate_...() */
		ev_hdrs[idx]->event_type = EM_EVENT_TYPE_PACKET;
		ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF;
		ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */
	}
}

/**
 * Initialize the event header of a packet vector allocated outside of EM.
 */
static inline em_event_t
evhdr_init_pktvec(event_hdr_t *ev_hdr, em_event_t event,
		  odp_packet_vector_t odp_pktvec, bool is_extev)
{
	const int user_flag = odp_packet_vector_user_flag(odp_pktvec);
	const bool esv_ena = esv_enabled();

	if (user_flag == USER_FLAG_SET) {
		/* Event already initialized by EM */
		if (esv_ena) {
			event = ev_hdr->event;
			if (is_extev)
				event = evstate_em2usr(event, ev_hdr, EVSTATE__DISPATCH);
		}

		return event;
	}

	/*
	 * ODP pkt vector from outside of EM - not allocated by EM & needs init
	 */
	odp_packet_vector_user_flag_set(odp_pktvec, USER_FLAG_SET);
	/* can clear flags before evstate_...(), flags.refs_used not set for vecs */
	ev_hdr->flags.all = 0;
	ev_hdr->event_type = EM_EVENT_TYPE_VECTOR;
	ev_hdr->egrp = EM_EVENT_GROUP_UNDEF;
	ev_hdr->user_area.all = 0; /* uarea fields init when used */

	if (!esv_ena) {
		ev_hdr->event = event;
		return event;
	}

	/*
	 * ESV enabled:
	 */
	if (!em_shm->opt.esv.prealloc_pools) {
		event = evstate_init(event, ev_hdr, is_extev);
	} else {
		/* esv.prealloc_pools == true: */
		odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvec);
		em_pool_t pool = pool_odp2em(odp_pool);

		if (pool == EM_POOL_UNDEF) {
			/* External odp pktvec originates from an ODP-pool */
			event = evstate_init(event, ev_hdr, is_extev);
		} else {
			/* External odp pktvec originates from an EM-pool */
			event = evstate_update(event, ev_hdr, is_extev);
			if (is_extev)
				return event;
		}
	}

	return event;
}

/**
 * Initialize the event headers of packet vectors allocated outside of EM.
 */
static inline void
evhdr_init_pktvec_multi(event_hdr_t *ev_hdrs[/*out*/],
			em_event_t events[/*in,out*/],
			const odp_packet_vector_t odp_pktvecs[/*in*/],
			const int num, bool is_extev)
{
	const bool esv_ena = esv_enabled();

	int needs_init_idx[num];
	int needs_init_num = 0;
	int idx;

	for (int i = 0; i < num; i++) {
		int user_flag = odp_packet_vector_user_flag(odp_pktvecs[i]);

		if (user_flag == USER_FLAG_SET) {
			/* Event already initialized by EM */
			if (esv_ena) {
				events[i] = ev_hdrs[i]->event;
				if (is_extev)
					events[i] = evstate_em2usr(events[i], ev_hdrs[i],
								   EVSTATE__DISPATCH_MULTI);
			}
			/* else events[i] = events[i] */
		} else {
			odp_packet_vector_user_flag_set(odp_pktvecs[i], USER_FLAG_SET);
			needs_init_idx[needs_init_num] = i;
			needs_init_num++;
		}
	}

	if (needs_init_num == 0)
		return;

	/*
	 * ODP pkt vector from outside of EM - not allocated by EM & needs init
	 */

	if (!esv_ena) {
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];
			ev_hdrs[idx]->flags.all = 0;
			ev_hdrs[idx]->event_type = EM_EVENT_TYPE_VECTOR;
			ev_hdrs[idx]->event = events[idx];
			ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF;
			ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */
		}

		return;
	}

	/*
	 * ESV enabled:
	 */
	for (int i = 0; i < needs_init_num; i++) {
		idx = needs_init_idx[i];
		/* can clear flags before evstate_...(), flags.refs_used not set for vecs */
		ev_hdrs[idx]->flags.all = 0;
		ev_hdrs[idx]->event_type = EM_EVENT_TYPE_VECTOR;
		ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF;
		ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */
	}

	if (!em_shm->opt.esv.prealloc_pools) {
		for (int i = 0; i < needs_init_num; i++) {
			idx = needs_init_idx[i];
			events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
		}

		return;
	}

	/*
	 * em_shm->opt.esv.prealloc_pools == true
	 */
	for (int i = 0; i < needs_init_num; i++) {
		idx = needs_init_idx[i];

		odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvecs[idx]);
		em_pool_t pool = pool_odp2em(odp_pool);

		if (pool == EM_POOL_UNDEF) {
			/* External odp pktvec originates from an ODP-pool */
			events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev);
		} else {
			/* External odp pktvec originates from an EM-pool */
			events[idx] = evstate_update(events[idx], ev_hdrs[idx], is_extev);
		}
	}
}

/**
 * Initialize an external ODP event that has been input into EM.
 *
 * Initialize the event header if needed, i.e. if the event originated from
 * outside of EM (pktio or other input) and was not allocated by EM via
 * em_alloc(). The odp pkt user-flag is used to determine whether the header
 * has been initialized or not.
 */
static inline em_event_t
event_init_odp(odp_event_t odp_event, bool is_extev, event_hdr_t **ev_hdr__out)
{
	const odp_event_type_t odp_type = odp_event_type(odp_event);
	em_event_t event = event_odp2em(odp_event); /* return value, updated by ESV */

	switch (odp_type) {
	case ODP_EVENT_PACKET: {
		odp_packet_t odp_pkt = odp_packet_from_event(odp_event);
		event_hdr_t *ev_hdr = odp_packet_user_area(odp_pkt);

		/* init event-hdr if needed (also ESV-state if used) */
		event = evhdr_init_pkt(ev_hdr, event, odp_pkt, is_extev);
		if (ev_hdr__out)
			*ev_hdr__out = ev_hdr;
		return event;
	}
	case ODP_EVENT_BUFFER: {
		const bool esv_ena = esv_enabled();

		if (!ev_hdr__out && !esv_ena)
			return event;

		odp_buffer_t odp_buf = odp_buffer_from_event(odp_event);
		event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf);

		if (esv_ena) { /* update event handle (ESV) */
			event = ev_hdr->event;
			if (is_extev)
				event = evstate_em2usr(event, ev_hdr, EVSTATE__DISPATCH);
		}
		if (ev_hdr__out)
			*ev_hdr__out = ev_hdr;
		return event;
	}
	case ODP_EVENT_PACKET_VECTOR: {
		odp_packet_vector_t odp_pktvec = odp_packet_vector_from_event(odp_event);
		event_hdr_t *ev_hdr = odp_packet_vector_user_area(odp_pktvec);

		/* init event-hdr if needed (also ESV-state if used) */
		event = evhdr_init_pktvec(ev_hdr, event, odp_pktvec, is_extev);
		if (ev_hdr__out)
			*ev_hdr__out = ev_hdr;
		return event;
	}
	case ODP_EVENT_TIMEOUT: {
		odp_timeout_t odp_tmo = odp_timeout_from_event(odp_event);
		event_hdr_t *ev_hdr = odp_timeout_user_area(odp_tmo);
		const bool esv_ena = esv_enabled();

		if (esv_ena) {
			/*
			 * Update event handle, no other ESV checks done.
			 * Some timers might send a copy of the original event
			 * in tear-down, thus keep ptr but update evgen.
			 */
			evhdl_t evhdl = {.event = event}; /* .evptr from here */
			evhdl_t evhdr_hdl = {.event = ev_hdr->event}; /* .evgen from here */

			evhdl.evgen = evhdr_hdl.evgen; /* update .evgen */
			ev_hdr->event = evhdl.event; /* store updated hdl in hdr */
			event = evhdl.event; /* return updated event */
		}

		if (ev_hdr__out)
			*ev_hdr__out = ev_hdr;
		return event;
	}
	default:
		INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED),
			       EM_ESCOPE_EVENT_INIT_ODP,
			       "Unexpected odp event type:%u", odp_type);
		__builtin_unreachable();
		/* never reached */
		return EM_EVENT_UNDEF;
	}
}
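
/*
 * Illustrative usage sketch (not part of this header, hence under '#if 0'):
 * how a receive loop could use event_init_odp() to turn a raw odp_event_t
 * from the scheduler into a valid EM event before dispatching it.
 * The 'example_' name is hypothetical; only event_init_odp() is real.
 */
#if 0
static em_event_t
example_recv_one(odp_event_t odp_event)
{
	event_hdr_t *ev_hdr = NULL;

	/* is_extev=true: the event enters EM from the outside (e.g. pktio) */
	em_event_t event = event_init_odp(odp_event, true /*is_extev*/, &ev_hdr);

	/* ev_hdr now points to an initialized EM event header */
	return event;
}
#endif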

/* Helper to event_init_odp_multi() */
static inline void
event_init_pkt_multi(const odp_packet_t odp_pkts[/*in*/],
		     em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
		     const int num, bool is_extev)
{
	for (int i = 0; i < num; i++)
		ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]);

	evhdr_init_pkt_multi(ev_hdrs, events, odp_pkts, num, is_extev);
}

/* Helper to event_init_odp_multi() */
static inline void
event_init_buf_multi(const odp_buffer_t odp_bufs[/*in*/],
		     em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
		     const int num, bool is_extev)
{
	for (int i = 0; i < num; i++)
		ev_hdrs[i] = odp_buffer_user_area(odp_bufs[i]);

	if (esv_enabled()) {
		/* update event handle (ESV) */
		for (int i = 0; i < num; i++)
			events[i] = ev_hdrs[i]->event;

		if (is_extev)
			evstate_em2usr_multi(events, ev_hdrs, num,
					     EVSTATE__DISPATCH_MULTI);
	}
}

/* Helper to event_init_odp_multi() */
static inline void
event_init_tmo_multi(const odp_timeout_t odp_tmos[/*in*/],
		     em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
		     const int num)
{
	for (int i = 0; i < num; i++)
		ev_hdrs[i] = odp_timeout_user_area(odp_tmos[i]);

	/* ignore ESV */
	(void)events;
}

/* Helper to event_init_odp_multi() */
static inline void
event_init_pktvec_multi(const odp_packet_vector_t odp_pktvecs[/*in*/],
			em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
			const int num, bool is_extev)
{
	for (int i = 0; i < num; i++)
		ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);

	evhdr_init_pktvec_multi(ev_hdrs, events, odp_pktvecs, num, is_extev);
}

/**
 * Convert from EM events to event headers and initialize the headers as needed.
 *
 * Initialize the event header if needed, i.e. if the event originated from
 * outside of EM (pktio or other input) and was not allocated by EM via
 * em_alloc(). The odp pkt user-flag is used to determine whether the header
 * has been initialized or not.
 */
static inline void
event_init_odp_multi(const odp_event_t odp_events[/*in*/],
		     em_event_t events[/*out*/], event_hdr_t *ev_hdrs[/*out*/],
		     const int num, bool is_extev)
{
	for (int i = 0; i < num; i++)
		events[i] = event_init_odp(odp_events[i], is_extev, &ev_hdrs[i]);
}

/**
 * Allocate an event based on an odp-buf.
 */
static inline event_hdr_t *
event_alloc_buf(const mpool_elem_t *const pool_elem, uint32_t size)
{
	odp_buffer_t odp_buf = ODP_BUFFER_INVALID;
	int subpool;

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has buffers available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return NULL;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return NULL;

		odp_buf = odp_buffer_alloc(odp_pool);
		if (likely(odp_buf != ODP_BUFFER_INVALID))
			break;
	}

	if (unlikely(odp_buf == ODP_BUFFER_INVALID))
		return NULL;

	/*
	 * odp buffer now allocated - init the EM event header
	 * in the odp user area.
	 */
	event_hdr_t *const ev_hdr = odp_buffer_user_area(odp_buf);
	odp_event_t odp_event = odp_buffer_to_event(odp_buf);
	em_event_t event = event_odp2em(odp_event);

	ev_hdr->event = event; /* store this event handle */
	ev_hdr->align_offset = pool_elem->align_offset;

	/* init common ev_hdr fields in the caller */

	return ev_hdr;
}
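
/*
 * Worked example of the 'best fit' subpool search above (sizes are
 * illustrative): a pool created with subpool buffer sizes {256, 512, 1024}
 * and an allocation of size=300 gives pool_find_subpool() == 1, i.e. the
 * 512 B subpool. If that subpool happens to be exhausted, the loop falls
 * through to the 1024 B subpool before giving up and returning NULL.
 */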

/**
 * Allocate & initialize multiple events based on odp-bufs.
 */
static inline int
event_alloc_buf_multi(em_event_t events[/*out*/], const int num,
		      const mpool_elem_t *pool_elem, uint32_t size,
		      em_event_type_t type)
{
	odp_buffer_t odp_bufs[num];
	odp_event_t odp_event;
	event_hdr_t *ev_hdrs[num];
	int subpool;
	const bool esv_ena = esv_enabled();

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has buffers available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return 0;

	int num_req = num;
	int num_bufs = 0;
	int i;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return 0;

		int ret = odp_buffer_alloc_multi(odp_pool, &odp_bufs[num_bufs],
						 num_req);
		if (unlikely(ret <= 0))
			continue; /* try next subpool */

		/* store the allocated events[] */
		for (i = num_bufs; i < num_bufs + ret; i++) {
			odp_event = odp_buffer_to_event(odp_bufs[i]);
			events[i] = event_odp2em(odp_event);
		}

		/* Init 'ret' ev-hdrs from this 'subpool'=='odp-pool' */
		for (i = num_bufs; i < num_bufs + ret; i++)
			ev_hdrs[i] = odp_buffer_user_area(odp_bufs[i]);

		if (esv_ena) {
			/* reads ev_hdrs[i]->flags if prealloc_pools used */
			evstate_alloc_multi(&events[num_bufs] /*in/out*/,
					    &ev_hdrs[num_bufs], ret);
		}

		for (i = num_bufs; i < num_bufs + ret; i++) {
			ev_hdrs[i]->flags.all = 0;
			ev_hdrs[i]->event_type = type;
			if (!esv_ena)
				ev_hdrs[i]->event = events[i];
			ev_hdrs[i]->event_size = size;
			ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF;

			ev_hdrs[i]->user_area.all = 0;
			ev_hdrs[i]->user_area.size = pool_elem->user_area.size;
			ev_hdrs[i]->user_area.isinit = 1;

			ev_hdrs[i]->align_offset = pool_elem->align_offset;
		}

		num_bufs += ret;
		if (likely(num_bufs == num))
			break; /* all allocated */
		num_req -= ret;
	}

	return num_bufs; /* number of allocated bufs (0 ... num) */
}

/**
 * Allocate & initialize an event based on an odp-pkt.
 */
static inline event_hdr_t *
event_alloc_pkt(const mpool_elem_t *pool_elem, uint32_t size)
{
	const uint32_t push_len = pool_elem->align_offset;
	uint32_t pull_len;
	uint32_t alloc_size;
	odp_packet_t odp_pkt = ODP_PACKET_INVALID;
	int subpool;

	if (size > push_len) {
		alloc_size = size - push_len;
		pull_len = 0;
	} else {
		alloc_size = 1; /* min allowed */
		pull_len = push_len + 1 - size;
	}

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has pkts available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return NULL;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return NULL;

		odp_pkt = odp_packet_alloc(odp_pool, alloc_size);
		if (likely(odp_pkt != ODP_PACKET_INVALID))
			break;
	}

	if (unlikely(odp_pkt == ODP_PACKET_INVALID))
		return NULL;

	/*
	 * odp packet now allocated - adjust the payload start address and
	 * init the EM event header in the odp-pkt user-area
	 */

	/* Adjust event payload start-address based on alignment config */
	const void *ptr;

	if (push_len) {
		ptr = odp_packet_push_head(odp_pkt, push_len);
		if (EM_CHECK_LEVEL >= 3 && unlikely(!ptr))
			goto err_pktalloc;
	}
	if (pull_len) {
		ptr = odp_packet_pull_tail(odp_pkt, pull_len);
		if (EM_CHECK_LEVEL >= 3 && unlikely(!ptr))
			goto err_pktalloc;
	}

	/*
	 * Set the pkt user flag to be able to recognize pkt-events that
	 * EM has created vs pkts from pkt-input that need their
	 * ev-hdrs to be initialized.
	 */
	odp_packet_user_flag_set(odp_pkt, USER_FLAG_SET);

	event_hdr_t *const ev_hdr = odp_packet_user_area(odp_pkt);
	odp_event_t odp_event = odp_packet_to_event(odp_pkt);
	em_event_t event = event_odp2em(odp_event);

	if (EM_CHECK_LEVEL >= 3 && unlikely(ev_hdr == NULL))
		goto err_pktalloc;

	/* store this event handle */
	ev_hdr->event = event;

	/* init common ev_hdr fields in the caller */

	return ev_hdr;

err_pktalloc:
	odp_packet_free(odp_pkt);
	return NULL;
}

/*
 * Helper for event_alloc_pkt_multi()
 */
static inline int
pktalloc_multi(odp_packet_t odp_pkts[/*out*/], int num,
	       odp_pool_t odp_pool, uint32_t size,
	       uint32_t push_len, uint32_t pull_len)
{
	int ret = odp_packet_alloc_multi(odp_pool, size, odp_pkts, num);

	if (unlikely(ret <= 0))
		return 0;

	const int num_pkts = ret; /* return value > 0 */
	const void *ptr = NULL;
	int i;

	/* Adjust payload start-address based on alignment config */
	if (push_len) {
		for (i = 0; i < num_pkts; i++) {
			ptr = odp_packet_push_head(odp_pkts[i], push_len);
			if (EM_CHECK_LEVEL >= 3 && unlikely(!ptr))
				goto err_pktalloc_multi;
		}
	}
	if (pull_len) {
		for (i = 0; i < num_pkts; i++) {
			ptr = odp_packet_pull_tail(odp_pkts[i], pull_len);
			if (EM_CHECK_LEVEL >= 3 && unlikely(!ptr))
				goto err_pktalloc_multi; /* only before esv */
		}
	}

	/*
	 * Set the pkt user flag to be able to recognize pkt-events that
	 * EM has created vs pkts from pkt-input that need their
	 * ev-hdrs to be initialized.
	 */
	for (i = 0; i < num_pkts; i++)
		odp_packet_user_flag_set(odp_pkts[i], USER_FLAG_SET);

	return num_pkts;

err_pktalloc_multi:
	odp_packet_free_multi(odp_pkts, num_pkts);
	return 0;
}

/**
 * Allocate & initialize multiple events based on odp-pkts.
 */
static inline int
event_alloc_pkt_multi(em_event_t events[/*out*/], const int num,
		      const mpool_elem_t *pool_elem, uint32_t size,
		      em_event_type_t type)
{
	const uint32_t push_len = pool_elem->align_offset;
	uint32_t pull_len;
	odp_packet_t odp_pkts[num];
	/* use same output-array: odp_events[] = events[] */
	odp_event_t *const odp_events = (odp_event_t *)events;
	event_hdr_t *ev_hdrs[num];
	uint32_t alloc_size;
	int subpool;
	const bool esv_ena = esv_enabled();

	if (size > push_len) {
		alloc_size = size - push_len;
		pull_len = 0;
	} else {
		alloc_size = 1; /* min allowed */
		pull_len = push_len + 1 - size;
	}

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has pkts available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return 0;

	int num_req = num;
	int num_pkts = 0;
	int i;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return 0;

		int ret = pktalloc_multi(&odp_pkts[num_pkts], num_req,
					 odp_pool, alloc_size,
					 push_len, pull_len);
		if (unlikely(ret <= 0))
			continue; /* try next subpool */

		/*
		 * Init 'ret' ev-hdrs from this 'subpool'=='odp-pool'.
		 * Note: odp_events[] points&writes into events[out]
		 */
		odp_packet_to_event_multi(&odp_pkts[num_pkts],
					  &odp_events[num_pkts], ret);

		for (i = num_pkts; i < num_pkts + ret; i++)
			ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]);

		/*
		 * Note: events[] == odp_events[] before ESV init.
		 * Don't touch odp_events[] during this loop-round anymore.
		 */
		if (esv_ena) {
			/* reads ev_hdrs[i]->flags if prealloc_pools used */
			evstate_alloc_multi(&events[num_pkts] /*in/out*/,
					    &ev_hdrs[num_pkts], ret);
		}

		for (i = num_pkts; i < num_pkts + ret; i++) {
			ev_hdrs[i]->flags.all = 0;
			ev_hdrs[i]->event_type = type;
			if (!esv_ena)
				ev_hdrs[i]->event = events[i];
			ev_hdrs[i]->event_size = size; /* original size */
			ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF;

			ev_hdrs[i]->user_area.all = 0;
			ev_hdrs[i]->user_area.size = pool_elem->user_area.size;
			ev_hdrs[i]->user_area.isinit = 1;
			/* ev_hdrs[i]->align_offset: needed by odp bufs only */
		}

		num_pkts += ret;
		if (likely(num_pkts == num))
			break; /* all allocated */
		num_req -= ret;
	}

	return num_pkts; /* number of allocated pkts (0 ... num) */
}

static inline event_hdr_t *
event_alloc_vector(const mpool_elem_t *pool_elem, uint32_t size)
{
	odp_packet_vector_t odp_pktvec = ODP_PACKET_VECTOR_INVALID;
	int subpool;

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has vectors available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return NULL;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return NULL;

		odp_pktvec = odp_packet_vector_alloc(odp_pool);
		if (likely(odp_pktvec != ODP_PACKET_VECTOR_INVALID))
			break;
	}

	if (unlikely(odp_pktvec == ODP_PACKET_VECTOR_INVALID))
		return NULL;

	/*
	 * Packet vector now allocated:
	 * Init the EM event header in the odp-pkt-vector user-area.
	 */

	/*
	 * Set the pktvec user flag to be able to recognize vectors that
	 * EM has created vs. vectors from pkt-input that need their
	 * ev-hdrs to be initialized.
	 */
	odp_packet_vector_user_flag_set(odp_pktvec, USER_FLAG_SET);

	event_hdr_t *const ev_hdr = odp_packet_vector_user_area(odp_pktvec);
	odp_event_t odp_event = odp_packet_vector_to_event(odp_pktvec);
	em_event_t event = event_odp2em(odp_event);

	if (EM_CHECK_LEVEL >= 3 && unlikely(ev_hdr == NULL))
		goto err_vecalloc;

	ev_hdr->event = event; /* store this event handle */

	/* init common ev_hdr fields in the caller */

	return ev_hdr;

err_vecalloc:
	odp_packet_vector_free(odp_pktvec);
	return NULL;
}

/*
 * Helper for event_alloc_vector_multi()
 */
static inline int
vecalloc_multi(odp_packet_vector_t odp_pktvecs[/*out*/], int num,
	       odp_pool_t odp_pool)
{
	int i;

	for (i = 0; i < num; i++) {
		odp_pktvecs[i] = odp_packet_vector_alloc(odp_pool);
		if (unlikely(odp_pktvecs[i] == ODP_PACKET_VECTOR_INVALID))
			break;
	}

	const int num_vecs = i;

	if (unlikely(num_vecs == 0))
		return 0;

	/*
	 * Set the pkt vector user flag to be able to recognize vector-events
	 * that EM has created vs vectors from pkt-input that need their
	 * ev-hdrs to be initialized.
	 */
	for (i = 0; i < num_vecs; i++)
		odp_packet_vector_user_flag_set(odp_pktvecs[i], USER_FLAG_SET);

	return num_vecs;
}

/**
 * Allocate & initialize multiple events based on odp-pkt-vectors.
 */
static inline int
event_alloc_vector_multi(em_event_t events[/*out*/], const int num,
			 const mpool_elem_t *pool_elem, uint32_t size,
			 em_event_type_t type)
{
	odp_packet_vector_t odp_pktvecs[num];
	/* use same output-array: odp_events[] = events[] */
	odp_event_t *const odp_events = (odp_event_t *)events;
	event_hdr_t *ev_hdrs[num];
	int subpool;
	const bool esv_ena = esv_enabled();

	/*
	 * Allocate from the 'best fit' subpool, or if that is full, from the
	 * next subpool that has vectors available of a bigger size.
	 */
	subpool = pool_find_subpool(pool_elem, size);
	if (unlikely(subpool < 0))
		return 0;

	int num_req = num;
	int num_vecs = 0;
	int i;

	for (; subpool < pool_elem->num_subpools; subpool++) {
		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];

		if (EM_CHECK_LEVEL >= 3 &&
		    unlikely(odp_pool == ODP_POOL_INVALID))
			return 0;

		int ret = vecalloc_multi(&odp_pktvecs[num_vecs], num_req,
					 odp_pool);
		if (unlikely(ret <= 0))
			continue; /* try next subpool */

		/*
		 * Init 'ret' ev-hdrs from this 'subpool'=='odp-pool'.
		 * Note: odp_events[] points&writes into events[out]
		 */
		for (i = num_vecs; i < num_vecs + ret; i++) {
			odp_events[i] = odp_packet_vector_to_event(odp_pktvecs[i]);
			ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);
		}

		/*
		 * Note: events[] == odp_events[] before ESV init.
		 * Don't touch odp_events[] during this loop-round anymore.
		 */
		if (esv_ena) {
			/* reads ev_hdrs[i]->flags if prealloc_pools used */
			evstate_alloc_multi(&events[num_vecs] /*in/out*/,
					    &ev_hdrs[num_vecs], ret);
		}

		for (i = num_vecs; i < num_vecs + ret; i++) {
			ev_hdrs[i]->flags.all = 0;
			ev_hdrs[i]->event_type = type;
			if (!esv_ena)
				ev_hdrs[i]->event = events[i];
			ev_hdrs[i]->event_size = size; /* original vec size */
			ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF;

			ev_hdrs[i]->user_area.all = 0;
			ev_hdrs[i]->user_area.size = pool_elem->user_area.size;
			ev_hdrs[i]->user_area.isinit = 1;
			/* ev_hdrs[i]->align_offset: needed by odp bufs only */
		}

		num_vecs += ret;
		if (likely(num_vecs == num))
			break; /* all allocated */
		num_req -= ret;
	}

	return num_vecs; /* number of allocated vectors (0 ... num) */
}

/**
 * Helper for em_alloc() and em_event_clone()
 */
static inline em_event_t
event_alloc(const mpool_elem_t *pool_elem, uint32_t size, em_event_type_t type,
	    const uint16_t api_op)
{
	/*
	 * EM event pools created with type=PKT can support:
	 * - SW events (bufs)
	 * - pkt events.
	 *
	 * EM event pools created with type=SW can support:
	 * - SW events (bufs) only
	 */
	event_hdr_t *ev_hdr = NULL;

	if (pool_elem->event_type == EM_EVENT_TYPE_PACKET)
		ev_hdr = event_alloc_pkt(pool_elem, size);
	else if (pool_elem->event_type == EM_EVENT_TYPE_SW)
		ev_hdr = event_alloc_buf(pool_elem, size);
	else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR)
		ev_hdr = event_alloc_vector(pool_elem, size);

	if (unlikely(!ev_hdr))
		return EM_EVENT_UNDEF;

	/*
	 * event now allocated:
	 * ev_hdr->event = stored by event_alloc_pkt/buf/vector()
	 */
	/* Update event ESV state for alloc */
	if (esv_enabled())
		(void)evstate_alloc(ev_hdr->event, ev_hdr, api_op);

	ev_hdr->flags.all = 0; /* clear only after evstate_alloc() */
	ev_hdr->event_type = type; /* store the event type */
	ev_hdr->event_size = size; /* store requested size */
	ev_hdr->egrp = EM_EVENT_GROUP_UNDEF;

	ev_hdr->user_area.all = 0;
	ev_hdr->user_area.size = pool_elem->user_area.size;
	ev_hdr->user_area.isinit = 1;
	/* ev_hdr->align_offset = init by event_alloc_buf() when needed */

	return ev_hdr->event;
}
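
/*
 * Illustrative sketch (not part of this header, hence under '#if 0'):
 * allocating an event via event_alloc(), roughly what em_alloc() does once
 * it has looked up the pool-element. 'example_pool_elem' is a hypothetical,
 * already looked-up mpool_elem_t, and EVSTATE__ALLOC is assumed to be the
 * matching ESV api_op for an alloc operation.
 */
#if 0
static em_event_t
example_alloc_512(const mpool_elem_t *example_pool_elem)
{
	/* 512-byte software event from the given pool */
	return event_alloc(example_pool_elem, 512, EM_EVENT_TYPE_SW,
			   EVSTATE__ALLOC);
}
#endif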

/**
 * Start-up helper for pool preallocation
 */
static inline event_prealloc_hdr_t *
event_prealloc(const mpool_elem_t *pool_elem, uint32_t size)
{
	/*
	 * EM event pools created with type=PKT can support:
	 * - SW events (bufs)
	 * - pkt events.
	 *
	 * EM event pools created with type=SW can support:
	 * - SW events (bufs) only
	 */
	event_hdr_t *ev_hdr = NULL;

	if (pool_elem->event_type == EM_EVENT_TYPE_PACKET)
		ev_hdr = event_alloc_pkt(pool_elem, size);
	else if (pool_elem->event_type == EM_EVENT_TYPE_SW)
		ev_hdr = event_alloc_buf(pool_elem, size);
	else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR)
		ev_hdr = event_alloc_vector(pool_elem, size);

	if (unlikely(ev_hdr == NULL))
		return NULL;

	/* event now allocated */

	if (esv_enabled()) {
		em_event_t event = ev_hdr->event;

		(void)evstate_prealloc(event, ev_hdr);
	}
	ev_hdr->flags.all = 0; /* clear only after evstate_prealloc() */
	ev_hdr->user_area.all = 0;

	event_prealloc_hdr_t *prealloc_hdr = (event_prealloc_hdr_t *)ev_hdr;

	return prealloc_hdr;
}

static inline event_prealloc_hdr_t *
list_node_to_prealloc_hdr(list_node_t *const list_node)
{
	event_prealloc_hdr_t *const ev_hdr = (event_prealloc_hdr_t *)(uintptr_t)
		((uint8_t *)list_node - offsetof(event_prealloc_hdr_t, list_node));

	return likely(list_node != NULL) ? ev_hdr : NULL;
}

/**
 * @brief Convert event vector table content to odp packets in-place.
 *
 * Convert an EM event vector table, containing em_event_t:s with
 * esv-info (evgen), to a table of odp packets (remove handles' evgen in-place).
 */
static inline void
vector_tbl2odp(odp_event_t odp_event_pktvec)
{
	odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec);
	odp_packet_t *pkt_tbl = NULL;
	const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl/*out*/);

	if (likely(pkts > 0)) {
		/* Careful! Points to the same table */
		em_event_t *event_tbl = (em_event_t *)pkt_tbl;

		/* Drop ESV event generation (evgen) from each event handle */
		(void)events_em2pkt_inplace(event_tbl, pkts);
	}
}

/**
 * @brief Convert ODP packet vector table content to EM events.
 *
 * Convert an ODP packet vector table to a table of EM events.
 * The content must be known to be raw odp packets.
 *
 * For recovery purposes only.
 */
static inline void
vector_tbl2em(odp_event_t odp_event_pktvec)
{
	odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec);
	odp_packet_t *pkt_tbl = NULL;
	const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl/*out*/);

	if (likely(pkts > 0)) {
		em_event_t *const ev_tbl = (em_event_t *const)pkt_tbl;
		odp_packet_t odp_pkttbl[pkts];
		event_hdr_t *ev_hdr_tbl[pkts];

		/*
		 * Copy pkts from the vector's pkt-table using events_em2pkt() that
		 * also drops any evgen-info from the handles if present.
		 */
		events_em2pkt(ev_tbl/*in*/, odp_pkttbl/*out*/, pkts);

		event_init_pkt_multi(odp_pkttbl /*in*/, ev_tbl /*in,out*/,
				     ev_hdr_tbl /*out*/, pkts, false);
	}
}
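
/*
 * Note on the in-place conversions above: an em_event_t in a vector table
 * and an odp_packet_t occupy the same handle-sized slot, but with ESV
 * enabled the EM handle additionally carries an event generation count
 * (evgen) that the raw odp handle lacks. vector_tbl2odp() strips the evgen
 * bits before odp sees the table and vector_tbl2em() restores valid EM
 * handles on the recovery path, so no second table or copy is needed.
 */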

static inline em_status_t
send_event(em_event_t event, const queue_elem_t *q_elem)
{
	const bool esv_ena = esv_enabled();
	odp_event_t odp_event = event_em2odp(event);
	odp_queue_t odp_queue = q_elem->odp_queue;
	int ret;

	if (unlikely(EM_CHECK_LEVEL > 1 &&
		     (odp_event == ODP_EVENT_INVALID ||
		      odp_queue == ODP_QUEUE_INVALID)))
		return EM_ERR_NOT_FOUND;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     q_elem->state != EM_QUEUE_STATE_READY)) {
		return EM_ERR_BAD_STATE;
	}

	/*
	 * Vector: convert the event vector table to a table of odp packets
	 * (in-place) before passing the vector and contents to the scheduler.
	 */
	if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR)
		vector_tbl2odp(odp_event);

	/* Enqueue event for scheduling */
	ret = odp_queue_enq(odp_queue, odp_event);

	if (unlikely(EM_CHECK_LEVEL > 0 && ret != 0)) {
		/* Restore EM vector event-table before returning vector to user */
		if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR)
			vector_tbl2em(odp_event);

		return EM_ERR_LIB_FAILED;
	}

	return EM_OK;
}
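
/*
 * Illustrative sketch (not part of this header, hence under '#if 0'):
 * sending an event to a scheduled queue via send_event(), roughly the core
 * of em_send(). The 'example_' name is hypothetical.
 */
#if 0
static em_status_t
example_send(em_event_t event, const queue_elem_t *q_elem)
{
	em_status_t stat = send_event(event, q_elem);

	if (stat != EM_OK) {
		/* ownership stays with the caller on failure: free or retry */
		return stat;
	}

	/* the event must not be touched after a successful send */
	return EM_OK;
}
#endif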

static inline int
send_event_multi(const em_event_t events[], const int num,
		 const queue_elem_t *q_elem)
{
	const bool esv_ena = esv_enabled();
	odp_event_t odp_events[num];
	odp_queue_t odp_queue = q_elem->odp_queue;

	if (unlikely(EM_CHECK_LEVEL > 1 && odp_queue == ODP_QUEUE_INVALID))
		return 0;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     q_elem->state != EM_QUEUE_STATE_READY)) {
		return 0;
	}

	events_em2odp(events, odp_events/*out*/, num);

	/*
	 * Vector: convert the event vector table to a table of odp packets
	 * (in-place) before passing the vector and contents to the scheduler.
	 */
	if (esv_ena) {
		for (int i = 0; i < num; i++) {
			if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR)
				vector_tbl2odp(odp_events[i]);
		}
	}

	/* Enqueue events for scheduling */
	int ret = odp_queue_enq_multi(odp_queue, odp_events, num);

	if (likely(ret == num))
		return num; /* Success! */

	/*
	 * Fail: could not enqueue all events (ret != num)
	 */
	int enq = ret < 0 ? 0 : ret;

	/* Restore EM vector event-table before returning vector to user */
	if (esv_ena) {
		for (int i = enq; i < num; i++) {
			if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR)
				vector_tbl2em(odp_events[i]);
		}
	}

	return enq; /* enq < num */
}

static inline em_status_t
send_local(em_event_t event, const queue_elem_t *q_elem)
{
	em_locm_t *const locm = &em_locm;
	const em_queue_prio_t prio = q_elem->priority;
	evhdl_t evhdl = {.event = event};
	int ret;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     q_elem->state != EM_QUEUE_STATE_READY))
		return EM_ERR_BAD_STATE;

	em_queue_t queue = (em_queue_t)(uintptr_t)q_elem->queue;
	stash_entry_t entry = {.qidx = queue_hdl2idx(queue),
			       .evptr = evhdl.evptr};

	ret = odp_stash_put_u64(locm->local_queues.prio[prio].stash,
				&entry.u64, 1);
	if (likely(ret == 1)) {
		locm->local_queues.empty = 0;
		locm->local_queues.prio[prio].empty_prio = 0;
		return EM_OK;
	}

	return EM_ERR_LIB_FAILED;
}

static inline int
send_local_multi(const em_event_t events[], const int num,
		 const queue_elem_t *q_elem)
{
	em_locm_t *const locm = &em_locm;
	const em_queue_prio_t prio = q_elem->priority;
	const evhdl_t *const evhdl_tbl = (const evhdl_t *const)events;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     q_elem->state != EM_QUEUE_STATE_READY))
		return 0;

	stash_entry_t entry_tbl[num];
	em_queue_t queue = (em_queue_t)(uintptr_t)q_elem->queue;
	const uint16_t qidx = (uint16_t)queue_hdl2idx(queue);

	for (int i = 0; i < num; i++) {
		entry_tbl[i].qidx = qidx;
		entry_tbl[i].evptr = evhdl_tbl[i].evptr;
	}

	int ret = odp_stash_put_u64(locm->local_queues.prio[prio].stash,
				    &entry_tbl[0].u64, num);
	if (likely(ret > 0)) {
		locm->local_queues.empty = 0;
		locm->local_queues.prio[prio].empty_prio = 0;
		return ret;
	}

	return 0;
}
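
/*
 * Illustrative sketch (not part of this header, hence under '#if 0'):
 * the consumer side of the send_local...() stashes above. Assumption: the
 * EM dispatcher pops the packed {qidx, evptr} u64 entries with
 * odp_stash_get_u64() and rebuilds the event handles from them; the exact
 * evgen handling is omitted here.
 */
#if 0
static int
example_local_drain(odp_stash_t stash, stash_entry_t entry_tbl[/*out*/],
		    int num)
{
	int32_t cnt = odp_stash_get_u64(stash, &entry_tbl[0].u64, num);

	/* entry_tbl[i].qidx selects the local queue,
	 * entry_tbl[i].evptr is the pointer part of the event handle */
	return cnt > 0 ? (int)cnt : 0;
}
#endif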

/**
 * Send one event to a queue of type EM_QUEUE_TYPE_OUTPUT
 */
static inline em_status_t
send_output(em_event_t event, queue_elem_t *const output_q_elem)
{
	const em_sched_context_type_t sched_ctx_type =
		em_locm.current.sched_context_type;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED))
		return EM_ERR_BAD_STATE;

	/*
	 * An event sent to an output queue from an ordered context needs to
	 * be 're-ordered' before calling the user provided output-function.
	 * Order is maintained by enqueuing and dequeuing into an odp-queue
	 * that takes care of order.
	 */
	if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) {
		const odp_queue_t odp_queue = output_q_elem->odp_queue;
		odp_event_t odp_event = event_em2odp(event);
		int ret;

		if (unlikely(EM_CHECK_LEVEL > 1 &&
			     (odp_event == ODP_EVENT_INVALID ||
			      odp_queue == ODP_QUEUE_INVALID)))
			return EM_ERR_NOT_FOUND;

		if (!EM_OUTPUT_QUEUE_IMMEDIATE)
			output_queue_track(output_q_elem);

		/* enqueue to enforce odp to handle ordering */
		ret = odp_queue_enq(odp_queue, odp_event);
		if (unlikely(ret != 0))
			return EM_ERR_LIB_FAILED;

		/* return value must be EM_OK after this since event enqueued */

		if (EM_OUTPUT_QUEUE_IMMEDIATE) {
			env_spinlock_t *const lock =
				&output_q_elem->output.lock;

			if (!env_spinlock_trylock(lock))
				return EM_OK;
			output_queue_drain(output_q_elem);
			env_spinlock_unlock(lock);
		}

		return EM_OK;
	}

	/*
	 * No ordered context - call output_fn() directly
	 */
	const em_queue_t output_queue = (em_queue_t)(uintptr_t)output_q_elem->queue;
	const em_output_func_t output_fn =
		output_q_elem->output.output_conf.output_fn;
	void *const output_fn_args =
		output_q_elem->output.output_conf.output_fn_args;
	int sent;

	sent = output_fn(&event, 1, output_queue, output_fn_args);
	if (unlikely(sent != 1))
		return EM_ERR_OPERATION_FAILED;

	return EM_OK;
}

/**
 * Send events to a queue of type EM_QUEUE_TYPE_OUTPUT
 */
static inline int
send_output_multi(const em_event_t events[], const unsigned int num,
		  queue_elem_t *const output_q_elem)
{
	const em_sched_context_type_t sched_ctx_type =
		em_locm.current.sched_context_type;
	int sent;

	if (unlikely(EM_CHECK_LEVEL > 0 &&
		     output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED))
		return 0;

	/*
	 * An event sent to an output queue from an ordered context needs to
	 * be 're-ordered' before calling the user provided output-function.
	 * Order is maintained by enqueuing and dequeuing into an odp-queue
	 * that takes care of order.
	 */
	if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) {
		const odp_queue_t odp_queue = output_q_elem->odp_queue;
		odp_event_t odp_events[num];

		if (unlikely(EM_CHECK_LEVEL > 1 &&
			     odp_queue == ODP_QUEUE_INVALID))
			return 0;

		if (!EM_OUTPUT_QUEUE_IMMEDIATE)
			output_queue_track(output_q_elem);

		events_em2odp(events, odp_events/*out*/, num);

		/* enqueue to enforce odp to handle ordering */
		sent = odp_queue_enq_multi(odp_queue, odp_events, num);
		if (unlikely(sent <= 0))
			return 0;

		/* the return value must be the number of enqueued events */

		if (EM_OUTPUT_QUEUE_IMMEDIATE) {
			env_spinlock_t *const lock =
				&output_q_elem->output.lock;

			if (!env_spinlock_trylock(lock))
				return sent;
			output_queue_drain(output_q_elem);
			env_spinlock_unlock(lock);
		}

		return sent;
	}

	/*
	 * No ordered context - call output_fn() directly
	 */
	const em_queue_t output_queue = (em_queue_t)(uintptr_t)output_q_elem->queue;
	const em_output_func_t output_fn = output_q_elem->output.output_conf.output_fn;
	void *const output_fn_args = output_q_elem->output.output_conf.output_fn_args;

	sent = output_fn(events, num, output_queue, output_fn_args);

	return sent;
}
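
/*
 * Illustrative sketch (not part of this header, hence under '#if 0'):
 * a user-provided output function with the em_output_func_t signature that
 * send_output()/send_output_multi() end up calling. This hypothetical
 * example just consumes the events and reports them all as sent; a real
 * output_fn would transmit them (e.g. via pktio) instead.
 */
#if 0
static int
example_output_fn(const em_event_t events[], const unsigned int num,
		  const em_queue_t output_queue, void *output_fn_args)
{
	(void)output_queue;
	(void)output_fn_args;

	for (unsigned int i = 0; i < num; i++)
		em_free(events[i]);

	return (int)num;
}
#endif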

/**
 * Return a pointer to the EM event user payload.
 * Helper to e.g. the EM API em_event_pointer().
 */
static inline void *
event_pointer(em_event_t event)
{
	const odp_event_t odp_event = event_em2odp(event);
	const odp_event_type_t odp_etype = odp_event_type(odp_event);
	void *ev_ptr = NULL; /* return value */

	if (odp_etype == ODP_EVENT_PACKET) {
		const odp_packet_t odp_pkt = odp_packet_from_event(odp_event);

		ev_ptr = odp_packet_data(odp_pkt);
	} else if (odp_etype == ODP_EVENT_BUFFER) {
		const odp_buffer_t odp_buf = odp_buffer_from_event(odp_event);
		const event_hdr_t *ev_hdr = odp_buffer_user_area(odp_buf);
		const uint32_t align_offset = ev_hdr->align_offset;

		ev_ptr = odp_buffer_addr(odp_buf);

		if (align_offset)
			ev_ptr = (void *)((uintptr_t)ev_ptr + 32 - align_offset);
	}

	return ev_ptr; /* NULL for unrecognized odp_etype, also for vectors */
}
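
/*
 * Illustrative sketch (not part of this header, hence under '#if 0'):
 * using event_pointer() the way the EM API em_event_pointer() does, to
 * reach the payload of a packet- or buf-based event. Vectors return NULL
 * by design and must be accessed via their event table instead.
 */
#if 0
static void
example_fill(em_event_t event, uint8_t pattern, uint32_t len)
{
	uint8_t *payload = event_pointer(event);

	if (payload != NULL) /* caller guarantees 'len' fits the payload */
		memset(payload, pattern, len);
}
#endif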

static inline bool
event_has_ref(em_event_t event)
{
	odp_event_t odp_event = event_em2odp(event);
	odp_event_type_t odp_etype = odp_event_type(odp_event);

	if (odp_etype != ODP_EVENT_PACKET)
		return false;

	odp_packet_t odp_pkt = odp_packet_from_event(odp_event);

	return odp_packet_has_ref(odp_pkt) ? true : false;
}

#ifdef __cplusplus
}
#endif

#endif /* EM_EVENT_H_ */