EM-ODP  3.7.0
Event Machine on ODP
em_atomic_group.c
1 /*
2  * Copyright (c) 2015, Nokia Solutions and Networks
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * * Redistributions of source code must retain the above copyright
10  * notice, this list of conditions and the following disclaimer.
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in the
13  * documentation and/or other materials provided with the distribution.
14  * * Neither the name of the copyright holder nor the names of its
15  * contributors may be used to endorse or promote products derived
16  * from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include "em_include.h"
32 #include "em_dispatcher_inline.h"
33 
34 /**
35  * Atomic group inits done at global init (once at startup on one core)
36  */
38 atomic_group_init(atomic_group_tbl_t *const atomic_group_tbl,
39  atomic_group_pool_t *const atomic_group_pool)
40 {
41  atomic_group_elem_t *atomic_group_elem;
42  const uint32_t objpool_subpools = MIN(4, OBJSUBPOOLS_MAX);
43  int ret;
44 
45  memset(atomic_group_tbl, 0, sizeof(atomic_group_tbl_t));
46  memset(atomic_group_pool, 0, sizeof(atomic_group_pool_t));
47  env_atomic32_init(&em_shm->atomic_group_count);
48 
49  for (int i = 0; i < EM_MAX_ATOMIC_GROUPS; i++) {
50  em_atomic_group_t agrp = agrp_idx2hdl(i);
51  atomic_group_elem_t *const agrp_elem =
52  atomic_group_elem_get(agrp);
53 
54  if (unlikely(!agrp_elem))
55  return EM_ERR_BAD_POINTER;
56 
57  agrp_elem->atomic_group = agrp; /* store handle */
58 
59  /* Init list and lock */
60  env_spinlock_init(&agrp_elem->lock);
61  list_init(&agrp_elem->qlist_head);
62  env_atomic32_init(&agrp_elem->num_queues);
63  }
64 
65  ret = objpool_init(&atomic_group_pool->objpool, objpool_subpools);
66  if (ret != 0)
67  return EM_ERR_LIB_FAILED;
68 
69  for (uint32_t i = 0; i < EM_MAX_ATOMIC_GROUPS; i++) {
70  atomic_group_elem = &atomic_group_tbl->ag_elem[i];
71  objpool_add(&atomic_group_pool->objpool, i % objpool_subpools,
72  &atomic_group_elem->atomic_group_pool_elem);
73  }
74 
75  return EM_OK;
76 }
77 
78 static inline atomic_group_elem_t *
79 ag_pool_elem2ag_elem(const objpool_elem_t *const atomic_group_pool_elem)
80 {
81  return (atomic_group_elem_t *)((uintptr_t)atomic_group_pool_elem -
82  offsetof(atomic_group_elem_t, atomic_group_pool_elem));
83 }
84 
85 /**
86  * Dynamic atomic group allocation
87  */
88 em_atomic_group_t
90 {
91  const atomic_group_elem_t *ag_elem;
92  const objpool_elem_t *ag_p_elem;
93 
94  ag_p_elem = objpool_rem(&em_shm->atomic_group_pool.objpool,
95  em_core_id());
96 
97  if (unlikely(ag_p_elem == NULL))
98  return EM_ATOMIC_GROUP_UNDEF;
99 
100  ag_elem = ag_pool_elem2ag_elem(ag_p_elem);
101 
102  env_atomic32_inc(&em_shm->atomic_group_count);
103  return ag_elem->atomic_group;
104 }
105 
107 atomic_group_free(em_atomic_group_t atomic_group)
108 {
109  atomic_group_elem_t *agrp_elem = atomic_group_elem_get(atomic_group);
110 
111  if (unlikely(agrp_elem == NULL))
112  return EM_ERR_BAD_ID;
113 
114  objpool_add(&em_shm->atomic_group_pool.objpool,
115  agrp_elem->atomic_group_pool_elem.subpool_idx,
116  &agrp_elem->atomic_group_pool_elem);
117 
118  env_atomic32_dec(&em_shm->atomic_group_count);
119  return EM_OK;
120 }
121 
122 /**
123  * Called by em_queue_delete() to remove the queue from the atomic group list
124  */
125 void
127 {
128  if (!q_elem->flags.in_atomic_group)
129  return;
130 
131  em_atomic_group_t atomic_group = q_elem->agrp.atomic_group;
132 
133  if (!invalid_atomic_group(atomic_group)) {
134  atomic_group_elem_t *const ag_elem =
135  atomic_group_elem_get(atomic_group);
136 
137  atomic_group_rem_queue_list(ag_elem, q_elem);
138  q_elem->flags.in_atomic_group = false;
139  q_elem->agrp.atomic_group = EM_ATOMIC_GROUP_UNDEF;
140  }
141 }
142 
143 unsigned int
144 atomic_group_count(void)
145 {
146  return env_atomic32_get(&em_shm->atomic_group_count);
147 }
148 
149 static inline int
150 ag_local_processing_ended(atomic_group_elem_t *const ag_elem)
151 {
152  em_locm_t *const locm = &em_locm;
153 
154  /*
155  * Check if atomic group processing has ended for this core, meaning
156  * the application called em_atomic_processing_end()
157  */
158  if (locm->atomic_group_released) {
159  locm->atomic_group_released = false;
160  /*
161  * Try to acquire the atomic group lock and continue processing.
162  * It is possible that another core has acquired the lock
163  */
164  if (env_spinlock_trylock(&ag_elem->lock))
165  return 0;
166  else
167  return 1;
168  }
169 
170  return 0;
171 }
172 
173 static inline int
174 ag_internal_enq(const atomic_group_elem_t *ag_elem, const queue_elem_t *q_elem,
175  odp_event_t odp_evtbl[], const int num_events,
176  const em_queue_prio_t priority)
177 {
178  stash_entry_t entry_tbl[num_events];
179  odp_stash_t stash;
180  int ret;
181 
182  const em_queue_t queue = (em_queue_t)(uintptr_t)q_elem->queue;
183  const uint16_t qidx = (uint16_t)queue_hdl2idx(queue);
184 
185  for (int i = 0; i < num_events; i++) {
186  entry_tbl[i].qidx = qidx;
187  entry_tbl[i].evptr = (uintptr_t)odp_evtbl[i];
188  }
189 
190  if (priority == EM_QUEUE_PRIO_HIGHEST)
191  stash = ag_elem->stashes.hi_prio;
192  else
193  stash = ag_elem->stashes.lo_prio;
194 
195  /* Enqueue events to internal queue */
196  ret = odp_stash_put_u64(stash, &entry_tbl[0].u64, num_events);
197  if (unlikely(ret != num_events))
198  return ret > 0 ? ret : 0;
199 
200  return num_events;
201 }
202 
203 static inline int
204 ag_internal_deq(const atomic_group_elem_t *ag_elem,
205  stash_entry_t entry_tbl[/*out*/], const int num_events)
206 {
207  /*
208  * The function call_eo_receive_fn/multi() will convert to
209  * EM events with event-generation counts, if ESV is enabled,
210  * before passing the events to the user EO.
211  */
212  int32_t hi_cnt;
213  int32_t lo_cnt;
214 
215  /* hi-prio events */
216  hi_cnt = odp_stash_get_u64(ag_elem->stashes.hi_prio,
217  &entry_tbl[0].u64 /*[out]*/, num_events);
218  if (hi_cnt == num_events || hi_cnt < 0)
219  return hi_cnt;
220 
221  /* ...then lo-prio events */
222  lo_cnt = odp_stash_get_u64(ag_elem->stashes.lo_prio,
223  &entry_tbl[hi_cnt].u64 /*[out]*/,
224  num_events - hi_cnt);
225  if (unlikely(lo_cnt < 0))
226  return hi_cnt;
227 
228  return hi_cnt + lo_cnt;
229 }
230 
231 void atomic_group_dispatch(odp_event_t odp_evtbl[], const int num_events,
232  const queue_elem_t *q_elem)
233 {
234  atomic_group_elem_t *const ag_elem =
235  atomic_group_elem_get(q_elem->agrp.atomic_group);
236  const em_queue_prio_t priority = q_elem->priority;
237 
238  /* Enqueue the scheduled events into the atomic group internal queue */
239  int enq_cnt = ag_internal_enq(ag_elem, q_elem, odp_evtbl, num_events, priority);
240 
241  if (unlikely(enq_cnt < num_events)) {
242  int num_free = num_events - enq_cnt;
243  event_hdr_t *ev_hdr_tbl[num_free];
244  em_event_t ev_tbl[num_free];
245 
246  event_init_odp_multi(&odp_evtbl[enq_cnt], ev_tbl/*out*/, ev_hdr_tbl/*out*/,
247  num_free, true/*is_extev*/);
248  /* Drop events that could not be enqueued */
249  em_free_multi(ev_tbl, num_free);
250  /*
251  * Use dispatch escope since this func is called only from
252  * dispatch_round() => atomic_group_dispatch()
253  */
254  INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_DISPATCH,
255  "Atomic group:%" PRI_AGRP " internal enqueue fails:\n"
256  " num_events:%d enq_cnt:%d => %d events dropped",
257  ag_elem->atomic_group, num_events, enq_cnt, num_free);
258  }
259 
260  /*
261  * Try to acquire the atomic group lock - if not available then some
262  * other core is already handling the same atomic group.
263  */
264  if (!env_spinlock_trylock(&ag_elem->lock))
265  return;
266 
267  em_locm_t *const locm = &em_locm;
268 
269  /* hint */
270  odp_schedule_release_atomic();
271 
272  locm->atomic_group_released = false;
273  /*
274  * Loop until no more events or until atomic processing end.
275  * Events in the ag_elem->internal_queue:s have been scheduled
276  * already once and should be dispatched asap.
277  */
278  odp_event_t deq_evtbl[EM_SCHED_AG_MULTI_MAX_BURST];
280 
281  do {
282  int deq_cnt = ag_internal_deq(ag_elem, entry_tbl /*[out]*/,
284 
285  if (unlikely(deq_cnt <= 0)) {
286  env_spinlock_unlock(&ag_elem->lock);
287  /* return if no more events available */
288  return;
289  }
290 
291  for (int i = 0; i < deq_cnt; i++)
292  deq_evtbl[i] = (odp_event_t)(uintptr_t)entry_tbl[i].evptr;
293 
294  locm->event_burst_cnt = deq_cnt;
295  int tbl_idx = 0; /* index into ..._tbl[] */
296 
297  /*
298  * Dispatch in batches of 'batch_cnt' events.
299  * Each batch contains events from the same atomic queue.
300  */
301  do {
302  const int qidx = entry_tbl[tbl_idx].qidx;
303  const em_queue_t queue = queue_idx2hdl(qidx);
304  queue_elem_t *const batch_qelem = queue_elem_get(queue);
305 
306  int batch_cnt = 1;
307 
308  /* i < deq_cnt <= EM_SCHED_AG_MULTI_MAX_BURST */
309  for (int i = tbl_idx + 1; i < deq_cnt &&
310  entry_tbl[i].qidx == qidx; i++) {
311  batch_cnt++;
312  }
313 
314  dispatch_events(&deq_evtbl[tbl_idx],
315  batch_cnt, batch_qelem);
316  tbl_idx += batch_cnt;
317  } while (tbl_idx < deq_cnt);
318 
319  } while (!ag_local_processing_ended(ag_elem));
320 }
321 
/* Header/format strings for the atomic-group info table printed by
 * print_atomic_group_info(). AG_INFO_LEN is the fixed length of one
 * AG_INFO_FMT row (excluding the terminating '\0'). */
#define AG_INFO_HDR_STR \
"Number of atomic groups: %d\n\n" \
"ID Name Qgrp Q-num\n" \
"---------------------------------------------------------\n%s\n"

#define AG_INFO_LEN 58
#define AG_INFO_FMT "%-10" PRI_AGRP "%-32s%-10" PRI_QGRP "%-5d\n"/*58 characters*/
329 
331 {
332  unsigned int ag_num; /*atomic group number*/
333  const atomic_group_elem_t *ag_elem;
334  em_atomic_group_t ag_check;
335  char ag_name[EM_ATOMIC_GROUP_NAME_LEN];
336  int len = 0;
337  int n_print = 0;
338 
339  em_atomic_group_t ag = em_atomic_group_get_first(&ag_num);
340 
341  /*
342  * ag_num might not match the actual number of atomic groups returned
343  * by iterating with func em_atomic_group_get_next() if atomic groups
344  * are added or removed in parallel by another core. Thus space for 10
345  * extra atomic groups is reserved. If more than 10 atomic groups are
346  * added in parallel by other cores, we print only information of the
347  * (ag_num + 10) atomic groups.
348  *
349  * The extra 1 byte is reserved for the terminating null byte.
350  */
351  const int ag_info_str_len = (ag_num + 10) * AG_INFO_LEN + 1;
352  char ag_info_str[ag_info_str_len];
353 
354  while (ag != EM_ATOMIC_GROUP_UNDEF) {
355  ag_elem = atomic_group_elem_get(ag);
356 
357  em_atomic_group_get_name(ag, ag_name, sizeof(ag_name));
358 
359  ag_check = em_atomic_group_find(ag_name);
360  if (unlikely(ag_elem == NULL || ag_check != ag ||
361  !atomic_group_allocated(ag_elem))) {
363  continue;
364  }
365 
366  n_print = snprintf(ag_info_str + len, ag_info_str_len - len,
367  AG_INFO_FMT, ag, ag_name, ag_elem->queue_group,
368  env_atomic32_get(&ag_elem->num_queues));
369 
370  /* Not enough space to hold more atomic group info */
371  if (n_print >= ag_info_str_len - len)
372  break;
373 
374  len += n_print;
376  }
377 
378  /* No atomic group */
379  if (len == 0) {
380  EM_PRINT("No atomic group has been created\n");
381  return;
382  }
383 
384  /*
385  * To prevent printing incomplete information of the last atomic group
386  * when there is not enough space to hold all atomic group info.
387  */
388  ag_info_str[len] = '\0';
389  EM_PRINT(AG_INFO_HDR_STR, ag_num, ag_info_str);
390 }
391 
/* Header/format strings for the per-atomic-group queue table printed by
 * print_atomic_group_queues(). AG_Q_INFO_LEN is the fixed length of one
 * AG_Q_INFO_FMT row (excluding the terminating '\0'). */
#define AG_QUEUE_INFO_HDR_STR \
"Atomic group %" PRI_AGRP "(%s) has %d queue(s):\n\n" \
"ID Name Priority Type State Qgrp Ctx\n" \
"-----------------------------------------------------------------------------------\n" \
"%s\n"

#define AG_Q_INFO_LEN 85
#define AG_Q_INFO_FMT "%-10" PRI_QUEUE "%-32s%-10d%-10s%-9s%-10" PRI_QGRP "%-3c\n"
400 
401 void print_atomic_group_queues(em_atomic_group_t ag)
402 {
403  unsigned int q_num;
404  em_queue_t ag_queue;
405  const queue_elem_t *q_elem;
406  char q_name[EM_QUEUE_NAME_LEN];
407  int len = 0;
408  int n_print = 0;
409 
410  atomic_group_elem_t *ag_elem = atomic_group_elem_get(ag);
411 
412  if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) {
413  EM_PRINT("Atomic group %" PRI_AGRP "is not created!\n", ag);
414  return;
415  }
416 
417  ag_queue = em_atomic_group_queue_get_first(&q_num, ag);
418 
419  /*
420  * q_num may not match the number of queues actually returned by iterating
421  * with em_atomic_group_queue_get_next() if queues are added or removed
422  * in parallel by another core. Thus space for 10 extra queues is reserved.
423  * If more than 10 queues are added to this atomic group by other cores
424  * in parallel, we print only information of the (q_num + 10) queues.
425  *
426  * The extra 1 byte is reserved for the terminating null byte.
427  */
428  int q_info_str_len = (q_num + 10) * AG_Q_INFO_LEN + 1;
429  char q_info_str[q_info_str_len];
430 
431  while (ag_queue != EM_QUEUE_UNDEF) {
432  q_elem = queue_elem_get(ag_queue);
433 
434  if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) {
435  ag_queue = em_atomic_group_queue_get_next();
436  continue;
437  }
438 
439  queue_get_name(q_elem, q_name, EM_QUEUE_NAME_LEN - 1);
440 
441  n_print = snprintf(q_info_str + len, q_info_str_len - len,
442  AG_Q_INFO_FMT, ag_queue, q_name,
443  q_elem->priority,
444  queue_get_type_str(q_elem->type),
445  queue_get_state_str(q_elem->state),
446  q_elem->queue_group,
447  q_elem->context ? 'Y' : 'N');
448 
449  /* Not enough space to hold more queue info */
450  if (n_print >= q_info_str_len - len)
451  break;
452 
453  len += n_print;
454  ag_queue = em_atomic_group_queue_get_next();
455  }
456 
457  /* Atomic group has no queue */
458  if (!len) {
459  EM_PRINT("Atomic group %" PRI_AGRP "(%s) has no queue!\n",
460  ag, ag_elem->name);
461  return;
462  }
463 
464  /*
465  * To prevent printing incomplete information of the last queue when
466  * there is not enough space to hold all queue info.
467  */
468  q_info_str[len] = '\0';
469  EM_PRINT(AG_QUEUE_INFO_HDR_STR, ag, ag_elem->name, q_num, q_info_str);
470 }
471 
/**
 * Print the total size of atomic_group_elem_t and, in debug builds only
 * (via EM_DBG), a table of each member's offset and size.
 */
void print_ag_elem_info(void)
{
	EM_PRINT("\n"
		 "EM Atomic Groups\n"
		 "----------------\n"
		 "ag-elem size: %zu B\n",
		 sizeof(atomic_group_elem_t));

	/* Debug-only layout table: one offset/size pair per member */
	EM_DBG("\t\toffset\tsize\n"
	       "\t\t------\t-----\n"
	       "atomic_group:\t%3zu B\t%3zu B\n"
	       "queue_group:\t%3zu B\t%3zu B\n"
	       "ag pool_elem:\t%3zu B\t%3zu B\n"
	       "stashes:\t%3zu B\t%3zu B\n"
	       "lock:\t\t%3zu B\t%3zu B\n"
	       "num_queues:\t%3zu B\t%3zu B\n"
	       "qlist_head[]:\t%3zu B\t%3zu B\n"
	       "name:\t\t%3zu B\t%3zu B\n",
	       offsetof(atomic_group_elem_t, atomic_group),
	       sizeof_field(atomic_group_elem_t, atomic_group),
	       offsetof(atomic_group_elem_t, queue_group),
	       sizeof_field(atomic_group_elem_t, queue_group),
	       offsetof(atomic_group_elem_t, atomic_group_pool_elem),
	       sizeof_field(atomic_group_elem_t, atomic_group_pool_elem),
	       offsetof(atomic_group_elem_t, stashes),
	       sizeof_field(atomic_group_elem_t, stashes),
	       offsetof(atomic_group_elem_t, lock),
	       sizeof_field(atomic_group_elem_t, lock),
	       offsetof(atomic_group_elem_t, num_queues),
	       sizeof_field(atomic_group_elem_t, num_queues),
	       offsetof(atomic_group_elem_t, qlist_head),
	       sizeof_field(atomic_group_elem_t, qlist_head),
	       offsetof(atomic_group_elem_t, name),
	       sizeof_field(atomic_group_elem_t, name));

	EM_PRINT("\n");
}
EM_SCHED_AG_MULTI_MAX_BURST
#define EM_SCHED_AG_MULTI_MAX_BURST
Definition: event_machine_hw_config.h:232
EM_QUEUE_PRIO_HIGHEST
@ EM_QUEUE_PRIO_HIGHEST
Definition: event_machine_hw_types.h:155
em_atomic_group_get_next
em_atomic_group_t em_atomic_group_get_next(void)
Definition: event_machine_atomic_group.c:413
queue_get_state_str
const char * queue_get_state_str(queue_state_t state)
Definition: em_queue.c:1673
queue_elem_t::queue_group
em_queue_group_t queue_group
Definition: em_queue_types.h:252
EM_OK
#define EM_OK
Definition: event_machine_types.h:329
atomic_group_elem_t::hi_prio
odp_stash_t hi_prio
Definition: em_atomic_group_types.h:56
atomic_group_elem_t::qlist_head
list_node_t qlist_head
Definition: em_atomic_group_types.h:67
atomic_group_init
em_status_t atomic_group_init(atomic_group_tbl_t *const atomic_group_tbl, atomic_group_pool_t *const atomic_group_pool)
Definition: em_atomic_group.c:38
atomic_group_elem_t::num_queues
env_atomic32_t num_queues
Definition: em_atomic_group_types.h:64
queue_get_type_str
const char * queue_get_type_str(em_queue_type_t type)
Definition: em_queue.c:1701
atomic_group_alloc
em_atomic_group_t atomic_group_alloc(void)
Definition: em_atomic_group.c:89
em_atomic_group_find
em_atomic_group_t em_atomic_group_find(const char *name)
Definition: event_machine_atomic_group.c:367
queue_elem_t::type
uint8_t type
Definition: em_queue_types.h:216
em_locm
ENV_LOCAL em_locm_t em_locm
stash_entry_t
Definition: em_event_types.h:86
EM_ERR_LIB_FAILED
@ EM_ERR_LIB_FAILED
Definition: event_machine_hw_types.h:291
atomic_group_elem_t::stashes
struct atomic_group_elem_t::@15 stashes
em_atomic_group_get_first
em_atomic_group_t em_atomic_group_get_first(unsigned int *num)
Definition: event_machine_atomic_group.c:384
EM_MAX_ATOMIC_GROUPS
#define EM_MAX_ATOMIC_GROUPS
Definition: event_machine_config.h:137
queue_elem_t::state
queue_state_t state
Definition: em_queue_types.h:210
print_atomic_group_queues
void print_atomic_group_queues(em_atomic_group_t ag)
Definition: em_atomic_group.c:401
PRI_AGRP
#define PRI_AGRP
Definition: event_machine_types.h:158
print_atomic_group_info
void print_atomic_group_info(void)
Definition: em_atomic_group.c:330
EM_QUEUE_NAME_LEN
#define EM_QUEUE_NAME_LEN
Definition: event_machine_config.h:125
event_hdr
Definition: em_event_types.h:184
em_atomic_group_queue_get_next
em_queue_t em_atomic_group_queue_get_next(void)
Definition: event_machine_atomic_group.c:492
em_locm_t::event_burst_cnt
int event_burst_cnt
Definition: em_mem.h:198
queue_elem_t::queue
uint32_t queue
Definition: em_queue_types.h:225
atomic_group_remove_queue
void atomic_group_remove_queue(queue_elem_t *const q_elem)
Definition: em_atomic_group.c:126
atomic_group_elem_t
Definition: em_atomic_group_types.h:46
queue_elem_t::context
void * context
Definition: em_queue_types.h:231
atomic_group_elem_t::queue_group
em_queue_group_t queue_group
Definition: em_atomic_group_types.h:50
EM_ERR_BAD_ID
@ EM_ERR_BAD_ID
Definition: event_machine_hw_types.h:265
INTERNAL_ERROR
#define INTERNAL_ERROR(error, escope, fmt,...)
Definition: em_error.h:43
queue_elem_t::in_atomic_group
uint8_t in_atomic_group
Definition: em_queue_types.h:201
EM_ATOMIC_GROUP_NAME_LEN
#define EM_ATOMIC_GROUP_NAME_LEN
Definition: event_machine_config.h:143
atomic_group_pool_t
Definition: em_atomic_group_types.h:85
em_status_t
uint32_t em_status_t
Definition: event_machine_types.h:321
em_atomic_group_queue_get_first
em_queue_t em_atomic_group_queue_get_first(unsigned int *num, em_atomic_group_t atomic_group)
Definition: event_machine_atomic_group.c:436
em_locm_t::atomic_group_released
bool atomic_group_released
Definition: em_mem.h:201
EM_ERR_OPERATION_FAILED
@ EM_ERR_OPERATION_FAILED
Definition: event_machine_hw_types.h:289
queue_elem_t::priority
uint8_t priority
Definition: em_queue_types.h:213
em_atomic_group_get_name
size_t em_atomic_group_get_name(em_atomic_group_t atomic_group, char *name, size_t maxlen)
Definition: event_machine_atomic_group.c:334
objpool_elem_t
Definition: objpool.h:48
EM_ATOMIC_GROUP_UNDEF
#define EM_ATOMIC_GROUP_UNDEF
Definition: event_machine_types.h:156
em_shm
em_shm_t * em_shm
Definition: event_machine_init.c:41
atomic_group_elem_t::atomic_group
em_atomic_group_t atomic_group
Definition: em_atomic_group_types.h:48
EM_QUEUE_UNDEF
#define EM_QUEUE_UNDEF
Definition: event_machine_types.h:107
em_include.h
atomic_group_elem_t::name
char name[EM_ATOMIC_GROUP_NAME_LEN]
Definition: em_atomic_group_types.h:69
em_core_id
int em_core_id(void)
Definition: event_machine_core.c:34
atomic_group_tbl_t
Definition: em_atomic_group_types.h:77
EM_ERR_BAD_POINTER
@ EM_ERR_BAD_POINTER
Definition: event_machine_hw_types.h:271
em_queue_prio_t
uint32_t em_queue_prio_t
Definition: event_machine_types.h:186
em_dispatcher_inline.h
em_shm_t::atomic_group_count
env_atomic32_t atomic_group_count
Definition: em_mem.h:142
q_elem_atomic_group_::atomic_group
em_atomic_group_t atomic_group
Definition: em_queue_types.h:158
em_free_multi
void em_free_multi(em_event_t events[], int num)
Definition: event_machine_event.c:370
em_locm_t
Definition: em_mem.h:188
atomic_group_elem_t::atomic_group_pool_elem
objpool_elem_t atomic_group_pool_elem
Definition: em_atomic_group_types.h:52
queue_elem_t
Definition: em_queue_types.h:180
atomic_group_elem_t::lo_prio
odp_stash_t lo_prio
Definition: em_atomic_group_types.h:58
atomic_group_tbl_t::ag_elem
atomic_group_elem_t ag_elem[EM_MAX_ATOMIC_GROUPS]
Definition: em_atomic_group_types.h:79