EM-ODP  3.7.0
Event Machine on ODP
em_internal_event.c
/*
 * Copyright (c) 2012, Nokia Siemens Networks
 * Copyright (c) 2014-2016, Nokia Solutions and Networks
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * * Neither the name of the copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 *
 * EM Internal Control
 */

#include "em_include.h"

/* doc in header file */
em_status_t create_ctrl_queues(void)
{
        const int num_cores = em_core_count();
        char q_name[EM_QUEUE_NAME_LEN];
        em_queue_t shared_unsched_queue;
        em_queue_t queue;
        em_queue_conf_t unsch_conf;

        const char *err_str = "";

        EM_DBG("%s()\n", __func__);

        /*
         * Create the shared internal unsched queue used for internal EM
         * messaging. Cannot use em_queue_create_static() here since the
         * requested handle 'SHARED_INTERNAL_UNSCHED_QUEUE' lies outside of
         * the normal static range.
         */
        shared_unsched_queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE);
        queue = queue_create("EMctrl-unschedQ-shared", EM_QUEUE_TYPE_UNSCHEDULED,
                             EM_QUEUE_PRIO_UNDEF, EM_QUEUE_GROUP_UNDEF,
                             shared_unsched_queue, EM_ATOMIC_GROUP_UNDEF,
                             NULL /* use default queue config */, &err_str);
        if (queue == EM_QUEUE_UNDEF || queue != shared_unsched_queue)
                return EM_FATAL(EM_ERR_NOT_FREE);

        /*
         * Create static internal per-core UNSCHEDULED queues used for
         * internal EM messaging. Cannot use em_queue_create_static()
         * here since the requested handles lie outside of the normal
         * static range.
         */
        memset(&unsch_conf, 0, sizeof(unsch_conf));
        unsch_conf.flags |= EM_QUEUE_FLAG_DEQ_NOT_MTSAFE;

        for (int i = 0; i < num_cores; i++) {
                em_queue_t queue_req;

                queue_req = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + i);
                snprintf(q_name, sizeof(q_name), "EMctrl-unschedQ-core%d", i);
                q_name[EM_QUEUE_NAME_LEN - 1] = '\0';

                queue = queue_create(q_name, EM_QUEUE_TYPE_UNSCHEDULED,
                                     EM_QUEUE_PRIO_UNDEF, EM_QUEUE_GROUP_UNDEF,
                                     queue_req, EM_ATOMIC_GROUP_UNDEF,
                                     &unsch_conf, /* request deq-not-mtsafe */
                                     &err_str);
                if (unlikely(queue == EM_QUEUE_UNDEF || queue != queue_req))
                        return EM_FATAL(EM_ERR_NOT_FREE);
        }

        return EM_OK;
}

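/*
 * Illustration (sketch, not part of the original file): with the reserved
 * queue-id layout set up above, a core's private ctrl queue handle can be
 * looked up directly from its core id, e.g.
 *
 *   em_queue_t my_ctrl_q =
 *           queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + em_core_id());
 *
 * which is the same lookup that internal_done_w_notif_req() performs further
 * below for synchronous operations.
 */
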
/* doc in header file */
em_status_t delete_ctrl_queues(void)
{
        const int num_cores = em_core_count();
        em_queue_t unsched_queue;
        em_event_t unsched_event;
        em_status_t stat;

        unsched_queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE);
        for (;/* flush unsched queue */;) {
                unsched_event = em_queue_dequeue(unsched_queue);
                if (unsched_event == EM_EVENT_UNDEF)
                        break;
                em_free(unsched_event);
        }
        stat = em_queue_delete(unsched_queue);
        if (unlikely(stat != EM_OK))
                return INTERNAL_ERROR(stat, EM_ESCOPE_DELETE_CTRL_QUEUES,
                                      "shared unschedQ delete");

        for (int i = 0; i < num_cores; i++) {
                unsched_queue = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + i);

                for (;/* flush unsched queue */;) {
                        unsched_event = em_queue_dequeue(unsched_queue);
                        if (unsched_event == EM_EVENT_UNDEF)
                                break;
                        em_free(unsched_event);
                }

                stat = em_queue_delete(unsched_queue);
                if (unlikely(stat != EM_OK))
                        return INTERNAL_ERROR(stat, EM_ESCOPE_DELETE_CTRL_QUEUES,
                                              "core unschedQ:%d delete", i);
        }

        return stat;
}

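/*
 * The flush-then-delete pattern above could equally be expressed as a small
 * helper (sketch only, 'flush_and_delete' is hypothetical and not part of
 * this file):
 *
 *   static em_status_t flush_and_delete(em_queue_t unsched_q)
 *   {
 *           em_event_t ev;
 *
 *           while ((ev = em_queue_dequeue(unsched_q)) != EM_EVENT_UNDEF)
 *                   em_free(ev);
 *           return em_queue_delete(unsched_q);
 *   }
 */
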
/* doc in header file */
int send_core_ctrl_events(const em_core_mask_t *const mask, em_event_t ctrl_event,
                          void (*f_done_callback)(void *arg_ptr),
                          void *f_done_arg_ptr,
                          int num_notif, const em_notif_t notif_tbl[],
                          bool sync_operation)
{
        em_status_t err;
        em_event_group_t event_group = EM_EVENT_GROUP_UNDEF;
        const internal_event_t *i_event = em_event_pointer(ctrl_event);
        const int core_count = em_core_count(); /* All running EM cores */
        const int mask_count = em_core_mask_count(mask); /* Subset of cores */
        int alloc_count = 0;
        int sent_count = 0;
        int unsent_count = mask_count;
        int first_qidx;
        int i;
        em_event_t events[mask_count];

        if (unlikely(num_notif > EM_EVENT_GROUP_MAX_NOTIF)) {
                INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_INTERNAL_NOTIF,
                               "Too large notif table (%i)", num_notif);
                return unsent_count;
        }

        /*
         * Set up internal notification when all cores are done.
         */
        event_group = internal_done_w_notif_req(mask_count /*=evgrp count*/,
                                                f_done_callback, f_done_arg_ptr,
                                                num_notif, notif_tbl,
                                                sync_operation);
        if (unlikely(event_group == EM_EVENT_GROUP_UNDEF)) {
                INTERNAL_ERROR(EM_ERR_NOT_FREE, EM_ESCOPE_INTERNAL_NOTIF,
                               "Internal 'done' notif setup failed");
                return unsent_count;
        }

        /*
         * Allocate ctrl events to be sent to the concerned cores.
         * Reuse the input ctrl_event later so alloc one less.
         * Copy content from the input ctrl_event into all allocated events.
         */
        for (i = 0; i < mask_count - 1; i++) {
                events[i] = em_alloc(sizeof(internal_event_t),
                                     EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
                if (unlikely(events[i] == EM_EVENT_UNDEF)) {
                        INTERNAL_ERROR(EM_ERR_ALLOC_FAILED,
                                       EM_ESCOPE_INTERNAL_NOTIF,
                                       "Internal event alloc failed");
                        goto err_free_resources;
                }
                alloc_count++;

                internal_event_t *i_event_tmp = em_event_pointer(events[i]);
                /* Copy input event content */
                *i_event_tmp = *i_event;
        }
        /* Reuse the input event */
        events[i] = ctrl_event;
        /* don't increment alloc_count++, caller frees input event on error */

        /*
         * Send ctrl events to the concerned cores
         */
        first_qidx = queue_id2idx(FIRST_INTERNAL_UNSCHED_QUEUE);

        for (i = 0; i < core_count; i++) {
                if (em_core_mask_isset(i, mask)) {
                        /*
                         * Send a copy to each core-specific queue,
                         * track completion using an event group.
                         */
                        err = em_send_group(events[sent_count],
                                            queue_idx2hdl(first_qidx + i),
                                            event_group);
                        if (unlikely(err != EM_OK)) {
                                INTERNAL_ERROR(err, EM_ESCOPE_INTERNAL_NOTIF,
                                               "Event group send failed");
                                goto err_free_resources;
                        }
                        sent_count++;
                        unsent_count--;
                }
        }

        return 0; /* Success, all ctrl events sent */

        /* Error handling, free resources */
err_free_resources:
        for (i = sent_count; i < alloc_count; i++)
                em_free(events[i]);
        evgrp_abort_delete(event_group);
        return unsent_count;
}

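/*
 * Caller-side sketch (assumption, loosely modelled on the queue group code
 * that uses this helper; the request-specific fields are illustrative only):
 *
 *   em_event_t ev = em_alloc(sizeof(internal_event_t),
 *                            EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
 *   internal_event_t *i_ev = em_event_pointer(ev);
 *
 *   i_ev->id = QUEUE_GROUP_ADD_REQ;  // handled in internal_event_receive()
 *   // ... fill the request-specific fields of 'i_ev' ...
 *
 *   em_core_mask_t mask;
 *   em_core_mask_zero(&mask);
 *   for (int c = 0; c < em_core_count(); c++)
 *           em_core_mask_set(c, &mask);
 *
 *   int unsent = send_core_ctrl_events(&mask, ev, NULL /*callback*/,
 *                                      NULL /*cb arg*/, 0, NULL,
 *                                      false /*async*/);
 *   // unsent == 0: all copies sent; otherwise the caller still owns 'ev'
 *   // and must handle the error (the input event is not freed here).
 */
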
/**
 * Handle the internal 'done' event
 */
static void i_event__internal_done(const internal_event_t *i_ev)
{
        int num_notif;
        em_status_t ret;

        /* Release the event group, we are done with it */
        ret = em_event_group_delete(i_ev->done.event_group);

        if (unlikely(ret != EM_OK))
                INTERNAL_ERROR(ret, EM_ESCOPE_EVENT_INTERNAL_DONE,
                               "Event group %" PRI_EGRP " delete failed (ret=%u)",
                               i_ev->done.event_group, ret);

        /* Call the callback function, performs custom actions at 'done' */
        if (i_ev->done.f_done_callback != NULL)
                i_ev->done.f_done_callback(i_ev->done.f_done_arg_ptr);

        /*
         * Send notification events if requested by the caller.
         */
        num_notif = i_ev->done.num_notif;

        if (num_notif > 0) {
                ret = send_notifs(num_notif, i_ev->done.notif_tbl);
                if (unlikely(ret != EM_OK))
                        INTERNAL_ERROR(ret, EM_ESCOPE_EVENT_INTERNAL_DONE,
                                       "em_send() of notifs(%d) failed",
                                       num_notif);
        }
}

/**
 * Handle internal ctrl events
 */
static inline void
internal_event_receive(void *eo_ctx, em_event_t event, em_event_type_t type,
                       em_queue_t queue, void *q_ctx)
{
        /* currently unused args */
        (void)eo_ctx;
        (void)type;
        (void)q_ctx;

        internal_event_t *i_event = em_event_pointer(event);

        if (unlikely(!i_event)) {
                if (event != EM_EVENT_UNDEF)
                        em_free(event); /* unrecognized odp event type? */
                INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_INTERNAL_EVENT_RECV_FUNC,
                               "Q:%" PRI_QUEUE ": Invalid event, evptr NULL", queue);
                return;
        }

        switch (i_event->id) {
        /*
         * Internal Done event
         */
        case EM_INTERNAL_DONE:
                i_event__internal_done(i_event);
                break;

        /*
         * Internal event related to Queue Group modification: add a core
         */
        case QUEUE_GROUP_ADD_REQ:
                i_event__qgrp_add_core_req(i_event);
                break;

        /*
         * Internal event related to Queue Group modification: remove a core
         */
        case QUEUE_GROUP_REM_REQ:
                i_event__qgrp_rem_core_req(i_event);
                break;
        /*
         * Internal events related to EO local start & stop functionality
         */
        case EO_START_LOCAL_REQ:
        case EO_START_SYNC_LOCAL_REQ:
        case EO_STOP_LOCAL_REQ:
        case EO_STOP_SYNC_LOCAL_REQ:
        case EO_REM_QUEUE_LOCAL_REQ:
        case EO_REM_QUEUE_SYNC_LOCAL_REQ:
        case EO_REM_QUEUE_ALL_LOCAL_REQ:
        case EO_REM_QUEUE_ALL_SYNC_LOCAL_REQ:
                i_event__eo_local_func_call_req(i_event);
                break;

        default:
                INTERNAL_ERROR(EM_ERR_BAD_ID,
                               EM_ESCOPE_INTERNAL_EVENT_RECV_FUNC,
                               "Internal ev-id:0x%" PRIx64 " Q:%" PRI_QUEUE "",
                               i_event->id, queue);
                break;
        }

        i_event->id = 0;
        em_free(event);
}

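/*
 * Extension sketch (assumption): adding a new internal request type would
 * mean defining a new id alongside the existing ones (internal_event_t and
 * its ids live in em_internal_event_types.h) and adding a case to the
 * switch above, e.g.
 *
 *   case MY_NEW_CTRL_REQ:                       // hypothetical id
 *           i_event__my_new_ctrl_req(i_event);  // hypothetical handler
 *           break;
 */
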
/* doc in header file */
em_event_group_t internal_done_w_notif_req(int event_group_count,
                                           void (*f_done_callback)(void *arg_ptr),
                                           void *f_done_arg_ptr,
                                           int num_notif, const em_notif_t notif_tbl[],
                                           bool sync_operation)
{
        em_event_group_t event_group;
        em_event_t event;
        internal_event_t *i_event;
        em_notif_t i_notif;
        em_status_t err;

        event = em_alloc(sizeof(internal_event_t), EM_EVENT_TYPE_SW,
                         EM_POOL_DEFAULT);
        if (unlikely(event == EM_EVENT_UNDEF)) {
                INTERNAL_ERROR(EM_ERR_ALLOC_FAILED,
                               EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ,
                               "Internal event 'DONE' alloc failed!");
                return EM_EVENT_GROUP_UNDEF;
        }

        event_group = em_event_group_create();
        if (unlikely(event_group == EM_EVENT_GROUP_UNDEF)) {
                em_free(event);
                INTERNAL_ERROR(EM_ERR_NOT_FREE,
                               EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ,
                               "Event group create failed!");
                return EM_EVENT_GROUP_UNDEF;
        }

        i_event = em_event_pointer(event);
        i_event->id = EM_INTERNAL_DONE;
        i_event->done.event_group = event_group;
        i_event->done.f_done_callback = f_done_callback;
        i_event->done.f_done_arg_ptr = f_done_arg_ptr;
        i_event->done.num_notif = num_notif;

        for (int i = 0; i < num_notif; i++) {
                i_event->done.notif_tbl[i].event = notif_tbl[i].event;
                i_event->done.notif_tbl[i].queue = notif_tbl[i].queue;
                i_event->done.notif_tbl[i].egroup = notif_tbl[i].egroup;
        }

        i_notif.event = event;
        if (sync_operation) {
                i_notif.queue = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE +
                                             em_core_id());
        } else {
                i_notif.queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE);
        }
        i_notif.egroup = EM_EVENT_GROUP_UNDEF;

        /*
         * Request sending of EM_INTERNAL_DONE when 'event_group_count' events
         * in 'event_group' have been seen. The 'Done' event will trigger the
         * notifications to be sent.
         */
        err = em_event_group_apply(event_group, event_group_count,
                                   1, &i_notif);
        if (unlikely(err != EM_OK)) {
                INTERNAL_ERROR(err, EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ,
                               "Event group apply failed");
                em_free(event);
                (void)em_event_group_delete(event_group);
                return EM_EVENT_GROUP_UNDEF;
        }

        return event_group;
}

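/*
 * Usage sketch (assumption; 'ready_ev', 'app_queue' and 'num_tracked' are
 * hypothetical caller-owned objects): request one user-visible notification
 * once 'num_tracked' events sent with em_send_group() have been processed:
 *
 *   em_notif_t notif = { .event  = ready_ev,
 *                        .queue  = app_queue,
 *                        .egroup = EM_EVENT_GROUP_UNDEF };
 *
 *   em_event_group_t eg =
 *           internal_done_w_notif_req(num_tracked, NULL /*no callback*/,
 *                                     NULL, 1, &notif, false /*async*/);
 *   // ...send the 'num_tracked' tracked events with em_send_group(.., eg);
 *   // when all of them have been received, the internal 'DONE' event fires
 *   // and send_notifs() delivers 'notif'.
 */
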
/* doc in header file */
void evgrp_abort_delete(em_event_group_t event_group)
{
        em_notif_t free_notif_tbl[EM_EVENT_GROUP_MAX_NOTIF];

        int num = em_event_group_get_notif(event_group,
                                           EM_EVENT_GROUP_MAX_NOTIF,
                                           free_notif_tbl);
        em_status_t err = em_event_group_abort(event_group);

        if (err == EM_OK && num > 0) {
                for (int i = 0; i < num; i++)
                        em_free(free_notif_tbl[i].event);
        }
        (void)em_event_group_delete(event_group);
}

/* doc in header file */
em_status_t send_notifs(const int num_notif, const em_notif_t notif_tbl[])
{
        em_status_t err;
        em_status_t ret = EM_OK;

        for (int i = 0; i < num_notif; i++) {
                const em_event_t event = notif_tbl[i].event;
                const em_queue_t queue = notif_tbl[i].queue;
                const em_event_group_t egrp = notif_tbl[i].egroup;

                /* 'egroup' may be uninit in old appl code, check */
                if (invalid_egrp(egrp))
                        err = em_send(event, queue);
                else
                        err = em_send_group(event, queue, egrp);

                if (unlikely(err != EM_OK)) {
                        em_free(event);
                        if (ret == EM_OK)
                                ret = err; /* return the first error */
                }
        }

        return ret;
}

/* doc in header file */
em_status_t check_notif(const em_notif_t *const notif)
{
        if (unlikely(notif == NULL || notif->event == EM_EVENT_UNDEF))
                return EM_ERR_BAD_POINTER;

        const bool is_external = queue_external(notif->queue);

        if (!is_external) {
                const queue_elem_t *q_elem = queue_elem_get(notif->queue);

                if (unlikely(q_elem == NULL || !queue_allocated(q_elem)))
                        return EM_ERR_NOT_FOUND;
        }

        if (notif->egroup != EM_EVENT_GROUP_UNDEF) {
                const event_group_elem_t *egrp_elem =
                        event_group_elem_get(notif->egroup);

                if (unlikely(egrp_elem == NULL ||
                             !event_group_allocated(egrp_elem)))
                        return EM_ERR_BAD_ID;
        }

        return EM_OK;
}

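/*
 * Example (assumption; 'ev' and 'app_queue' are hypothetical caller-owned
 * objects): a notification passes check_notif() when its event is valid,
 * its queue exists (or is external) and its event group is either
 * EM_EVENT_GROUP_UNDEF or an allocated group:
 *
 *   em_notif_t notif = { .event  = ev,
 *                        .queue  = app_queue,
 *                        .egroup = EM_EVENT_GROUP_UNDEF };
 *
 *   if (check_notif(&notif) != EM_OK)
 *           em_free(ev);  // reject: bad event ptr, unknown queue or group
 */
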
/* doc in header file */
em_status_t check_notif_tbl(const int num_notif, const em_notif_t notif_tbl[])
{
        em_status_t err;

        if (unlikely((unsigned int)num_notif > EM_EVENT_GROUP_MAX_NOTIF))
                return EM_ERR_TOO_LARGE;

        if (unlikely(num_notif > 0 && notif_tbl == NULL))
                return EM_ERR_BAD_POINTER;

        for (int i = 0; i < num_notif; i++) {
                err = check_notif(&notif_tbl[i]);
                if (unlikely(err != EM_OK))
                        return err;
        }

        return EM_OK;
}

/**
 * @brief Helper for poll_unsched_ctrl_queue()
 */
static inline void
handle_ctrl_events(em_queue_t unsched_queue,
                   const em_event_t ev_tbl[], const int num)
{
        em_locm_t *const locm = &em_locm;
        event_hdr_t *evhdr_tbl[num];

        event_to_hdr_multi(ev_tbl, evhdr_tbl/*out*/, num);

        for (int i = 0; i < num; i++) {
                /*
                 * Simulate a dispatch-round for the core-local ctrl event.
                 * Dispatch an unscheduled event as scheduled, be careful!
                 * Don't call dispatch enter/exit callbacks here.
                 */
                em_event_t event = ev_tbl[i];
                const event_hdr_t *ev_hdr = evhdr_tbl[i];
                em_event_type_t event_type = ev_hdr->event_type;

                /* Check and set core local event group */
                event_group_set_local(ev_hdr->egrp, ev_hdr->egrp_gen, 1);

                internal_event_receive(NULL, event, event_type,
                                       unsched_queue, NULL);

                /*
                 * If the event belongs to an event group, update the count
                 * and, if requested, send notifications.
                 */
                if (locm->current.egrp != EM_EVENT_GROUP_UNDEF) {
                        /*
                         * Atomically decrease the event group count.
                         * If the new count is zero, send notification events.
                         */
                        event_group_count_decrement(1);
                }
                locm->current.egrp = EM_EVENT_GROUP_UNDEF;
        }
}

/* doc in header file */
void poll_unsched_ctrl_queue(void)
{
        em_locm_t *const locm = &em_locm;

        queue_elem_t *core_unsch_qelem = locm->sync_api.ctrl_poll.core_unsched_qelem;
        em_queue_t core_unsched_queue = locm->sync_api.ctrl_poll.core_unsched_queue;

        queue_elem_t *shared_unsch_qelem = locm->sync_api.ctrl_poll.shared_unsched_qelem;
        em_queue_t shared_unsched_queue = locm->sync_api.ctrl_poll.shared_unsched_queue;

        em_locm_current_t current;

        const int deq_max = 16;
        em_event_t core_ev_tbl[deq_max];
        em_event_t shared_ev_tbl[deq_max];
        int core_num;
        int shared_num;
        int round = 0;

        do {
                core_num = queue_dequeue_multi(core_unsch_qelem,
                                               core_ev_tbl/*out*/, deq_max);
                shared_num = queue_dequeue_multi(shared_unsch_qelem,
                                                 shared_ev_tbl/*out*/, deq_max);
                if (core_num <= 0 && shared_num <= 0)
                        break; /* no ctrl events, exit loop */

                /* Save local current state the first time only */
                if (round == 0) {
                        current = locm->current; /* save */
                        locm->current.rcv_multi_cnt = 1;
                        locm->current.sched_context_type = EM_SCHED_CONTEXT_TYPE_NONE;
                }

                if (core_num > 0) {
                        locm->current.q_elem = core_unsch_qelem;
                        locm->current.sched_q_elem = core_unsch_qelem;
                        handle_ctrl_events(core_unsched_queue, core_ev_tbl, core_num);
                }
                if (shared_num > 0) {
                        locm->current.q_elem = shared_unsch_qelem;
                        locm->current.sched_q_elem = shared_unsch_qelem;
                        handle_ctrl_events(shared_unsched_queue, shared_ev_tbl, shared_num);
                }

                round++;
        } while (true);

        if (round > 0)
                locm->current = current; /* restore */
}
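
/*
 * Context sketch (assumption, based on the "poll ... during dispatch" role
 * of this function): blocking '-sync' EM APIs can keep control traffic
 * flowing on this core while they wait for other cores to act, roughly:
 *
 *   while (!my_sync_op_done)             // hypothetical completion flag
 *           poll_unsched_ctrl_queue();
 */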