EM-ODP  3.8.0-1
Event Machine on ODP
event_machine_timer.c
1 /*
2  * Copyright (c) 2016, Nokia Solutions and Networks
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * * Redistributions of source code must retain the above copyright
10  * notice, this list of conditions and the following disclaimer.
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in the
13  * documentation and/or other materials provided with the distribution.
14  * * Neither the name of the copyright holder nor the names of its
15  * contributors may be used to endorse or promote products derived
16  * from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * ---------------------------------------------------------------------
31  * Some notes about the implementation:
32  *
33  * The EM timer API is close to the ODP timer API, but two issues make
34  * this code a bit more complex than it could be:
35  *
36  * 1) there is no generic periodic timer in ODP
37  * 2) unless the pre-defined timeout event is used, there is no way to
38  * access all the information needed at runtime to implement a periodic timer
39  *
40  * Point 2 is solved by creating a timeout pool. When the user allocates an
41  * EM timeout, a new minimum-size buffer is allocated to store all the needed
42  * information. The tmo handle is a pointer to this buffer, so all data is
43  * available via the handle (ack() is the most problematic case). This does
44  * create a performance penalty, but so far the penalty looks small and it
45  * simplifies the rest of the code. Timeouts can also be pre-allocated, as
46  * the API separates creation and arming.
47  * Most of the synchronization is handled by the ODP timer; a ticketlock is
48  * used for the high-level management API.
49  *
50  */
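/*
 * Editor's sketch (not part of the original source): how an application
 * might drive this periodic timer emulation via the public EM API.
 * Error handling is omitted; 'queue', 'pool', 'period_ticks' and
 * 'my_msg_t' are assumptions, not names from this file.
 *
 *   em_timer_attr_t attr;
 *
 *   em_timer_attr_init(&attr);
 *   em_timer_t tmr = em_timer_create(&attr);
 *   em_tmo_t tmo = em_tmo_create(tmr, EM_TMO_FLAG_PERIODIC, queue);
 *   em_event_t ev = em_alloc(sizeof(my_msg_t), EM_EVENT_TYPE_SW, pool);
 *
 *   em_tmo_set_periodic(tmo, 0, period_ticks, ev); // start_abs 0: first timeout one period from now
 *
 *   In the EO receive function, each received timeout event must then be
 *   acked to arm the next period:
 *
 *   em_tmo_ack(tmo, event);
 */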
51 #include "em_include.h"
52 
53 /* timer handle = index + 1 (UNDEF 0) */
54 #define TMR_I2H(x) ((em_timer_t)(uintptr_t)((x) + 1))
55 #define TMR_H2I(x) ((int)((uintptr_t)(x) - 1))
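/* e.g. timer index 0 maps to handle 1, so a valid handle never equals
 * EM_TIMER_UNDEF (0) (editor's note)
 */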
56 
57 static inline em_status_t timer_rv_odp2em(int odpret)
58 {
59  switch (odpret) {
60  case ODP_TIMER_SUCCESS:
61  return EM_OK;
62  case ODP_TIMER_TOO_NEAR:
63  return EM_ERR_TOONEAR;
64  case ODP_TIMER_TOO_FAR:
65  return EM_ERR_TOOFAR;
66  default:
67  break;
68  }
69 
70  return EM_ERR_LIB_FAILED;
71 }
72 
73 static inline int is_queue_valid_type(em_timer_t tmr, const queue_elem_t *q_elem)
74 {
75  unsigned int tmridx = (unsigned int)TMR_H2I(tmr);
76 
77  /* implementation specific */
78  if (em_shm->timers.timer[tmridx].plain_q_ok && q_elem->type == EM_QUEUE_TYPE_UNSCHEDULED)
79  return 1;
80  /* EM assumes scheduled always supported */
81  return (q_elem->type == EM_QUEUE_TYPE_ATOMIC ||
82  q_elem->type == EM_QUEUE_TYPE_PARALLEL ||
83  q_elem->type == EM_QUEUE_TYPE_PARALLEL_ORDERED) ? 1 : 0;
84 
85  /* LOCAL or OUTPUT queues not supported */
86 }
87 
88 static inline bool is_event_type_valid(em_event_t event)
89 {
90  em_event_type_t etype = em_event_type_major(em_event_get_type(event));
91 
92  if (etype == EM_EVENT_TYPE_PACKET ||
93  etype == EM_EVENT_TYPE_SW ||
94  etype == EM_EVENT_TYPE_TIMER)
95  return true;
96 
97  /* limitations mainly set by odp spec, e.g. no vectors */
98  return false;
99 }
100 
101 /* Helper for em_tmo_get_type() */
102 static inline bool can_have_tmo_type(em_event_t event)
103 {
104  em_event_type_t etype = em_event_type_major(em_event_get_type(event));
105 
106  if (etype == EM_EVENT_TYPE_PACKET ||
107  etype == EM_EVENT_TYPE_SW ||
108  etype == EM_EVENT_TYPE_TIMER ||
109  etype == EM_EVENT_TYPE_TIMER_IND)
110  return true;
111 
112  return false;
113 }
114 
115 static inline int is_timer_valid(em_timer_t tmr)
116 {
117  unsigned int i;
118  const timer_storage_t *const tmrs = &em_shm->timers;
119 
120  if (unlikely(tmr == EM_TIMER_UNDEF))
121  return 0;
122 
123  i = (unsigned int)TMR_H2I(tmr);
124  if (unlikely(i >= EM_ODP_MAX_TIMERS))
125  return 0;
126 
127  if (unlikely(tmrs->timer[i].odp_tmr_pool == ODP_TIMER_POOL_INVALID ||
128  tmrs->timer[i].tmo_pool == ODP_POOL_INVALID))
129  return 0;
130  return 1;
131 }
132 
133 static inline em_status_t ack_ring_timeout_event(em_tmo_t tmo,
134  em_event_t ev,
135  em_tmo_state_t tmo_state,
136  event_hdr_t *ev_hdr,
137  odp_event_t odp_ev)
138 {
139  (void)ev;
140  (void)tmo_state;
141 
142  if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr->event_type != EM_EVENT_TYPE_TIMER_IND))
143  return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK,
144  "Invalid event type:%u, expected timer-ring:%u",
145  ev_hdr->event_type, EM_EVENT_TYPE_TIMER_IND);
146 
147  if (EM_CHECK_LEVEL > 0 && unlikely(tmo != ev_hdr->tmo))
148  return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK,
149  "Wrong event returned? tmo %p->%p", tmo, ev_hdr->tmo);
150 
151  int ret = odp_timer_periodic_ack(tmo->odp_timer, odp_ev);
152 
153  if (unlikely(ret < 0)) { /* failure */
154  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
155  return INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK,
156  "Tmo ACK: ring timer odp ack fail, rv %d", ret);
157  }
158 
159  if (unlikely(ret == 2)) { /* cancelled, no more events coming */
160  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; /* allows em_free */
161  ev_hdr->tmo = EM_TMO_UNDEF;
162  atomic_thread_fence(memory_order_release);
163  TMR_DBG_PRINT("last periodic event %p\n", odp_ev);
164  return EM_ERR_CANCELED;
165  }
166 
167  /* ret == 1 means the timer was cancelled but more events are still coming.
168  * Return OK so that ring and normal periodic timers behave the same,
169  * i.e. CANCELED means the tmo can now be deleted.
170  */
171  return EM_OK;
172 }
173 
174 static void cleanup_timer_create_fail(event_timer_t *timer)
175 {
176  if (timer->tmo_pool != ODP_POOL_INVALID &&
177  timer->tmo_pool != em_shm->timers.shared_tmo_pool) /* don't kill shared pool */
178  odp_pool_destroy(timer->tmo_pool);
179  if (timer->odp_tmr_pool != ODP_TIMER_POOL_INVALID)
180  odp_timer_pool_destroy(timer->odp_tmr_pool);
181  timer->tmo_pool = ODP_POOL_INVALID;
182  timer->odp_tmr_pool = ODP_TIMER_POOL_INVALID;
183  TMR_DBG_PRINT("cleaned up failed timer create\n");
184 }
185 
186 static odp_pool_t create_tmo_handle_pool(uint32_t num_buf, uint32_t cache, const event_timer_t *tmr)
187 {
188  odp_pool_param_t odp_pool_param;
189  odp_pool_t pool;
190  char tmo_pool_name[ODP_POOL_NAME_LEN];
191 
192  odp_pool_param_init(&odp_pool_param);
193  odp_pool_param.type = ODP_POOL_BUFFER;
194  odp_pool_param.buf.size = sizeof(em_timer_timeout_t);
195  odp_pool_param.buf.align = ODP_CACHE_LINE_SIZE;
196  odp_pool_param.buf.cache_size = cache;
197  odp_pool_param.stats.all = 0;
198  TMR_DBG_PRINT("tmo handle pool cache %d\n", odp_pool_param.buf.cache_size);
199 
200  /* local pool caching may cause out of buffers situation on a core. Adjust */
201  uint32_t num = num_buf + ((em_core_count() - 1) * odp_pool_param.buf.cache_size);
202 
203  if (num_buf != num) {
204  TMR_DBG_PRINT("Adjusted pool size %d->%d due to local caching (%d)\n",
205  num_buf, num, odp_pool_param.buf.cache_size);
206  }
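/* Editor's note, worked example: with num_buf = 512, 4 EM cores and
 * cache_size = 16, the pool is sized 512 + 3 * 16 = 560 buffers.
 */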
207  odp_pool_param.buf.num = num;
208  snprintf(tmo_pool_name, ODP_POOL_NAME_LEN, "Tmo-pool-%d", tmr->idx);
209  pool = odp_pool_create(tmo_pool_name, &odp_pool_param);
210  if (pool != ODP_POOL_INVALID) {
211  TMR_DBG_PRINT("Created ODP-pool: %s for %d timeouts\n",
212  tmo_pool_name, odp_pool_param.buf.num);
213  }
214  return pool;
215 }
216 
217 static inline odp_event_t alloc_odp_timeout(em_tmo_t tmo)
218 {
219  odp_timeout_t odp_tmo = odp_timeout_alloc(tmo->ring_tmo_pool);
220 
221  if (unlikely(odp_tmo == ODP_TIMEOUT_INVALID))
222  return ODP_EVENT_INVALID;
223 
224  /* init EM event header */
225  event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo);
226  odp_event_t odp_event = odp_timeout_to_event(odp_tmo);
227  em_event_t event = event_odp2em(odp_event);
228 
229  if (unlikely(!ev_hdr)) {
230  odp_timeout_free(odp_tmo);
231  return ODP_EVENT_INVALID;
232  }
233 
234  if (esv_enabled())
235  event = evstate_alloc_tmo(event, ev_hdr);
236  ev_hdr->flags.all = 0;
237  ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC; /* ring timeouts are periodic */
238  ev_hdr->tmo = tmo;
239  ev_hdr->event_type = EM_EVENT_TYPE_TIMER_IND;
240  ev_hdr->event_size = 0;
241  ev_hdr->egrp = EM_EVENT_GROUP_UNDEF;
242  ev_hdr->user_area.all = 0;
243  ev_hdr->user_area.isinit = 1;
244 
245  return odp_event;
246 }
247 
248 static inline void free_odp_timeout(odp_event_t odp_event)
249 {
250  if (esv_enabled()) {
251  em_event_t event = event_odp2em(odp_event);
252  event_hdr_t *const ev_hdr = event_to_hdr(event);
253 
254  event = ev_hdr->event;
255  evstate_free(event, ev_hdr, EVSTATE__TMO_DELETE);
256  }
257 
258  odp_event_free(odp_event);
259 }
260 
261 static inline em_status_t handle_ack_noskip(em_event_t next_tmo_ev,
262  event_hdr_t *ev_hdr,
263  em_queue_t queue)
264 {
265  if (esv_enabled())
266  evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__NOSKIP);
267 
268  em_status_t err = em_send(next_tmo_ev, queue);
269 
270  if (unlikely(err != EM_OK)) {
271  err = INTERNAL_ERROR(err, EM_ESCOPE_TMO_ACK, "Tmo ACK: noskip em_send fail");
272  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
273  ev_hdr->tmo = EM_TMO_UNDEF;
274  }
275 
276  return err; /* EM_OK or send-failure */
277 }
278 
279 static inline void handle_ack_skip(em_tmo_t tmo)
280 {
281  uint64_t odpt = odp_timer_current_tick(tmo->odp_timer_pool);
282  uint64_t skips;
283 
284  if (odpt > tmo->last_tick) /* late, over next period */
285  skips = ((odpt - tmo->last_tick) / tmo->period) + 1;
286  else
287  skips = 1; /* not yet over next period, but late for setting */
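/* Editor's note, worked example: with period = 100 ticks, last_tick = 1000
 * and current tick 1250, skips = (250 / 100) + 1 = 3 and the new target
 * becomes 1000 + 3 * 100 = 1300, the next full period still in the future.
 */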
288 
289  tmo->last_tick += skips * tmo->period;
290  TMR_DBG_PRINT("%lu skips * %lu ticks => new tgt %lu\n",
291  skips, tmo->period, tmo->last_tick);
292  if (EM_TIMER_TMO_STATS)
293  tmo->stats.num_period_skips += skips;
294 }
295 
296 static inline bool check_tmo_flags(em_tmo_flag_t flags)
297 {
298  /* Check for valid tmo flags (oneshot OR periodic mainly) */
299  if (unlikely(!(flags & (EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC))))
300  return false;
301 
302  if (unlikely((flags & EM_TMO_FLAG_ONESHOT) && (flags & EM_TMO_FLAG_PERIODIC)))
303  return false;
304 
305  if (EM_CHECK_LEVEL > 1) {
306  em_tmo_flag_t inv_flags = ~(EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC |
307  EM_TMO_FLAG_NOSKIP);
308  if (unlikely(flags & inv_flags))
309  return false;
310  }
311  return true;
312 }
313 
314 static inline bool check_timer_attr(const em_timer_attr_t *tmr_attr)
315 {
316  if (unlikely(tmr_attr == NULL)) {
317  INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_CREATE,
318  "NULL ptr given");
319  return false;
320  }
321  if (unlikely(tmr_attr->__internal_check != EM_CHECK_INIT_CALLED)) {
322  INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE,
323  "Not initialized: em_timer_attr_init(tmr_attr) not called");
324  return false;
325  }
326  if (unlikely(tmr_attr->resparam.res_ns && tmr_attr->resparam.res_hz)) {
327  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE,
328  "Only res_ns OR res_hz allowed");
329  return false;
330  }
331  return true;
332 }
333 
334 static inline bool check_timer_attr_ring(const em_timer_attr_t *ring_attr)
335 {
336  if (unlikely(ring_attr == NULL)) {
337  INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_RING_CREATE,
338  "NULL attr given");
339  return false;
340  }
341  if (EM_CHECK_LEVEL > 0 && unlikely(ring_attr->__internal_check != EM_CHECK_INIT_CALLED)) {
342  INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_RING_CREATE,
343  "Not initialized: em_timer_ring_attr_init(ring_attr) not called");
344  return false;
345  }
346 
347  if (EM_CHECK_LEVEL > 1 &&
348  unlikely(ring_attr->ringparam.base_hz.integer < 1 ||
349  ring_attr->ringparam.max_mul < 1 ||
350  (ring_attr->flags & EM_TIMER_FLAG_RING) == 0)) {
351  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE,
352  "Invalid attr values for ring timer");
353  return false;
354  }
355 
356  return true;
357 }
358 
359 static inline int find_free_timer_index(void)
360 {
361  /*
362  * Find a free timer-slot.
363  * This linear search should not be a performance problem with only a few timers
364  * available especially when these are typically created at startup.
365  * Assumes context is locked
366  */
367  int i;
368 
369  for (i = 0; i < EM_ODP_MAX_TIMERS; i++) {
370  const event_timer_t *timer = &em_shm->timers.timer[i];
371 
372  if (timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID) /* marks unused entry */
373  break;
374  }
375  return i;
376 }
377 
378 void em_timer_attr_init(em_timer_attr_t *tmr_attr)
379 {
380  if (unlikely(EM_CHECK_LEVEL > 0 && tmr_attr == NULL))
381  return; /* just ignore NULL here */
382 
383  /* clear/invalidate unused ring timer */
384  memset(&tmr_attr->ringparam, 0, sizeof(em_timer_ring_param_t));
385 
386  /* strategy: first put default resolution, then validate based on that */
387  tmr_attr->resparam.res_ns = EM_ODP_TIMER_RESOL_DEF_NS;
388  tmr_attr->resparam.res_hz = 0;
389  tmr_attr->resparam.clk_src = EM_TIMER_CLKSRC_DEFAULT;
390  tmr_attr->flags = EM_TIMER_FLAG_NONE;
391 
392  odp_timer_clk_src_t odp_clksrc;
393  odp_timer_capability_t odp_capa;
394  odp_timer_res_capability_t odp_res_capa;
395  int err;
396 
397  err = timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc);
398  if (unlikely(err)) {
399  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_ATTR_INIT,
400  "Unsupported EM-timer clock source:%d",
401  tmr_attr->resparam.clk_src);
402  return;
403  }
404  err = odp_timer_capability(odp_clksrc, &odp_capa);
405  if (unlikely(err)) {
406  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT,
407  "Timer capability: ret %d, odp-clksrc:%d",
408  err, odp_clksrc);
409  return;
410  }
411 
412  TMR_DBG_PRINT("odp says highest res %lu\n", odp_capa.highest_res_ns);
413  if (unlikely(odp_capa.highest_res_ns > tmr_attr->resparam.res_ns)) {
414  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT,
415  "Timer capability: maxres %lu req %lu, odp-clksrc:%d!",
416  odp_capa.highest_res_ns, tmr_attr->resparam.res_ns, odp_clksrc);
417  return;
418  }
419 
420  memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t));
421  odp_res_capa.res_ns = tmr_attr->resparam.res_ns;
422  err = odp_timer_res_capability(odp_clksrc, &odp_res_capa);
423  if (unlikely(err)) {
424  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT,
425  "Timer res capability failed: ret %d, odp-clksrc:%d, res %lu",
426  err, odp_clksrc, tmr_attr->resparam.res_ns);
427  return;
428  }
429 
430  TMR_DBG_PRINT("res %lu -> ODP says min %lu, max %lu\n",
431  tmr_attr->resparam.res_ns, odp_res_capa.min_tmo,
432  odp_res_capa.max_tmo);
433 
434  tmr_attr->num_tmo = EM_ODP_DEFAULT_TMOS;
435  if (odp_capa.max_timers && odp_capa.max_timers < EM_ODP_DEFAULT_TMOS)
436  tmr_attr->num_tmo = odp_capa.max_timers;
437 
438  tmr_attr->resparam.min_tmo = odp_res_capa.min_tmo;
439  tmr_attr->resparam.max_tmo = odp_res_capa.max_tmo;
440  tmr_attr->name[0] = 0; /* timer_create will add default (no index available here) */
441  tmr_attr->__internal_check = EM_CHECK_INIT_CALLED;
442 }
443 
444 em_status_t em_timer_ring_attr_init(em_timer_attr_t *ring_attr,
445  em_timer_clksrc_t clk_src,
446  uint64_t base_hz,
447  uint64_t max_mul,
448  uint64_t res_ns)
449 {
450  if (unlikely(EM_CHECK_LEVEL > 0 && ring_attr == NULL))
451  return EM_ERR_BAD_ARG;
452 
453  /* clear unused fields */
454  memset(ring_attr, 0, sizeof(em_timer_attr_t));
455 
456  ring_attr->ringparam.base_hz.integer = base_hz;
457  ring_attr->ringparam.clk_src = clk_src;
458  ring_attr->ringparam.max_mul = max_mul;
459  ring_attr->ringparam.res_ns = res_ns; /* 0 is legal and means odp default */
460  ring_attr->num_tmo = EM_ODP_DEFAULT_RING_TMOS;
461  ring_attr->flags = EM_TIMER_FLAG_RING;
462  ring_attr->name[0] = 0; /* default at ring_create, index not known here */
463 
464  odp_timer_clk_src_t odp_clksrc;
465  odp_timer_capability_t capa;
466  int rv = timer_clksrc_em2odp(ring_attr->ringparam.clk_src, &odp_clksrc);
467 
468  if (unlikely(rv))
469  return EM_ERR_BAD_ARG;
470  if (unlikely(odp_timer_capability(odp_clksrc, &capa) != 0)) {
471  TMR_DBG_PRINT("odp_timer_capability returned error for clk_src %u\n", odp_clksrc);
472  return EM_ERR_BAD_ARG; /* assume clksrc not supported */
473  }
474 
475  if (capa.periodic.max_pools == 0) /* no odp support */
476  return EM_ERR_NOT_IMPLEMENTED;
477 
478  if (capa.periodic.max_timers < ring_attr->num_tmo)
479  ring_attr->num_tmo = capa.periodic.max_timers;
480 
481  odp_timer_periodic_capability_t pcapa;
482 
483  pcapa.base_freq_hz.integer = ring_attr->ringparam.base_hz.integer;
484  pcapa.base_freq_hz.numer = ring_attr->ringparam.base_hz.numer;
485  pcapa.base_freq_hz.denom = ring_attr->ringparam.base_hz.denom;
486  pcapa.max_multiplier = ring_attr->ringparam.max_mul;
487  pcapa.res_ns = ring_attr->ringparam.res_ns;
488  rv = odp_timer_periodic_capability(odp_clksrc, &pcapa);
489  ring_attr->ringparam.res_ns = pcapa.res_ns; /* update back */
490  ring_attr->ringparam.base_hz.integer = pcapa.base_freq_hz.integer;
491  ring_attr->ringparam.base_hz.numer = pcapa.base_freq_hz.numer;
492  ring_attr->ringparam.base_hz.denom = pcapa.base_freq_hz.denom;
493  if (pcapa.max_multiplier < ring_attr->ringparam.max_mul) /* don't increase here */
494  ring_attr->ringparam.max_mul = pcapa.max_multiplier;
495  if (rv != 1) /* 1 means all values supported */
496  return EM_ERR_BAD_ARG;
497 
498  ring_attr->__internal_check = EM_CHECK_INIT_CALLED;
499  return EM_OK;
500 }
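/*
 * Editor's sketch (not part of the original source; the numeric values are
 * illustrative): initialize ring attributes for a 1000 Hz base clock with
 * multipliers up to 8 and the default resolution, then create the ring:
 *
 *   em_timer_attr_t rattr;
 *   em_timer_t ring_tmr = EM_TIMER_UNDEF;
 *
 *   if (em_timer_ring_attr_init(&rattr, EM_TIMER_CLKSRC_DEFAULT,
 *                               1000, 8, 0) == EM_OK)
 *           ring_tmr = em_timer_ring_create(&rattr);
 */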
501 
502 em_status_t em_timer_capability(em_timer_capability_t *capa, em_timer_clksrc_t clk_src)
503 {
504  if (EM_CHECK_LEVEL > 0 && unlikely(capa == NULL)) {
505  EM_LOG(EM_LOG_DBG, "%s(): NULL capa ptr!\n", __func__);
506  return EM_ERR_BAD_POINTER;
507  }
508 
509  odp_timer_clk_src_t odp_clksrc;
510  odp_timer_capability_t odp_capa;
511 
512  if (unlikely(timer_clksrc_em2odp(clk_src, &odp_clksrc) ||
513  odp_timer_capability(odp_clksrc, &odp_capa))) {
514  EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src);
515  return EM_ERR_BAD_ARG;
516  }
517 
518  capa->max_timers = odp_capa.max_pools < EM_ODP_MAX_TIMERS ?
519  odp_capa.max_pools : EM_ODP_MAX_TIMERS;
520  capa->max_num_tmo = odp_capa.max_timers;
521  capa->max_res.clk_src = clk_src;
522  capa->max_res.res_ns = odp_capa.max_res.res_ns;
523  capa->max_res.res_hz = odp_capa.max_res.res_hz;
524  capa->max_res.min_tmo = odp_capa.max_res.min_tmo;
525  capa->max_res.max_tmo = odp_capa.max_res.max_tmo;
526  capa->max_tmo.clk_src = clk_src;
527  capa->max_tmo.res_ns = odp_capa.max_tmo.res_ns;
528  capa->max_tmo.res_hz = odp_capa.max_tmo.res_hz;
529  capa->max_tmo.min_tmo = odp_capa.max_tmo.min_tmo;
530  capa->max_tmo.max_tmo = odp_capa.max_tmo.max_tmo;
531 
532  /* ring timer basic capability */
533  capa->ring.max_rings = odp_capa.periodic.max_pools; /* 0 if not supported */
534  capa->ring.max_num_tmo = odp_capa.periodic.max_timers;
535  capa->ring.min_base_hz.integer = odp_capa.periodic.min_base_freq_hz.integer;
536  capa->ring.min_base_hz.numer = odp_capa.periodic.min_base_freq_hz.numer;
537  capa->ring.min_base_hz.denom = odp_capa.periodic.min_base_freq_hz.denom;
538  capa->ring.max_base_hz.integer = odp_capa.periodic.max_base_freq_hz.integer;
539  capa->ring.max_base_hz.numer = odp_capa.periodic.max_base_freq_hz.numer;
540  capa->ring.max_base_hz.denom = odp_capa.periodic.max_base_freq_hz.denom;
541  return EM_OK;
542 }
543 
544 em_status_t em_timer_res_capability(em_timer_res_param_t *res, em_timer_clksrc_t clk_src)
545 {
546  if (EM_CHECK_LEVEL > 0 && unlikely(res == NULL)) {
547  EM_LOG(EM_LOG_DBG, "%s: NULL ptr res\n", __func__);
548  return EM_ERR_BAD_POINTER;
549  }
550 
551  odp_timer_clk_src_t odp_clksrc;
552  odp_timer_res_capability_t odp_res_capa;
553  int err;
554 
555  err = timer_clksrc_em2odp(clk_src, &odp_clksrc);
556  if (unlikely(err)) {
557  EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src);
558  return EM_ERR_BAD_ARG;
559  }
560  memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t));
561  odp_res_capa.res_ns = res->res_ns;
562  odp_res_capa.res_hz = res->res_hz; /* ODP will check if both were set */
563  odp_res_capa.max_tmo = res->max_tmo;
564  err = odp_timer_res_capability(odp_clksrc, &odp_res_capa);
565  if (unlikely(err)) {
566  EM_LOG(EM_LOG_DBG, "%s: ODP res_capability failed (ret %d)!\n", __func__, err);
567  return EM_ERR_BAD_ARG;
568  }
569  res->min_tmo = odp_res_capa.min_tmo;
570  res->max_tmo = odp_res_capa.max_tmo;
571  res->res_ns = odp_res_capa.res_ns;
572  res->res_hz = odp_res_capa.res_hz;
573  res->clk_src = clk_src;
574  return EM_OK;
575 }
576 
577 em_status_t em_timer_ring_capability(em_timer_ring_param_t *ring)
578 {
579  odp_timer_clk_src_t odp_clksrc;
580  odp_timer_periodic_capability_t pcapa;
581 
582  if (EM_CHECK_LEVEL > 0 && unlikely(ring == NULL)) {
583  EM_LOG(EM_LOG_DBG, "%s: NULL ptr ring\n", __func__);
584  return EM_ERR_BAD_POINTER;
585  }
586 
587  if (unlikely(timer_clksrc_em2odp(ring->clk_src, &odp_clksrc))) {
588  EM_LOG(EM_LOG_DBG, "%s: Invalid clk_src %d\n", __func__, ring->clk_src);
589  return EM_ERR_BAD_ARG;
590  }
591 
592  pcapa.base_freq_hz.integer = ring->base_hz.integer;
593  pcapa.base_freq_hz.numer = ring->base_hz.numer;
594  pcapa.base_freq_hz.denom = ring->base_hz.denom;
595  pcapa.max_multiplier = ring->max_mul;
596  pcapa.res_ns = ring->res_ns;
597  int rv = odp_timer_periodic_capability(odp_clksrc, &pcapa);
598 
599  ring->base_hz.integer = pcapa.base_freq_hz.integer;
600  ring->base_hz.numer = pcapa.base_freq_hz.numer;
601  ring->base_hz.denom = pcapa.base_freq_hz.denom;
602  ring->max_mul = pcapa.max_multiplier;
603  ring->res_ns = pcapa.res_ns;
604 
605  if (unlikely(rv < 0)) {
606  EM_LOG(EM_LOG_DBG, "%s: odp failed periodic capability for clk_src %d\n",
607  __func__, ring->clk_src);
608  return EM_ERR_LIB_FAILED;
609  }
610  if (rv == 0)
611  return EM_ERR_NOT_SUPPORTED; /* no error, but no exact support */
612 
613  return EM_OK; /* meet or exceed */
614 }
615 
616 em_timer_t em_timer_create(const em_timer_attr_t *tmr_attr)
617 {
618  /* timers are initialized? */
619  if (unlikely(em_shm->timers.init_check != EM_CHECK_INIT_CALLED)) {
620  INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE,
621  "Timer is not initialized!");
622  return EM_TIMER_UNDEF;
623  }
624 
625  if (EM_CHECK_LEVEL > 0) {
626  if (check_timer_attr(tmr_attr) == false)
627  return EM_TIMER_UNDEF;
628  }
629 
630  odp_timer_pool_param_t odp_tpool_param;
631  odp_timer_clk_src_t odp_clksrc;
632 
633  odp_timer_pool_param_init(&odp_tpool_param);
634  odp_tpool_param.res_ns = tmr_attr->resparam.res_ns;
635  odp_tpool_param.res_hz = tmr_attr->resparam.res_hz;
636  odp_tpool_param.min_tmo = tmr_attr->resparam.min_tmo;
637  odp_tpool_param.max_tmo = tmr_attr->resparam.max_tmo;
638  odp_tpool_param.num_timers = tmr_attr->num_tmo;
639  odp_tpool_param.priv = tmr_attr->flags & EM_TIMER_FLAG_PRIVATE ? 1 : 0;
640  if (unlikely(timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc))) {
641  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE,
642  "Unsupported EM-timer clock source:%d",
643  tmr_attr->resparam.clk_src);
644  return EM_TIMER_UNDEF;
645  }
646  odp_tpool_param.clk_src = odp_clksrc;
647 
648  /* check queue type support */
649  odp_timer_capability_t capa;
650 
651  if (unlikely(odp_timer_capability(odp_clksrc, &capa))) {
652  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE,
653  "ODP timer capa failed for clk:%d",
654  tmr_attr->resparam.clk_src);
655  return EM_TIMER_UNDEF;
656  }
657  if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */
658  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE,
659  "ODP does not support scheduled q for clk:%d",
660  tmr_attr->resparam.clk_src);
661  return EM_TIMER_UNDEF;
662  }
663 
664  odp_ticketlock_lock(&em_shm->timers.timer_lock);
665 
666  int i = find_free_timer_index();
667 
668  if (unlikely(i >= EM_ODP_MAX_TIMERS)) {
669  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
670  INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_CREATE,
671  "No more timers available");
672  return EM_TIMER_UNDEF;
673  }
674 
675  event_timer_t *timer = &em_shm->timers.timer[i];
676  char timer_pool_name[ODP_TIMER_POOL_NAME_LEN];
677  const char *name = tmr_attr->name;
678  const char *reason = "";
679 
680  if (tmr_attr->name[0] == '\0') { /* replace NULL with default */
681  snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN,
682  "EM-timer-%d", timer->idx); /* idx initialized by timer_init */
683  name = timer_pool_name;
684  }
685 
686  TMR_DBG_PRINT("Creating ODP tmr pool: clk %d, res_ns %lu, res_hz %lu\n",
687  odp_tpool_param.clk_src, odp_tpool_param.res_ns,
688  odp_tpool_param.res_hz);
689  timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param);
690  if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) {
691  reason = "odp_timer_pool_create error";
692  goto error_locked;
693  }
694  TMR_DBG_PRINT("Created timer: %s with idx: %d\n", name, timer->idx);
695 
696  /* tmo handle pool can be per-timer or shared */
697  if (!em_shm->opt.timer.shared_tmo_pool_enable) { /* per-timer pool */
698  odp_pool_t opool = create_tmo_handle_pool(tmr_attr->num_tmo,
699  em_shm->opt.timer.tmo_pool_cache, timer);
700 
701  if (unlikely(opool == ODP_POOL_INVALID)) {
702  reason = "Tmo handle buffer pool create failed";
703  goto error_locked;
704  }
705 
706  timer->tmo_pool = opool;
707  TMR_DBG_PRINT("Created per-timer tmo handle pool\n");
708  } else {
709  if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) { /* first timer */
710  odp_pool_t opool =
711  create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size,
712  em_shm->opt.timer.tmo_pool_cache, timer);
713 
714  if (unlikely(opool == ODP_POOL_INVALID)) {
715  reason = "Shared tmo handle buffer pool create failed";
716  goto error_locked;
717  }
718  timer->tmo_pool = opool;
719  em_shm->timers.shared_tmo_pool = opool;
720  TMR_DBG_PRINT("Created shared tmo handle pool for total %u tmos\n",
721  em_shm->opt.timer.shared_tmo_pool_size);
722  } else {
723  timer->tmo_pool = em_shm->timers.shared_tmo_pool;
724  }
725  }
726 
727  timer->num_tmo_reserve = tmr_attr->num_tmo;
728  if (em_shm->opt.timer.shared_tmo_pool_enable) { /* check reservation */
729  uint32_t left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved;
730 
731  if (timer->num_tmo_reserve > left) {
732  TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left);
733  reason = "Not enough tmos left in shared pool";
734  goto error_locked;
735  }
736  em_shm->timers.reserved += timer->num_tmo_reserve;
737  TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n",
738  timer->num_tmo_reserve, em_shm->timers.reserved);
739  }
740  timer->flags = tmr_attr->flags;
741  timer->plain_q_ok = capa.queue_type_plain;
742  timer->is_ring = false;
743 
744 #if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
745  if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) {
746  reason = "odp_timer_pool_start_multi failed";
747  goto error_locked;
748  }
749 #else
750  odp_timer_pool_start();
751 #endif
752  em_shm->timers.num_timers++;
753  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
754 
755  TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i), em_shm->timers.num_timers);
756  return TMR_I2H(i);
757 
758 error_locked:
759  cleanup_timer_create_fail(timer);
760  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
761 
762  TMR_DBG_PRINT("ERR odp tmr pool in: clk %u, res %lu, min %lu, max %lu, num %u\n",
763  odp_tpool_param.clk_src, odp_tpool_param.res_ns,
764  odp_tpool_param.min_tmo, odp_tpool_param.max_tmo, odp_tpool_param.num_timers);
765  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE,
766  "Timer pool create failed, reason: ", reason);
767  return EM_TIMER_UNDEF;
768 }
769 
770 em_timer_t em_timer_ring_create(const em_timer_attr_t *ring_attr)
771 {
772  /* timers are initialized? */
773  if (unlikely(em_shm->timers.init_check != EM_CHECK_INIT_CALLED)) {
774  INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE,
775  "Timer is disabled!");
776  return EM_TIMER_UNDEF;
777  }
778 
779  if (EM_CHECK_LEVEL > 0 && unlikely(check_timer_attr_ring(ring_attr) == false)) {
780  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE,
781  "NULL or incorrect attribute");
782  return EM_TIMER_UNDEF;
783  }
784 
785  odp_timer_pool_param_t odp_tpool_param;
786  odp_timer_clk_src_t odp_clksrc;
787 
788  odp_timer_pool_param_init(&odp_tpool_param);
789  odp_tpool_param.timer_type = ODP_TIMER_TYPE_PERIODIC;
790  odp_tpool_param.exp_mode = ODP_TIMER_EXP_AFTER;
791  odp_tpool_param.num_timers = ring_attr->num_tmo;
792  odp_tpool_param.priv = ring_attr->flags & EM_TIMER_FLAG_PRIVATE ? 1 : 0;
793  if (unlikely(timer_clksrc_em2odp(ring_attr->ringparam.clk_src, &odp_clksrc))) {
794  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE,
795  "Unsupported EM-timer clock source:%d",
796  ring_attr->ringparam.clk_src);
797  return EM_TIMER_UNDEF;
798  }
799  odp_tpool_param.clk_src = odp_clksrc;
800  odp_tpool_param.periodic.base_freq_hz.integer = ring_attr->ringparam.base_hz.integer;
801  odp_tpool_param.periodic.base_freq_hz.numer = ring_attr->ringparam.base_hz.numer;
802  odp_tpool_param.periodic.base_freq_hz.denom = ring_attr->ringparam.base_hz.denom;
803  odp_tpool_param.periodic.max_multiplier = ring_attr->ringparam.max_mul;
804  odp_tpool_param.res_hz = 0;
805  odp_tpool_param.res_ns = ring_attr->ringparam.res_ns;
806 
807  /* check queue type support */
808  odp_timer_capability_t capa;
809 
810  if (unlikely(odp_timer_capability(odp_clksrc, &capa))) {
811  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
812  "ODP timer capa failed for clk:%d",
813  ring_attr->ringparam.clk_src);
814  return EM_TIMER_UNDEF;
815  }
816  if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */
817  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
818  "ODP does not support scheduled q for clk:%d",
819  ring_attr->ringparam.clk_src);
820  return EM_TIMER_UNDEF;
821  }
822 
823  /* lock context to find free slot and update it */
824  timer_storage_t *const tmrs = &em_shm->timers;
825 
826  odp_ticketlock_lock(&tmrs->timer_lock);
827 
828  /* is there enough events left in shared pool ? */
829  uint32_t left = em_shm->opt.timer.ring.timer_event_pool_size - tmrs->ring_reserved;
830 
831  if (ring_attr->num_tmo > left) {
832  odp_ticketlock_unlock(&tmrs->timer_lock);
833  INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
834  "Too few ring timeout events left (req %u/%u)",
835  ring_attr->num_tmo, left);
836  return EM_TIMER_UNDEF;
837  }
838 
839  /* allocate timer */
840  int i = find_free_timer_index();
841 
842  if (unlikely(i >= EM_ODP_MAX_TIMERS)) {
843  odp_ticketlock_unlock(&tmrs->timer_lock);
844  INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
845  "No more timers available");
846  return EM_TIMER_UNDEF;
847  }
848 
849  event_timer_t *timer = &tmrs->timer[i];
850 
851  /* then timer pool */
852  char timer_pool_name[ODP_TIMER_POOL_NAME_LEN];
853  const char *name = ring_attr->name;
854  const char *reason = "";
855 
856  if (ring_attr->name[0] == '\0') { /* replace NULL with default */
857  snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN,
858  "EM-timer-%d", timer->idx); /* idx initialized by timer_init */
859  name = timer_pool_name;
860  }
861 
862  TMR_DBG_PRINT("Creating ODP periodic tmr pool: clk %d, res_ns %lu, base_hz %lu\n",
863  odp_tpool_param.clk_src, odp_tpool_param.res_ns,
864  odp_tpool_param.periodic.base_freq_hz.integer);
865  timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param);
866  if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) {
867  reason = "odp_timer_pool_create failed";
868  goto error_locked;
869  }
870  TMR_DBG_PRINT("Created ring timer: %s with idx: %d\n", name, timer->idx);
871 
872  /* tmo handle pool can be per-timer or shared */
873  if (!em_shm->opt.timer.shared_tmo_pool_enable) { /* per-timer pool */
874  odp_pool_t opool = create_tmo_handle_pool(ring_attr->num_tmo,
875  em_shm->opt.timer.tmo_pool_cache, timer);
876 
877  if (unlikely(opool == ODP_POOL_INVALID)) {
878  reason = "tmo handle pool creation failed";
879  goto error_locked;
880  }
881 
882  timer->tmo_pool = opool;
883  TMR_DBG_PRINT("Created per-timer tmo handle pool %p\n", opool);
884  } else {
885  if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) { /* first timer */
886  odp_pool_t opool =
887  create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size,
888  em_shm->opt.timer.tmo_pool_cache, timer);
889 
890  if (unlikely(opool == ODP_POOL_INVALID)) {
891  reason = "Shared tmo handle pool creation failed";
892  goto error_locked;
893  }
894 
895  timer->tmo_pool = opool;
896  em_shm->timers.shared_tmo_pool = opool;
897  TMR_DBG_PRINT("Created shared tmo handle pool %p\n", opool);
898  } else {
899  timer->tmo_pool = em_shm->timers.shared_tmo_pool;
900  }
901  }
902 
903  timer->num_tmo_reserve = ring_attr->num_tmo;
904  if (em_shm->opt.timer.shared_tmo_pool_enable) { /* check reservation */
905  left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved;
906 
907  if (timer->num_tmo_reserve > left) {
908  TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left);
909  reason = "Not enough tmos left in shared pool";
910  goto error_locked;
911  }
912  em_shm->timers.reserved += timer->num_tmo_reserve;
913  TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n",
914  timer->num_tmo_reserve, em_shm->timers.reserved);
915  }
916 
917  /* odp timeout event pool for ring tmo events is always shared for all ring timers */
918  if (tmrs->ring_tmo_pool == ODP_POOL_INVALID) {
919  odp_pool_param_t odp_tmo_pool_param;
920  char pool_name[ODP_POOL_NAME_LEN];
921 
922  odp_pool_param_init(&odp_tmo_pool_param);
923  odp_tmo_pool_param.type = ODP_POOL_TIMEOUT;
924  odp_tmo_pool_param.tmo.cache_size = em_shm->opt.timer.ring.timer_event_pool_cache;
925  TMR_DBG_PRINT("ring tmo event pool cache %u\n", odp_tmo_pool_param.tmo.cache_size);
926  odp_tmo_pool_param.tmo.num = em_shm->opt.timer.ring.timer_event_pool_size;
927  TMR_DBG_PRINT("ring tmo event pool size %u\n", odp_tmo_pool_param.tmo.num);
928  odp_tmo_pool_param.tmo.uarea_size = sizeof(event_hdr_t);
929  odp_tmo_pool_param.stats.all = 0;
930  snprintf(pool_name, ODP_POOL_NAME_LEN, "Ring-%d-tmo-pool", timer->idx);
931  tmrs->ring_tmo_pool = odp_pool_create(pool_name, &odp_tmo_pool_param);
932  if (unlikely(tmrs->ring_tmo_pool == ODP_POOL_INVALID)) {
933  reason = "odp timeout event pool creation failed";
934  goto error_locked;
935  }
936  TMR_DBG_PRINT("Created ODP-timeout event pool %p: '%s'\n",
937  tmrs->ring_tmo_pool, pool_name);
938  }
939 
940  tmrs->ring_reserved += ring_attr->num_tmo;
941  TMR_DBG_PRINT("Updated ring reserve by +%u to %u\n", ring_attr->num_tmo,
942  tmrs->ring_reserved);
943  tmrs->num_rings++;
944  tmrs->num_timers++;
945  timer->num_ring_reserve = ring_attr->num_tmo;
946  timer->flags = ring_attr->flags;
947  timer->plain_q_ok = capa.queue_type_plain;
948  timer->is_ring = true;
949  tmrs->num_ring_create_calls++;
950 
951 #if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
952  if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) {
953  reason = "odp_timer_pool_start_multi failed";
954  goto error_locked;
955  }
956 #else
957  odp_timer_pool_start();
958 #endif
959 
960  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
961 
962  TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i), tmrs->num_timers);
963  return TMR_I2H(i);
964 
965 error_locked:
966  cleanup_timer_create_fail(timer);
967  odp_ticketlock_unlock(&tmrs->timer_lock);
968 
969  TMR_DBG_PRINT("ERR odp tmr ring pool in: clk %u, res %lu, base_hz %lu, max_mul %lu, num tmo %u\n",
970  ring_attr->ringparam.clk_src,
971  ring_attr->ringparam.res_ns,
972  ring_attr->ringparam.base_hz.integer,
973  ring_attr->ringparam.max_mul,
974  ring_attr->num_tmo);
975  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
976  "Ring timer create failed, reason: ", reason);
977  return EM_TIMER_UNDEF;
978 }
979 
980 em_status_t em_timer_delete(em_timer_t tmr)
981 {
982  timer_storage_t *const tmrs = &em_shm->timers;
983  int i = TMR_H2I(tmr);
984  em_status_t rv = EM_OK;
985  odp_pool_t pool_fail = ODP_POOL_INVALID;
986 
987  /* take lock before checking so nothing can change */
988  odp_ticketlock_lock(&tmrs->timer_lock);
989  if (unlikely(!is_timer_valid(tmr))) {
990  odp_ticketlock_unlock(&tmrs->timer_lock);
991  return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_DELETE,
992  "Invalid timer:%" PRI_TMR "", tmr);
993  }
994 
995  if (tmrs->timer[i].tmo_pool != tmrs->shared_tmo_pool) { /* don't delete shared pool */
996  if (unlikely(odp_pool_destroy(tmrs->timer[i].tmo_pool) != 0)) {
997  rv = EM_ERR_LIB_FAILED;
998  pool_fail = tmrs->timer[i].tmo_pool;
999  } else {
1000  TMR_DBG_PRINT("Deleted odp pool %p\n", tmrs->timer[i].tmo_pool);
1001  }
1002  }
1003  tmrs->timer[i].tmo_pool = ODP_POOL_INVALID;
1004  odp_timer_pool_destroy(tmrs->timer[i].odp_tmr_pool);
1005  tmrs->timer[i].odp_tmr_pool = ODP_TIMER_POOL_INVALID;
1006 
1007  /* Ring delete. Don't remove shared event pool as user could still have event */
1008  if (tmrs->timer[i].is_ring && tmrs->num_rings) {
1009  tmrs->num_rings--;
1010  if (tmrs->num_rings < 1)
1011  TMR_DBG_PRINT("Last ring deleted");
1012  tmrs->ring_reserved -= tmrs->timer[i].num_ring_reserve;
1013  TMR_DBG_PRINT("Updated ring reserve by -%u to %u\n",
1014  tmrs->timer[i].num_ring_reserve, tmrs->ring_reserved);
1015  tmrs->timer[i].num_ring_reserve = 0;
1016  }
1017 
1018  tmrs->num_timers--;
1019  if (tmrs->shared_tmo_pool != ODP_POOL_INVALID) { /* shared pool in use */
1020  tmrs->reserved -= tmrs->timer[i].num_tmo_reserve;
1021  TMR_DBG_PRINT("Updated tmo reserve by -%u to %u\n",
1022  tmrs->timer[i].num_tmo_reserve, tmrs->reserved);
1023  tmrs->timer[i].num_tmo_reserve = 0;
1024  }
1025  if (tmrs->num_timers == 0 && tmrs->shared_tmo_pool != ODP_POOL_INVALID) {
1026  /* no more timers, delete shared tmo pool */
1027  if (unlikely(odp_pool_destroy(tmrs->shared_tmo_pool) != 0)) {
1028  rv = EM_ERR_LIB_FAILED;
1029  pool_fail = tmrs->shared_tmo_pool;
1030  } else {
1031  TMR_DBG_PRINT("Deleted shared tmo pool %p\n", tmrs->shared_tmo_pool);
1032  tmrs->shared_tmo_pool = ODP_POOL_INVALID;
1033  }
1034  }
1035 
1036  odp_ticketlock_unlock(&tmrs->timer_lock);
1037  if (unlikely(rv != EM_OK)) {
1038  return INTERNAL_ERROR(rv, EM_ESCOPE_TIMER_DELETE,
1039  "timer %p delete fail, odp pool %p fail\n", tmr, pool_fail);
1040  }
1041  TMR_DBG_PRINT("ok, deleted timer %p, num_timers %u\n", tmr, tmrs->num_timers);
1042  return rv;
1043 }
1044 
1045 em_timer_tick_t em_timer_current_tick(em_timer_t tmr)
1046 {
1047  const timer_storage_t *const tmrs = &em_shm->timers;
1048  int i = TMR_H2I(tmr);
1049 
1050  if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr))
1051  return 0;
1052 
1053  return odp_timer_current_tick(tmrs->timer[i].odp_tmr_pool);
1054 }
1055 
1056 em_tmo_t em_tmo_create(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue)
1057 {
1058  return em_tmo_create_arg(tmr, flags, queue, NULL);
1059 }
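/*
 * Editor's sketch (not part of the original source; 'tmr', 'q' and 'my_ctx'
 * are assumed to exist): attach an application context pointer to a timeout
 * at creation time. The userptr is handed to odp_timer_alloc() in
 * em_tmo_create_arg() below.
 *
 *   em_tmo_args_t args = { .userptr = my_ctx };
 *   em_tmo_t tmo = em_tmo_create_arg(tmr, EM_TMO_FLAG_ONESHOT, q, &args);
 */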
1060 
1061 em_tmo_t em_tmo_create_arg(em_timer_t tmr, em_tmo_flag_t flags,
1062  em_queue_t queue, em_tmo_args_t *args)
1063 {
1064  const queue_elem_t *const q_elem = queue_elem_get(queue);
1065 
1066  if (EM_CHECK_LEVEL > 0) {
1067  if (unlikely(!is_timer_valid(tmr))) {
1068  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
1069  "Invalid timer:%" PRI_TMR "", tmr);
1070  return EM_TMO_UNDEF;
1071  }
1072  if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) {
1073  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
1074  "Tmr:%" PRI_TMR ": inv.Q:%" PRI_QUEUE "",
1075  tmr, queue);
1076  return EM_TMO_UNDEF;
1077  }
1078  if (unlikely(!is_queue_valid_type(tmr, q_elem))) {
1079  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
1080  "Tmr:%" PRI_TMR ": inv.Q (type):%" PRI_QUEUE "",
1081  tmr, queue);
1082  return EM_TMO_UNDEF;
1083  }
1084  if (unlikely(!check_tmo_flags(flags))) {
1085  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
1086  "Tmr:%" PRI_TMR ": inv. tmo-flags:0x%x",
1087  tmr, flags);
1088  return EM_TMO_UNDEF;
1089  }
1090  }
1091 
1092  int i = TMR_H2I(tmr);
1093 
1094  if (EM_CHECK_LEVEL > 1 &&
1095  em_shm->timers.timer[i].is_ring &&
1096  !(flags & EM_TMO_FLAG_PERIODIC)) {
1097  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
1098  "Tmr:%" PRI_TMR ": asking oneshot with ring timer!",
1099  tmr);
1100  return EM_TMO_UNDEF;
1101  }
1102 
1103  odp_buffer_t tmo_buf = odp_buffer_alloc(em_shm->timers.timer[i].tmo_pool);
1104 
1105  if (unlikely(tmo_buf == ODP_BUFFER_INVALID)) {
1106  INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE,
1107  "Tmr:%" PRI_TMR ": tmo pool exhausted", tmr);
1108  return EM_TMO_UNDEF;
1109  }
1110 
1111  em_timer_timeout_t *tmo = odp_buffer_addr(tmo_buf);
1112  odp_timer_pool_t odptmr = em_shm->timers.timer[i].odp_tmr_pool;
1113 
1114  const void *userptr = NULL;
1115 
1116  if (args != NULL)
1117  userptr = args->userptr;
1118 
1119  tmo->odp_timer = odp_timer_alloc(odptmr, q_elem->odp_queue, userptr);
1120  if (unlikely(tmo->odp_timer == ODP_TIMER_INVALID)) {
1121  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CREATE,
1122  "Tmr:%" PRI_TMR ": odp_timer_alloc() failed", tmr);
1123  odp_buffer_free(tmo_buf);
1124  return EM_TMO_UNDEF;
1125  }
1126 
1127  /* OK, init state. Some values copied for faster access runtime */
1128  tmo->period = 0;
1129  tmo->odp_timer_pool = odptmr;
1130  tmo->timer = tmr;
1131  tmo->odp_buffer = tmo_buf;
1132  tmo->flags = flags;
1133  tmo->queue = queue;
1134  tmo->is_ring = em_shm->timers.timer[i].is_ring;
1135  tmo->odp_timeout = ODP_EVENT_INVALID;
1136  tmo->ring_tmo_pool = em_shm->timers.ring_tmo_pool;
1137 
1138  if (tmo->is_ring) { /* pre-allocate timeout event to save time at start */
1139  odp_event_t odp_tmo_event = alloc_odp_timeout(tmo);
1140 
1141  if (unlikely(odp_tmo_event == ODP_EVENT_INVALID)) {
1142  INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE,
1143  "Ring: odp timeout event allocation failed");
1144  odp_timer_free(tmo->odp_timer);
1145  odp_buffer_free(tmo_buf);
1146  return EM_TMO_UNDEF;
1147  }
1148  tmo->odp_timeout = odp_tmo_event;
1149  TMR_DBG_PRINT("Ring: allocated odp timeout ev %p\n", tmo->odp_timeout);
1150  }
1151 
1152  if (EM_TIMER_TMO_STATS)
1153  memset(&tmo->stats, 0, sizeof(em_tmo_stats_t));
1154 
1155  odp_atomic_init_u32(&tmo->state, EM_TMO_STATE_IDLE);
1156  TMR_DBG_PRINT("ODP timer %p allocated\n", tmo->odp_timer);
1157  TMR_DBG_PRINT("tmo %p created\n", tmo);
1158  return tmo;
1159 }
1160 
1161 em_status_t em_tmo_delete(em_tmo_t tmo)
1162 {
1163  if (EM_CHECK_LEVEL > 0) {
1164  RETURN_ERROR_IF(tmo == EM_TMO_UNDEF,
1165  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_DELETE,
1166  "Invalid args: tmo:%" PRI_TMO, tmo);
1167  }
1168 
1169  em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
1170 
1171  if (EM_CHECK_LEVEL > 1) {
1172  /* check that tmo buf is valid before accessing other struct members */
1173  RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer),
1174  EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE,
1175  "Invalid tmo buffer");
1176 
1177  RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
1178  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_DELETE,
1179  "Invalid tmo state:%d", tmo_state);
1180 
1181  RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID,
1182  EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE,
1183  "Invalid tmo odp_timer, deleted?");
1184  }
1185 
1186  TMR_DBG_PRINT("ODP timer %p\n", tmo->odp_timer);
1187 
1188  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_UNKNOWN);
1189 
1190 #if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
1191  /* ODP 1.43 odp_timer_free() returns status */
1192  int fret = odp_timer_free(tmo->odp_timer);
1193 
1194  RETURN_ERROR_IF(fret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE,
1195  "odp timer free failed!?, rv %d\n", fret);
1196 #else
1197  /* Older than ODP 1.43 odp_timer_free() returns an event */
1198  odp_event_t odp_evt;
1199 
1200  odp_evt = ODP_EVENT_INVALID;
1201  odp_evt = odp_timer_free(tmo->odp_timer);
1202 
1203  RETURN_ERROR_IF(odp_evt != ODP_EVENT_INVALID, EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE,
1204  "odp timer free returned an event %p\n", odp_evt);
1205 #endif
1206 
1207  odp_buffer_t tmp = tmo->odp_buffer;
1208 
1209  tmo->odp_timer = ODP_TIMER_INVALID;
1210  tmo->odp_buffer = ODP_BUFFER_INVALID;
1211 
1212  if (tmo->is_ring && tmo->odp_timeout != ODP_EVENT_INVALID) {
1213  TMR_DBG_PRINT("ring: free unused ODP timeout ev %p\n", tmo->odp_timeout);
1214  free_odp_timeout(tmo->odp_timeout);
1215  tmo->odp_timeout = ODP_EVENT_INVALID;
1216  }
1217 
1218  odp_buffer_free(tmp);
1219 
1220  TMR_DBG_PRINT("tmo %p delete ok\n", tmo);
1221 
1222  return EM_OK;
1223 }
1224 
1225 em_status_t em_tmo_set_abs(em_tmo_t tmo, em_timer_tick_t ticks_abs,
1226  em_event_t tmo_ev)
1227 {
1228  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
1229  (tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF),
1230  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS,
1231  "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
1232  tmo, tmo_ev);
1233  /* check that tmo buf is valid before accessing other struct members */
1234  RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
1235  EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS,
1236  "Invalid tmo buffer");
1237  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
1238  (tmo->flags & EM_TMO_FLAG_PERIODIC),
1239  EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_ABS,
1240  "Cannot set periodic tmo, use _set_periodic()");
1241  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
1242  !is_event_type_valid(tmo_ev),
1243  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS,
1244  "invalid event type");
1245  if (EM_CHECK_LEVEL > 1) {
1246  em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
1247 
1248  RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
1249  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_ABS,
1250  "Invalid tmo state:%d", tmo_state);
1251  }
1252  RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 &&
1253  tmo->odp_timer == ODP_TIMER_INVALID,
1254  EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS,
1255  "Invalid tmo odp_timer");
1256 
1257  event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
1258  odp_event_t odp_ev = event_em2odp(tmo_ev);
1259  bool esv_ena = esv_enabled();
1260  odp_timer_start_t startp;
1261 
1262 
1263  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND,
1264  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS,
1265  "Invalid event type: timer-ring");
1266 
1267  if (esv_ena)
1268  evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS);
1269 
1270  /* set tmo active and arm with absolute time */
1271  startp.tick_type = ODP_TIMER_TICK_ABS;
1272  startp.tick = ticks_abs;
1273  startp.tmo_ev = odp_ev;
1274  ev_hdr->flags.tmo_type = EM_TMO_TYPE_ONESHOT;
1275  ev_hdr->tmo = tmo;
1276  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
1277  int odpret = odp_timer_start(tmo->odp_timer, &startp);
1278 
1279  if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
1280  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
1281  ev_hdr->tmo = EM_TMO_UNDEF;
1282  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
1283  if (esv_ena)
1284  evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS__FAIL);
1285 
1286  em_status_t retval = timer_rv_odp2em(odpret);
1287 
1288  if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
1289  TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
1290  return retval;
1291  }
1292 
1293  return INTERNAL_ERROR(retval, EM_ESCOPE_TMO_SET_ABS,
1294  "odp_timer_start():%d", odpret);
1295  }
1296  TMR_DBG_PRINT("OK\n");
1297  return EM_OK;
1298 }
1299 
1300 em_status_t em_tmo_set_rel(em_tmo_t tmo, em_timer_tick_t ticks_rel,
1301  em_event_t tmo_ev)
1302 {
1303  if (EM_CHECK_LEVEL > 0) {
1304  RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF,
1305  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL,
1306  "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
1307  tmo, tmo_ev);
1308 
1309  RETURN_ERROR_IF(tmo->flags & EM_TMO_FLAG_PERIODIC,
1310  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL,
1311  "%s: Periodic no longer supported", __func__);
1312  }
1313  if (EM_CHECK_LEVEL > 1) {
1314  /* check that tmo buf is valid before accessing other struct members */
1315  RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer),
1316  EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_REL,
1317  "Invalid tmo buffer");
1318 
1319  em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
1320 
1321  RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
1322  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_REL,
1323  "Invalid tmo state:%d", tmo_state);
1324  }
1325  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
1326  !is_event_type_valid(tmo_ev),
1327  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL,
1328  "invalid event type");
1329 
1330  event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
1331  odp_event_t odp_ev = event_em2odp(tmo_ev);
1332  bool esv_ena = esv_enabled();
1333  odp_timer_start_t startp;
1334 
1335 
1336  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND,
1337  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL,
1338  "Invalid event type: timer-ring");
1339 
1340  if (esv_ena)
1341  evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL);
1342 
1343  /* set tmo active and arm with relative time */
1344  startp.tick_type = ODP_TIMER_TICK_REL;
1345  startp.tick = ticks_rel;
1346  startp.tmo_ev = odp_ev;
1347  ev_hdr->flags.tmo_type = EM_TMO_TYPE_ONESHOT;
1348  ev_hdr->tmo = tmo;
1349  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
1350  int odpret = odp_timer_start(tmo->odp_timer, &startp);
1351 
1352  if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
1353  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
1354  ev_hdr->tmo = EM_TMO_UNDEF;
1355  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
1356  if (esv_ena)
1357  evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL__FAIL);
1358 
1359  em_status_t retval = timer_rv_odp2em(odpret);
1360 
1361  if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
1362  TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
1363  return retval;
1364  }
1365  return INTERNAL_ERROR(retval, EM_ESCOPE_TMO_SET_REL,
1366  "odp_timer_start():%d", odpret);
1367  }
1368  TMR_DBG_PRINT("OK\n");
1369  return EM_OK;
1370 }
1371 
1372 em_status_t em_tmo_set_periodic(em_tmo_t tmo,
1373  em_timer_tick_t start_abs,
1374  em_timer_tick_t period,
1375  em_event_t tmo_ev)
1376 {
1377  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
1378  (tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF),
1379  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC,
1380  "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
1381  tmo, tmo_ev);
1382  /* check that tmo buf is valid before accessing other struct members */
1383  RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
1384  EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC,
1385  "Invalid tmo buffer");
1386  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC),
1387  EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC,
1388  "Not periodic tmo");
1389  if (EM_CHECK_LEVEL > 1) {
1390  em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
1391 
1392  RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
1393  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_PERIODIC,
1394  "Invalid tmo state:%d", tmo_state);
1395  }
1396  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
1397  !is_event_type_valid(tmo_ev),
1398  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC,
1399  "invalid event type");
1400 
1401  event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
1402  odp_event_t odp_ev = event_em2odp(tmo_ev);
1403  bool esv_ena = esv_enabled();
1404  odp_timer_start_t startp;
1405 
1406 
1407  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && ev_hdr->event_type == EM_EVENT_TYPE_TIMER_IND,
1408  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC,
1409  "Invalid event type: timer-ring");
1410 
1411  if (esv_ena)
1412  evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC);
1413 
1414  TMR_DBG_PRINT("start %lu, period %lu\n", start_abs, period);
1415 
1416  tmo->period = period;
1417  if (start_abs == 0)
1418  start_abs = odp_timer_current_tick(tmo->odp_timer_pool) + period;
1419  tmo->last_tick = start_abs;
1420  TMR_DBG_PRINT("last_tick %lu, now %lu\n", tmo->last_tick,
1421  odp_timer_current_tick(tmo->odp_timer_pool));
1422 
1423  /* set tmo active and arm with absolute time */
1424  startp.tick_type = ODP_TIMER_TICK_ABS;
1425  startp.tick = start_abs;
1426  startp.tmo_ev = odp_ev;
1427  ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC;
1428  ev_hdr->tmo = tmo;
1429  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
1430  int odpret = odp_timer_start(tmo->odp_timer, &startp);
1431 
1432  if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
1433  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
1434  ev_hdr->tmo = EM_TMO_UNDEF;
1435  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
1436  if (esv_ena)
1437  evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC__FAIL);
1438 
1439  TMR_DBG_PRINT("diff to tmo %ld\n",
1440  (int64_t)tmo->last_tick -
1441  (int64_t)odp_timer_current_tick(tmo->odp_timer_pool));
1442 
1443  em_status_t retval = timer_rv_odp2em(odpret);
1444 
1445  if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
1446  TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
1447  return retval;
1448  }
1449  return INTERNAL_ERROR(retval,
1450  EM_ESCOPE_TMO_SET_PERIODIC,
1451  "odp_timer_start():%d", odpret);
1452  }
1453  TMR_DBG_PRINT("OK\n");
1454  return EM_OK;
1455 }
1456 
1457 em_status_t em_tmo_set_periodic_ring(em_tmo_t tmo,
1458  em_timer_tick_t start_abs,
1459  uint64_t multiplier,
1460  em_event_t tmo_ev)
1461 {
1462  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo == EM_TMO_UNDEF,
1463  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC_RING,
1464  "Inv.args: tmo UNDEF");
1465  /* check that tmo buf is valid before accessing other struct members */
1466  RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
1467  EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC_RING,
1468  "Invalid tmo buffer");
1469  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC),
1470  EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC_RING,
1471  "Not periodic tmo");
1472  if (EM_CHECK_LEVEL > 1) {
1473  em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
1474 
1475  RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
1476  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_PERIODIC_RING,
1477  "Invalid tmo state:%d", tmo_state);
1478  }
1479 
1480  odp_timer_periodic_start_t startp;
1481  odp_event_t odp_ev = tmo->odp_timeout; /* pre-allocated */
1482 
1483  if (tmo_ev != EM_EVENT_UNDEF) { /* user gave event to (re-)use */
1484  odp_ev = event_em2odp(tmo_ev);
1485  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
1486  odp_event_type(odp_ev) != ODP_EVENT_TIMEOUT,
1487  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC_RING,
1488  "Inv.args: not TIMER event given");
1489  odp_timeout_t odp_tmo = odp_timeout_from_event(odp_ev);
1490  event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo);
1491 
1492  ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC;
1493  ev_hdr->tmo = tmo;
1494  TMR_DBG_PRINT("user event %p\n", tmo_ev);
1495  } else {
1496  tmo->odp_timeout = ODP_EVENT_INVALID; /* now used */
1497  }
1498 
1499  if (odp_ev == ODP_EVENT_INVALID) { /* re-start, pre-alloc used */
1500  odp_event_t odp_tmo_event = alloc_odp_timeout(tmo);
1501 
1502  if (unlikely(odp_tmo_event == ODP_EVENT_INVALID))
1503  return INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_SET_PERIODIC_RING,
1504  "Ring: odp timeout event allocation failed");
1505  odp_ev = odp_tmo_event;
1506  }
1507 
1508  TMR_DBG_PRINT("ring tmo start_abs %lu, M=%lu, odp ev=%p\n", start_abs, multiplier, odp_ev);
1509  startp.first_tick = start_abs;
1510  startp.freq_multiplier = multiplier;
1511  startp.tmo_ev = odp_ev;
1512  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
1513  int odpret = odp_timer_periodic_start(tmo->odp_timer, &startp);
1514 
1515  if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
1516  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
1517 
1518  em_status_t retval = timer_rv_odp2em(odpret);
1519 
1520  if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
1521  TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
1522  return retval;
1523  }
1524  return INTERNAL_ERROR(retval,
1525  EM_ESCOPE_TMO_SET_PERIODIC_RING,
1526  "odp_timer_periodic_start(): ret %d", odpret);
1527  }
1528  /* ok */
1529  TMR_DBG_PRINT("OK\n");
1530  return EM_OK;
1531 }
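/*
 * Editor's sketch (not part of the original source; 'ring_tmr' and 'q' are
 * assumed to exist, values illustrative): start a ring tmo expiring at
 * 4 x base_hz; EM_EVENT_UNDEF lets the pre-allocated timeout event be used:
 *
 *   em_tmo_t tmo = em_tmo_create(ring_tmr, EM_TMO_FLAG_PERIODIC, q);
 *
 *   em_tmo_set_periodic_ring(tmo, 0, 4, EM_EVENT_UNDEF);
 */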
1532 
1533 em_status_t em_tmo_cancel(em_tmo_t tmo, em_event_t *cur_event)
1534 {
1535  if (EM_CHECK_LEVEL > 0) {
1536  RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || cur_event == NULL,
1537  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CANCEL,
1538  "Invalid args: tmo:%" PRI_TMO " cur_event:%p",
1539  tmo, cur_event);
1540  }
1541  *cur_event = EM_EVENT_UNDEF;
1542  if (EM_CHECK_LEVEL > 1) {
1543  RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer),
1544  EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL,
1545  "Invalid tmo buffer");
1546  RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID,
1547  EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL,
1548  "Invalid tmo odp_timer");
1549  }
1550 
1551  /* check state: EM_TMO_STATE_UNKNOWN | EM_TMO_STATE_IDLE | EM_TMO_STATE_ACTIVE */
1552  em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
1553 
1554  RETURN_ERROR_IF(tmo_state != EM_TMO_STATE_ACTIVE,
1555  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL,
1556  "Invalid tmo state:%d (!%d)", tmo_state, EM_TMO_STATE_ACTIVE);
1557 
1558  TMR_DBG_PRINT("ODP tmo %p\n", tmo->odp_timer);
1559 
1560  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
1561 
1562  if (tmo->is_ring) { /* periodic ring never returns event here */
1563  RETURN_ERROR_IF(odp_timer_periodic_cancel(tmo->odp_timer) != 0,
1564  EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CANCEL,
1565  "odp periodic cancel fail");
1566  return EM_ERR_TOONEAR; /* ack will tell when no more coming */
1567  }
1568 
1570  /* not ring, cancel */
1570  odp_event_t odp_ev = ODP_EVENT_INVALID;
1571  int ret = odp_timer_cancel(tmo->odp_timer, &odp_ev);
1572 
1573  if (ret == ODP_TIMER_TOO_NEAR) {
1574  if (EM_CHECK_LEVEL > 1) {
1575  RETURN_ERROR_IF(odp_ev != ODP_EVENT_INVALID,
1576  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL,
1577  "ODP timer cancel returned TOONEAR but also returned an event!");
1578  }
1579  TMR_DBG_PRINT("ODP returned TOONEAR\n");
1580  return EM_ERR_TOONEAR;
1581  }
1582 
1583  RETURN_ERROR_IF(ret == ODP_TIMER_FAIL,
1584  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL,
1585  "ODP timer cancel fail!");
1586 
1587  /*
1588  * Cancel successful (ret == ODP_TIMER_SUCCESS): odp_ev contains the canceled tmo event
1589  */
1590 
1591  if (EM_CHECK_LEVEL > 2) {
1592  RETURN_ERROR_IF(!odp_event_is_valid(odp_ev),
1593  EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CANCEL,
1594  "Invalid tmo event from odp_timer_cancel");
1595  }
1596 
1597  em_event_t tmo_ev = event_odp2em(odp_ev);
1598  event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
1599 
1600  /* successful cancel also resets the event tmo type */
1601  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
1602  ev_hdr->tmo = EM_TMO_UNDEF;
1603 
1604  if (esv_enabled())
1605  tmo_ev = evstate_em2usr(tmo_ev, ev_hdr, EVSTATE__TMO_CANCEL);
1606 
1607  *cur_event = tmo_ev;
1608  TMR_DBG_PRINT("OK\n");
1609  return EM_OK;
1610 }
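
/*
 * Usage sketch, not part of this file: canceling a timeout. On EM_OK the
 * unsent timeout event is returned and owned by the caller. EM_ERR_TOONEAR
 * means it could not be recalled (ring tmos always take this path) and the
 * event must instead be caught later, e.g. via em_tmo_get_type().
 */
static void cancel_tmo_sketch(em_tmo_t tmo)
{
	em_event_t ev = EM_EVENT_UNDEF;

	if (em_tmo_cancel(tmo, &ev) == EM_OK)
		em_free(ev); /* recalled in time, event is ours to free */
}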
1611 
1612 em_status_t em_tmo_ack(em_tmo_t tmo, em_event_t next_tmo_ev)
1613 {
1614  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 &&
1615  (tmo == EM_TMO_UNDEF || next_tmo_ev == EM_EVENT_UNDEF),
1616  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK,
1617  "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
1618  tmo, next_tmo_ev);
1619  /* check that tmo buf is valid before accessing other struct members */
1620  RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
1621  EM_ERR_BAD_ID, EM_ESCOPE_TMO_ACK,
1622  "Tmo ACK: invalid tmo buffer");
1623  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC),
1624  EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_ACK,
1625  "Tmo ACK: Not a periodic tmo");
1626 
1627  if (EM_TIMER_TMO_STATS)
1628  tmo->stats.num_acks++;
1629 
1630  em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
1631  event_hdr_t *ev_hdr = event_to_hdr(next_tmo_ev);
1632  odp_event_t odp_ev = event_em2odp(next_tmo_ev);
1633 
1634  if (tmo->is_ring) /* ring timer */
1635  return ack_ring_timeout_event(tmo, next_tmo_ev, tmo_state, ev_hdr, odp_ev);
1636 
1637  /* not periodic ring, set next timeout */
1638  if (unlikely(tmo_state != EM_TMO_STATE_ACTIVE)) {
1639  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
1640  ev_hdr->tmo = EM_TMO_UNDEF;
1641 
1642  if (tmo_state == EM_TMO_STATE_IDLE) /* canceled, skip errorhandler */
1643  return EM_ERR_CANCELED;
1644 
1645  return INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_TMO_ACK,
1646  "Tmo ACK: invalid tmo state:%d", tmo_state);
1647  }
1648 
1649  bool esv_ena = esv_enabled();
1650 
1651  if (esv_ena)
1652  evstate_usr2em(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK);
1653  /*
1654  * The periodic timer will silently stop if ack fails! Attempt to
1655  * handle exceptions and if the tmo cannot be renewed, call
1656  * the errorhandler so the application may recover.
1657  */
1658  tmo->last_tick += tmo->period; /* maintain absolute time */
1659  int ret;
1660  int tries = EM_TIMER_ACK_TRIES;
1661  em_status_t err;
1662  odp_timer_start_t startp;
1663 
1664  startp.tick_type = ODP_TIMER_TICK_ABS;
1665  startp.tmo_ev = odp_ev;
1666  ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC; /* could be new event */
1667  ev_hdr->tmo = tmo;
1668 
1669  /* try to set tmo EM_TIMER_ACK_TRIES times */
1670  do {
1671  /* ask new timeout for next period */
1672  startp.tick = tmo->last_tick;
1673  ret = odp_timer_start(tmo->odp_timer, &startp);
1674  /*
1675  * If 'ret' is ODP_TIMER_TOO_NEAR, the ack() call was delayed past
1676  * the next period, i.e. the target tick is already in the past.
1677  * Other errors should not happen and are fatal for this tmo.
1678  */
1679  if (likely(ret != ODP_TIMER_TOO_NEAR)) {
1680  if (ret != ODP_TIMER_SUCCESS) {
1681  TMR_DBG_PRINT("ODP return %d\n"
1682  "tmo tgt/tick now %lu/%lu\n",
1683  ret, tmo->last_tick,
1684  odp_timer_current_tick(tmo->odp_timer_pool));
1685  }
1686  break; /* ok */
1687  }
1688 
1689  /* ODP_TIMER_TOO_NEAR: ack() delayed beyond next time slot */
1690  if (EM_TIMER_TMO_STATS)
1691  tmo->stats.num_late_ack++;
1692  TMR_DBG_PRINT("late, tgt/now %lu/%lu\n", tmo->last_tick,
1693  odp_timer_current_tick(tmo->odp_timer_pool));
1694 
1695  if (tmo->flags & EM_TMO_FLAG_NOSKIP) /* not allowed to skip, send immediately */
1696  return handle_ack_noskip(next_tmo_ev, ev_hdr, tmo->queue);
1697 
1698  /* skip already passed periods and try again */
1699  handle_ack_skip(tmo);
1700 
1701  tries--;
1702  if (unlikely(tries < 1)) {
1703  err = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED,
1704  EM_ESCOPE_TMO_ACK,
1705  "Tmo ACK: too many retries:%u",
1706  EM_TIMER_ACK_TRIES);
1707  goto ack_err;
1708  }
1709  } while (ret != ODP_TIMER_SUCCESS);
1710 
1711  if (unlikely(ret != ODP_TIMER_SUCCESS)) {
1712  err = INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK,
1713  "Tmo ACK: failed to renew tmo (odp ret %d)",
1714  ret);
1715  goto ack_err;
1716  }
1717  return EM_OK;
1718 
1719 ack_err:
1720  /* fail, restore event state */
1721  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
1722  ev_hdr->tmo = EM_TMO_UNDEF;
1723  if (esv_ena)
1724  evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__FAIL);
1725  return err;
1726 }
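
/*
 * Usage sketch, not part of this file: acknowledging a periodic timeout
 * from a (hypothetical) EO receive context. On EM_OK the event is handed
 * back to the timer for the next period; on EM_ERR_CANCELED or any other
 * failure ownership stays with the caller.
 */
static void handle_periodic_tmo_sketch(em_tmo_t tmo, em_event_t event)
{
	/* ... process the timeout work first ... */

	if (em_tmo_ack(tmo, event) != EM_OK)
		em_free(event); /* canceled or renewal failed: free the event */
}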
1727 
1728 int em_timer_get_all(em_timer_t *tmr_list, int max)
1729 {
1730  odp_ticketlock_lock(&em_shm->timers.timer_lock);
1731 
1732  const uint32_t num_timers = em_shm->timers.num_timers;
1733 
1734  if (tmr_list && max > 0 && num_timers > 0) {
1735  int num = 0;
1736 
1737  for (int i = 0; i < EM_ODP_MAX_TIMERS; i++) {
1738  if (em_shm->timers.timer[i].odp_tmr_pool != ODP_TIMER_POOL_INVALID) {
1739  tmr_list[num] = TMR_I2H(i);
1740  num++;
1741  if (num >= max)
1742  break;
1743  }
1744  }
1745  }
1746 
1747  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
1748 
1749  return num_timers;
1750 }
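
/*
 * Usage sketch, not part of this file: enumerating active timers. The
 * return value is the total number of timers, which may exceed 'max';
 * only min(total, max) handles are written to the array.
 */
static int valid_timer_handles_sketch(em_timer_t out[8])
{
	int total = em_timer_get_all(out, 8);

	return total < 8 ? total : 8; /* number of valid entries in out[] */
}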
1751 
1752 em_status_t em_timer_get_attr(em_timer_t tmr, em_timer_attr_t *tmr_attr)
1753 {
1754  odp_timer_pool_info_t poolinfo;
1755  int i = TMR_H2I(tmr);
1756  int ret;
1757  em_timer_clksrc_t clk;
1758 
1759  if (EM_CHECK_LEVEL > 0)
1760  RETURN_ERROR_IF(!is_timer_valid(tmr) || tmr_attr == NULL,
1761  EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_ATTR,
1762  "Inv.args: timer:%" PRI_TMR " tmr_attr:%p",
1763  tmr, tmr_attr);
1764 
1765  /* get current values from ODP */
1766  ret = odp_timer_pool_info(em_shm->timers.timer[i].odp_tmr_pool, &poolinfo);
1767  RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_GET_ATTR,
1768  "ODP timer pool info failed");
1769 
1770  timer_clksrc_odp2em(poolinfo.param.clk_src, &clk);
1771 
1772  if (poolinfo.param.timer_type == ODP_TIMER_TYPE_SINGLE) {
1773  tmr_attr->resparam.res_ns = poolinfo.param.res_ns;
1774  tmr_attr->resparam.res_hz = poolinfo.param.res_hz;
1775  tmr_attr->resparam.max_tmo = poolinfo.param.max_tmo;
1776  tmr_attr->resparam.min_tmo = poolinfo.param.min_tmo;
1777  tmr_attr->resparam.clk_src = clk;
1778  memset(&tmr_attr->ringparam, 0, sizeof(em_timer_ring_param_t));
1779  } else {
1780  tmr_attr->ringparam.base_hz.integer = poolinfo.param.periodic.base_freq_hz.integer;
1781  tmr_attr->ringparam.base_hz.numer = poolinfo.param.periodic.base_freq_hz.numer;
1782  tmr_attr->ringparam.base_hz.denom = poolinfo.param.periodic.base_freq_hz.denom;
1783  tmr_attr->ringparam.max_mul = poolinfo.param.periodic.max_multiplier;
1784  tmr_attr->ringparam.res_ns = poolinfo.param.res_ns;
1785  tmr_attr->ringparam.clk_src = clk;
1786  memset(&tmr_attr->resparam, 0, sizeof(em_timer_res_param_t));
1787  }
1788 
1789  tmr_attr->num_tmo = poolinfo.param.num_timers;
1790  tmr_attr->flags = em_shm->timers.timer[i].flags;
1791 
1792  strncpy(tmr_attr->name, poolinfo.name, EM_TIMER_NAME_LEN - 1);
1793  tmr_attr->name[EM_TIMER_NAME_LEN - 1] = '\0';
1794  return EM_OK;
1795 }
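
/*
 * Usage sketch, not part of this file: querying a (non-ring) timer for
 * its shortest supported timeout, e.g. to validate a period before
 * calling em_tmo_set_periodic().
 */
static uint64_t min_tmo_ns_sketch(em_timer_t tmr)
{
	em_timer_attr_t attr;

	if (em_timer_get_attr(tmr, &attr) != EM_OK)
		return 0;

	return attr.resparam.min_tmo; /* zeroed for ring timers */
}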
1796 
1797 uint64_t em_timer_get_freq(em_timer_t tmr)
1798 {
1799  const timer_storage_t *const tmrs = &em_shm->timers;
1800 
1801  if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) {
1802  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_FREQ,
1803  "Invalid timer:%" PRI_TMR "", tmr);
1804  return 0;
1805  }
1806 
1807  return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool,
1808  1000ULL * 1000ULL * 1000ULL); /* 1 sec */
1809 }
1810 
1811 uint64_t em_timer_tick_to_ns(em_timer_t tmr, em_timer_tick_t ticks)
1812 {
1813  const timer_storage_t *const tmrs = &em_shm->timers;
1814 
1815  if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) {
1816  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_TICK_TO_NS,
1817  "Invalid timer:%" PRI_TMR "", tmr);
1818  return 0;
1819  }
1820 
1821  return odp_timer_tick_to_ns(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ticks);
1822 }
1823 
1824 em_timer_tick_t em_timer_ns_to_tick(em_timer_t tmr, uint64_t ns)
1825 {
1826  const timer_storage_t *const tmrs = &em_shm->timers;
1827 
1828  if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) {
1829  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_NS_TO_TICK,
1830  "Invalid timer:%" PRI_TMR "", tmr);
1831  return 0;
1832  }
1833 
1834  return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ns);
1835 }
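
/*
 * Usage sketch, not part of this file: a tick/ns round trip. Conversions
 * go through the timer's ODP timer pool, so results are pool specific and
 * sub-tick precision is lost in the round trip.
 */
static uint64_t round_trip_ns_sketch(em_timer_t tmr)
{
	em_timer_tick_t ticks = em_timer_ns_to_tick(tmr, 1000000); /* 1 ms */

	return em_timer_tick_to_ns(tmr, ticks); /* ~1 ms, rounded to ticks */
}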
1836 
1837 em_tmo_state_t em_tmo_get_state(em_tmo_t tmo)
1838 {
1839  if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) {
1840  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo");
1841  return EM_TMO_STATE_UNKNOWN;
1842  }
1843  if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) {
1844  INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo buffer");
1845  return EM_TMO_STATE_UNKNOWN;
1846  }
1847 
1848  return odp_atomic_load_acq_u32(&tmo->state);
1849 }
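
/*
 * Usage sketch, not part of this file: a oneshot tmo can only be re-armed
 * once it is back in the IDLE state (expired or canceled).
 */
static bool can_rearm_sketch(em_tmo_t tmo)
{
	return em_tmo_get_state(tmo) == EM_TMO_STATE_IDLE;
}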
1850 
1851 em_status_t em_tmo_get_stats(em_tmo_t tmo, em_tmo_stats_t *stat)
1852 {
1853  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo == EM_TMO_UNDEF,
1854  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATS,
1855  "Invalid tmo");
1856  /* check that tmo buf is valid before accessing other struct members */
1857  RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
1858  EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATS,
1859  "Invalid tmo buffer");
1860  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo->odp_timer == ODP_TIMER_INVALID,
1861  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_STATS,
1862  "tmo deleted?");
1863 
1864  if (EM_TIMER_TMO_STATS) {
1865  if (stat)
1866  *stat = tmo->stats;
1867  } else {
1868  return EM_ERR_NOT_IMPLEMENTED;
1869  }
1870 
1871  return EM_OK;
1872 }
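
/*
 * Usage sketch, not part of this file: reading the late-ack counter.
 * Returns 0 when stats are unavailable (EM_ERR_NOT_IMPLEMENTED if built
 * without EM_TIMER_TMO_STATS).
 */
static uint64_t late_acks_sketch(em_tmo_t tmo)
{
	em_tmo_stats_t stats;

	if (em_tmo_get_stats(tmo, &stats) != EM_OK)
		return 0;

	return stats.num_late_ack;
}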
1873 
1874 em_tmo_type_t em_tmo_get_type(em_event_t event, em_tmo_t *tmo, bool reset)
1875 {
1876  if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) {
1877  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TYPE, "Invalid event given");
1878  return EM_TMO_TYPE_NONE;
1879  }
1880 
1881  event_hdr_t *ev_hdr = event_to_hdr(event);
1882  em_tmo_type_t type = (em_tmo_type_t)ev_hdr->flags.tmo_type;
1883 
1884  if (EM_CHECK_LEVEL > 1 && unlikely(!can_have_tmo_type(event))) {
1885  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TYPE,
1886  "Invalid event type");
1887  return EM_TMO_TYPE_NONE;
1888  }
1889 
1890  if (EM_CHECK_LEVEL > 2 && unlikely(type > EM_TMO_TYPE_PERIODIC)) {
1891  INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_TYPE,
1892  "Invalid tmo event type, header corrupted?");
1893  return EM_TMO_TYPE_NONE;
1894  }
1895 
1896  if (tmo)
1897  *tmo = (type == EM_TMO_TYPE_NONE) ? EM_TMO_UNDEF : ev_hdr->tmo;
1898 
1899  if (reset && ev_hdr->event_type != EM_EVENT_TYPE_TIMER_IND) {
1900  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
1901  ev_hdr->tmo = EM_TMO_UNDEF;
1902  }
1903 
1904  return type;
1905 }
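
/*
 * Usage sketch, not part of this file: catching a oneshot timeout event
 * that was delivered although em_tmo_cancel() was attempted (TOONEAR).
 * 'reset' = true clears the tmo markers so the event could also be reused
 * as a normal event.
 */
static bool free_late_tmo_event_sketch(em_event_t event)
{
	em_tmo_t tmo;

	if (em_tmo_get_type(event, &tmo, true) == EM_TMO_TYPE_ONESHOT) {
		em_free(event); /* canceled too late, dispose of it here */
		return true;
	}
	return false;
}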
1906 
1907 void *em_tmo_get_userptr(em_event_t event, em_tmo_t *tmo)
1908 {
1909  if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) {
1910  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_USERPTR, "Invalid event given");
1911  return NULL;
1912  }
1913 
1914  odp_event_t odp_event = event_em2odp(event);
1915  odp_event_type_t evtype = odp_event_type(odp_event);
1916 
1917  if (unlikely(evtype != ODP_EVENT_TIMEOUT)) /* no errorhandler for other events */
1918  return NULL;
1919 
1920  event_hdr_t *ev_hdr = event_to_hdr(event); /* will not return on error */
1921 
1922  if (tmo) /* always periodic timeout here */
1923  *tmo = ev_hdr->tmo;
1924 
1925  return odp_timeout_user_ptr(odp_timeout_from_event(odp_event));
1926 }
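
/*
 * Usage sketch, not part of this file: fetching the user pointer carried
 * by a ring timer timeout event (given at em_tmo_create_arg() time) and
 * keeping the ring tmo running by acking the same event.
 */
static void *ring_tmo_context_sketch(em_event_t event)
{
	em_tmo_t tmo;
	void *ctx = em_tmo_get_userptr(event, &tmo);

	if (ctx != NULL && tmo != EM_TMO_UNDEF)
		(void)em_tmo_ack(tmo, event); /* event goes back to the timer */

	return ctx;
}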
1927 
1928 em_timer_t em_tmo_get_timer(em_tmo_t tmo)
1929 {
1930  if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) {
1931  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TIMER, "Invalid tmo given");
1932  return EM_TIMER_UNDEF;
1933  }
1934  if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) {
1935  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TIMER, "Corrupted tmo?");
1936  return EM_TIMER_UNDEF;
1937  }
1938 
1939  return tmo->timer;
1940 }
1941 
1942 uint64_t em_timer_to_u64(em_timer_t timer)
1943 {
1944  return (uint64_t)timer;
1945 }
1946 
1947 uint64_t em_tmo_to_u64(em_tmo_t tmo)
1948 {
1949  return (uint64_t)tmo;
1950 }