/* EM-ODP 3.7.0 - Event Machine on ODP - event_machine_timer.c */
1 /*
2  * Copyright (c) 2016, Nokia Solutions and Networks
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * * Redistributions of source code must retain the above copyright
10  * notice, this list of conditions and the following disclaimer.
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in the
13  * documentation and/or other materials provided with the distribution.
14  * * Neither the name of the copyright holder nor the names of its
15  * contributors may be used to endorse or promote products derived
16  * from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * ---------------------------------------------------------------------
31  * Some notes about the implementation:
32  *
33  * EM Timer API is close to ODP timer, but there are issues
34  * making this code a bit more complex than it could be:
35  *
36  * 1) no periodic timer in ODP
37  * 2) unless using the pre-defined timeout event there is no way to access
38  * all necessary information runtime to implement a periodic timer
39  *
40  * Point 2 is solved by creating a timeout pool. When user allocates
41  * EM timeout, a new minimum size buffer is allocated to store all the needed
42  * information. Timer handle is a pointer to such buffer so all data is
43  * available via the handle (ack() is the most problematic case). This does
44  * create performance penalty, but so far it looks like the penalty is not
45  * too large and does simplify the code otherwise. Also timeouts could be
46  * pre-allocated as the API separates creation and arming.
47  * Most of the synchronization is handled by ODP timer, a ticketlock is used
48  * for high level management API.
49  *
50  */
51 #include "em_include.h"
52 
/* Timer handle <-> timer-storage index conversion.
 * Handles are index + 1 so that the value 0 stays reserved for
 * EM_TIMER_UNDEF (an all-zero handle is never a valid timer).
 */
#define TMR_I2H(x) ((em_timer_t)(uintptr_t)((x) + 1))
#define TMR_H2I(x) ((int)((uintptr_t)(x) - 1))
56 
57 static inline em_status_t timer_rv_odp2em(int odpret)
58 {
59  switch (odpret) {
60  case ODP_TIMER_SUCCESS:
61  return EM_OK;
62  case ODP_TIMER_TOO_NEAR:
63  return EM_ERR_TOONEAR;
64  case ODP_TIMER_TOO_FAR:
65  return EM_ERR_TOOFAR;
66  default:
67  break;
68  }
69 
70  return EM_ERR_LIB_FAILED;
71 }
72 
73 static inline int is_queue_valid_type(em_timer_t tmr, const queue_elem_t *q_elem)
74 {
75  unsigned int tmridx = (unsigned int)TMR_H2I(tmr);
76 
77  /* implementation specific */
78  if (em_shm->timers.timer[tmridx].plain_q_ok && q_elem->type == EM_QUEUE_TYPE_UNSCHEDULED)
79  return 1;
80  /* EM assumes scheduled always supported */
81  return (q_elem->type == EM_QUEUE_TYPE_ATOMIC ||
82  q_elem->type == EM_QUEUE_TYPE_PARALLEL ||
83  q_elem->type == EM_QUEUE_TYPE_PARALLEL_ORDERED) ? 1 : 0;
84 
85  /* LOCAL or OUTPUT queues not supported */
86 }
87 
88 static inline bool is_event_type_valid(em_event_t event)
89 {
90  em_event_type_t etype = em_event_type_major(em_event_get_type(event));
91 
92  if (etype == EM_EVENT_TYPE_PACKET ||
93  etype == EM_EVENT_TYPE_SW ||
94  etype == EM_EVENT_TYPE_TIMER)
95  return true;
96 
97  /* limitations mainly set by odp spec, e.g. no vectors */
98  return false;
99 }
100 
101 /* Helper for em_tmo_get_type() */
102 static inline bool can_have_tmo_type(em_event_t event)
103 {
104  em_event_type_t etype = em_event_type_major(em_event_get_type(event));
105 
106  if (etype == EM_EVENT_TYPE_PACKET ||
107  etype == EM_EVENT_TYPE_SW ||
108  etype == EM_EVENT_TYPE_TIMER ||
109  etype == EM_EVENT_TYPE_TIMER_IND)
110  return true;
111 
112  return false;
113 }
114 
115 static inline int is_timer_valid(em_timer_t tmr)
116 {
117  unsigned int i;
118  const timer_storage_t *const tmrs = &em_shm->timers;
119 
120  if (unlikely(tmr == EM_TIMER_UNDEF))
121  return 0;
122 
123  i = (unsigned int)TMR_H2I(tmr);
124  if (unlikely(i >= EM_ODP_MAX_TIMERS))
125  return 0;
126 
127  if (unlikely(tmrs->timer[i].odp_tmr_pool == ODP_TIMER_POOL_INVALID ||
128  tmrs->timer[i].tmo_pool == ODP_POOL_INVALID))
129  return 0;
130  return 1;
131 }
132 
133 static inline em_status_t ack_ring_timeout_event(em_tmo_t tmo,
134  em_event_t ev,
135  em_tmo_state_t tmo_state,
136  event_hdr_t *ev_hdr,
137  odp_event_t odp_ev)
138 {
139  (void)ev;
140  (void)tmo_state;
141 
142  if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr->event_type != EM_EVENT_TYPE_TIMER_IND))
143  return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK,
144  "Invalid event type:%u, expected timer-ring:%u",
146 
147  if (EM_CHECK_LEVEL > 0 && unlikely(tmo != ev_hdr->tmo))
148  return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK,
149  "Wrong event returned? tmo %p->%p", tmo, ev_hdr->tmo);
150 
151  int ret = odp_timer_periodic_ack(tmo->odp_timer, odp_ev);
152 
153  if (unlikely(ret < 0)) { /* failure */
154  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
155  return INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK,
156  "Tmo ACK: ring timer odp ack fail, rv %d", ret);
157  }
158 
159  if (unlikely(ret == 2)) { /* cancelled, no more events coming */
160  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE; /* allows em_free */
161  ev_hdr->tmo = EM_TMO_UNDEF;
162  atomic_thread_fence(memory_order_release);
163  TMR_DBG_PRINT("last periodic event %p\n", odp_ev);
164  return EM_ERR_CANCELED;
165  }
166 
167  /* ret = 1 would mean timer is cancelled, but more coming still.
168  * return ok to make ring and normal periodic behave the same
169  * e.g. CANCELED means tmo can now be deleted
170  */
171  return EM_OK;
172 }
173 
174 static void cleanup_timer_create_fail(event_timer_t *timer)
175 {
176  if (timer->tmo_pool != ODP_POOL_INVALID &&
177  timer->tmo_pool != em_shm->timers.shared_tmo_pool) /* don't kill shared pool */
178  odp_pool_destroy(timer->tmo_pool);
179  if (timer->odp_tmr_pool != ODP_TIMER_POOL_INVALID)
180  odp_timer_pool_destroy(timer->odp_tmr_pool);
181  timer->tmo_pool = ODP_POOL_INVALID;
182  timer->odp_tmr_pool = ODP_TIMER_POOL_INVALID;
183  TMR_DBG_PRINT("cleaned up failed timer create\n");
184 }
185 
186 static odp_pool_t create_tmo_handle_pool(uint32_t num_buf, uint32_t cache, const event_timer_t *tmr)
187 {
188  odp_pool_param_t odp_pool_param;
189  odp_pool_t pool;
190  char tmo_pool_name[ODP_POOL_NAME_LEN];
191 
192  odp_pool_param_init(&odp_pool_param);
193  odp_pool_param.type = ODP_POOL_BUFFER;
194  odp_pool_param.buf.size = sizeof(em_timer_timeout_t);
195  odp_pool_param.buf.align = ODP_CACHE_LINE_SIZE;
196  odp_pool_param.buf.cache_size = cache;
197  odp_pool_param.stats.all = 0;
198  TMR_DBG_PRINT("tmo handle pool cache %d\n", odp_pool_param.buf.cache_size);
199 
200  /* local pool caching may cause out of buffers situation on a core. Adjust */
201  uint32_t num = num_buf + ((em_core_count() - 1) * odp_pool_param.buf.cache_size);
202 
203  if (num_buf != num) {
204  TMR_DBG_PRINT("Adjusted pool size %d->%d due to local caching (%d)\n",
205  num_buf, num, odp_pool_param.buf.cache_size);
206  }
207  odp_pool_param.buf.num = num;
208  snprintf(tmo_pool_name, ODP_POOL_NAME_LEN, "Tmo-pool-%d", tmr->idx);
209  pool = odp_pool_create(tmo_pool_name, &odp_pool_param);
210  if (pool != ODP_POOL_INVALID) {
211  TMR_DBG_PRINT("Created ODP-pool: %s for %d timeouts\n",
212  tmo_pool_name, odp_pool_param.buf.num);
213  }
214  return pool;
215 }
216 
217 static inline odp_event_t alloc_odp_timeout(em_tmo_t tmo)
218 {
219  odp_timeout_t odp_tmo = odp_timeout_alloc(tmo->ring_tmo_pool);
220 
221  if (unlikely(odp_tmo == ODP_TIMEOUT_INVALID))
222  return ODP_EVENT_INVALID;
223 
224  /* init EM event header */
225  event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo);
226  odp_event_t odp_event = odp_timeout_to_event(odp_tmo);
227  em_event_t event = event_odp2em(odp_event);
228 
229  if (unlikely(!ev_hdr)) {
230  odp_timeout_free(odp_tmo);
231  return ODP_EVENT_INVALID;
232  }
233 
234  if (esv_enabled())
235  event = evstate_alloc_tmo(event, ev_hdr);
236  ev_hdr->flags.all = 0;
238  ev_hdr->tmo = tmo;
240  ev_hdr->event_size = 0;
241  ev_hdr->egrp = EM_EVENT_GROUP_UNDEF;
242  ev_hdr->user_area.all = 0;
243  ev_hdr->user_area.isinit = 1;
244 
245  return odp_event;
246 }
247 
248 static inline void free_odp_timeout(odp_event_t odp_event)
249 {
250  if (esv_enabled()) {
251  em_event_t event = event_odp2em(odp_event);
252  event_hdr_t *const ev_hdr = event_to_hdr(event);
253 
254  event = ev_hdr->event;
255  evstate_free(event, ev_hdr, EVSTATE__TMO_DELETE);
256  }
257 
258  odp_event_free(odp_event);
259 }
260 
261 static inline em_status_t handle_ack_noskip(em_event_t next_tmo_ev,
262  event_hdr_t *ev_hdr,
263  em_queue_t queue)
264 {
265  if (esv_enabled())
266  evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__NOSKIP);
267 
268  em_status_t err = em_send(next_tmo_ev, queue);
269 
270  if (unlikely(err != EM_OK)) {
271  err = INTERNAL_ERROR(err, EM_ESCOPE_TMO_ACK, "Tmo ACK: noskip em_send fail");
272  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
273  ev_hdr->tmo = EM_TMO_UNDEF;
274  }
275 
276  return err; /* EM_OK or send-failure */
277 }
278 
279 static inline void handle_ack_skip(em_tmo_t tmo)
280 {
281  uint64_t odpt = odp_timer_current_tick(tmo->odp_timer_pool);
282  uint64_t skips;
283 
284  if (odpt > tmo->last_tick) /* late, over next period */
285  skips = ((odpt - tmo->last_tick) / tmo->period) + 1;
286  else
287  skips = 1; /* not yet over next period, but late for setting */
288 
289  tmo->last_tick += skips * tmo->period;
290  TMR_DBG_PRINT("%lu skips * %lu ticks => new tgt %lu\n",
291  skips, tmo->period, tmo->last_tick);
292  if (EM_TIMER_TMO_STATS)
293  tmo->stats.num_period_skips += skips;
294 }
295 
296 static inline bool check_tmo_flags(em_tmo_flag_t flags)
297 {
298  /* Check for valid tmo flags (oneshot OR periodic mainly) */
299  if (unlikely(!(flags & (EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC))))
300  return false;
301 
302  if (unlikely((flags & EM_TMO_FLAG_ONESHOT) && (flags & EM_TMO_FLAG_PERIODIC)))
303  return false;
304 
305  if (EM_CHECK_LEVEL > 1) {
308  if (unlikely(flags & inv_flags))
309  return false;
310  }
311  return true;
312 }
313 
/**
 * Validate a normal (non-ring) timer attribute struct for em_timer_create().
 *
 * Reports an internal error and returns false when: the pointer is NULL,
 * em_timer_attr_init() was not called first (__internal_check stamp
 * missing), or both res_ns and res_hz were set (mutually exclusive).
 *
 * @return true when the attributes are usable
 */
static inline bool check_timer_attr(const em_timer_attr_t *tmr_attr)
{
	if (unlikely(tmr_attr == NULL)) {
		INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_CREATE,
			       "NULL ptr given");
		return false;
	}
	/* attr must come from em_timer_attr_init() */
	if (unlikely(tmr_attr->__internal_check != EM_CHECK_INIT_CALLED)) {
		INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE,
			       "em_timer_attr_t not initialized");
		return false;
	}
	/* resolution may be given as time OR frequency, not both */
	if (unlikely(tmr_attr->resparam.res_ns && tmr_attr->resparam.res_hz)) {
		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE,
			       "Only res_ns OR res_hz allowed");
		return false;
	}
	return true;
}
333 
/**
 * Validate a ring timer attribute struct for em_timer_ring_create().
 *
 * Checks (by increasing EM_CHECK_LEVEL): non-NULL pointer, the
 * __internal_check init stamp, and plausible ring parameters
 * (base_hz >= 1, max_mul >= 1, RING flag set).
 *
 * @return true when the attributes are usable
 */
static inline bool check_timer_attr_ring(const em_timer_attr_t *ring_attr)
{
	if (unlikely(ring_attr == NULL)) {
		INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_RING_CREATE,
			       "NULL attr given");
		return false;
	}
	/* attr must come from em_timer_ring_attr_init() */
	if (EM_CHECK_LEVEL > 0 && unlikely(ring_attr->__internal_check != EM_CHECK_INIT_CALLED)) {
		INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_RING_CREATE,
			       "em_timer_ring_attr_t not initialized");
		return false;
	}

	if (EM_CHECK_LEVEL > 1 &&
	    unlikely(ring_attr->ringparam.base_hz.integer < 1 ||
		     ring_attr->ringparam.max_mul < 1 ||
		     (ring_attr->flags & EM_TIMER_FLAG_RING) == 0)) {
		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE,
			       "invalid attr values for ring timer");
		return false;
	}

	return true;
}
358 
359 static inline int find_free_timer_index(void)
360 {
361  /*
362  * Find a free timer-slot.
363  * This linear search should not be a performance problem with only a few timers
364  * available especially when these are typically created at startup.
365  * Assumes context is locked
366  */
367  int i;
368 
369  for (i = 0; i < EM_ODP_MAX_TIMERS; i++) {
370  const event_timer_t *timer = &em_shm->timers.timer[i];
371 
372  if (timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID) /* marks unused entry */
373  break;
374  }
375  return i;
376 }
377 
379 {
380  if (unlikely(EM_CHECK_LEVEL > 0 && tmr_attr == NULL))
381  return; /* just ignore NULL here */
382 
383  /* clear/invalidate unused ring timer */
384  memset(&tmr_attr->ringparam, 0, sizeof(em_timer_ring_param_t));
385 
386  /* strategy: first put default resolution, then validate based on that */
387  tmr_attr->resparam.res_ns = EM_ODP_TIMER_RESOL_DEF_NS;
388  tmr_attr->resparam.res_hz = 0;
390  tmr_attr->flags = EM_TIMER_FLAG_NONE;
391 
392  odp_timer_clk_src_t odp_clksrc;
393  odp_timer_capability_t odp_capa;
394  odp_timer_res_capability_t odp_res_capa;
395  int err;
396 
397  err = timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc);
398  if (unlikely(err)) {
399  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_ATTR_INIT,
400  "Unsupported EM-timer clock source:%d",
401  tmr_attr->resparam.clk_src);
402  return;
403  }
404  err = odp_timer_capability(odp_clksrc, &odp_capa);
405  if (unlikely(err)) {
406  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT,
407  "Timer capability: ret %d, odp-clksrc:%d",
408  err, odp_clksrc);
409  return;
410  }
411 
412  TMR_DBG_PRINT("odp says highest res %lu\n", odp_capa.highest_res_ns);
413  if (unlikely(odp_capa.highest_res_ns > tmr_attr->resparam.res_ns)) {
414  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT,
415  "Timer capability: maxres %lu req %lu, odp-clksrc:%d!",
416  odp_capa.highest_res_ns, tmr_attr->resparam.res_ns, odp_clksrc);
417  return;
418  }
419 
420  memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t));
421  odp_res_capa.res_ns = tmr_attr->resparam.res_ns;
422  err = odp_timer_res_capability(odp_clksrc, &odp_res_capa);
423  if (unlikely(err)) {
424  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT,
425  "Timer res capability failed: ret %d, odp-clksrc:%d, res %lu",
426  err, odp_clksrc, tmr_attr->resparam.res_ns);
427  return;
428  }
429 
430  TMR_DBG_PRINT("res %lu -> ODP says min %lu, max %lu\n",
431  tmr_attr->resparam.res_ns, odp_res_capa.min_tmo,
432  odp_res_capa.max_tmo);
433 
434  tmr_attr->num_tmo = EM_ODP_DEFAULT_TMOS;
435  if (odp_capa.max_timers && odp_capa.max_timers < EM_ODP_DEFAULT_TMOS)
436  tmr_attr->num_tmo = odp_capa.max_timers;
437 
438  tmr_attr->resparam.min_tmo = odp_res_capa.min_tmo;
439  tmr_attr->resparam.max_tmo = odp_res_capa.max_tmo;
440  tmr_attr->name[0] = 0; /* timer_create will add default (no index available here) */
442 }
443 
445  em_timer_clksrc_t clk_src,
446  uint64_t base_hz,
447  uint64_t max_mul,
448  uint64_t res_ns)
449 {
450  if (unlikely(EM_CHECK_LEVEL > 0 && ring_attr == NULL))
451  return EM_ERR_BAD_ARG;
452 
453  /* clear unused fields */
454  memset(ring_attr, 0, sizeof(em_timer_attr_t));
455 
456  ring_attr->ringparam.base_hz.integer = base_hz;
457  ring_attr->ringparam.clk_src = clk_src;
458  ring_attr->ringparam.max_mul = max_mul;
459  ring_attr->ringparam.res_ns = res_ns; /* 0 is legal and means odp default */
460  ring_attr->num_tmo = EM_ODP_DEFAULT_RING_TMOS;
461  ring_attr->flags = EM_TIMER_FLAG_RING;
462  ring_attr->name[0] = 0; /* default at ring_create, index not known here */
463 
464  odp_timer_clk_src_t odp_clksrc;
465  odp_timer_capability_t capa;
466  int rv = timer_clksrc_em2odp(ring_attr->ringparam.clk_src, &odp_clksrc);
467 
468  if (unlikely(rv))
469  return EM_ERR_BAD_ARG;
470  if (unlikely(odp_timer_capability(odp_clksrc, &capa) != 0)) {
471  TMR_DBG_PRINT("odp_timer_capability returned error for clk_src %u\n", odp_clksrc);
472  return EM_ERR_BAD_ARG; /* assume clksrc not supported */
473  }
474 
475  if (capa.periodic.max_pools == 0) /* no odp support */
476  return EM_ERR_NOT_IMPLEMENTED;
477 
478  if (capa.periodic.max_timers < ring_attr->num_tmo)
479  ring_attr->num_tmo = capa.periodic.max_timers;
480 
481  odp_timer_periodic_capability_t pcapa;
482 
483  pcapa.base_freq_hz.integer = ring_attr->ringparam.base_hz.integer;
484  pcapa.base_freq_hz.numer = ring_attr->ringparam.base_hz.numer;
485  pcapa.base_freq_hz.denom = ring_attr->ringparam.base_hz.denom;
486  pcapa.max_multiplier = ring_attr->ringparam.max_mul;
487  pcapa.res_ns = ring_attr->ringparam.res_ns;
488  rv = odp_timer_periodic_capability(odp_clksrc, &pcapa);
489  ring_attr->ringparam.res_ns = pcapa.res_ns; /* update back */
490  ring_attr->ringparam.base_hz.integer = pcapa.base_freq_hz.integer;
491  ring_attr->ringparam.base_hz.numer = pcapa.base_freq_hz.numer;
492  ring_attr->ringparam.base_hz.denom = pcapa.base_freq_hz.denom;
493  if (pcapa.max_multiplier < ring_attr->ringparam.max_mul) /* don't increase here */
494  ring_attr->ringparam.max_mul = pcapa.max_multiplier;
495  if (rv != 1) /* 1 means all values supported */
496  return EM_ERR_BAD_ARG;
497 
499  return EM_OK;
500 }
501 
503 {
504  if (EM_CHECK_LEVEL > 0 && unlikely(capa == NULL)) {
505  EM_LOG(EM_LOG_DBG, "%s(): NULL capa ptr!\n", __func__);
506  return EM_ERR_BAD_POINTER;
507  }
508 
509  odp_timer_clk_src_t odp_clksrc;
510  odp_timer_capability_t odp_capa;
511 
512  if (unlikely(timer_clksrc_em2odp(clk_src, &odp_clksrc) ||
513  odp_timer_capability(odp_clksrc, &odp_capa))) {
514  EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src);
515  return EM_ERR_BAD_ARG;
516  }
517 
518  capa->max_timers = odp_capa.max_pools < EM_ODP_MAX_TIMERS ?
519  odp_capa.max_pools : EM_ODP_MAX_TIMERS;
520  capa->max_num_tmo = odp_capa.max_timers;
521  capa->max_res.clk_src = clk_src;
522  capa->max_res.res_ns = odp_capa.max_res.res_ns;
523  capa->max_res.res_hz = odp_capa.max_res.res_hz;
524  capa->max_res.min_tmo = odp_capa.max_res.min_tmo;
525  capa->max_res.max_tmo = odp_capa.max_res.max_tmo;
526  capa->max_tmo.clk_src = clk_src;
527  capa->max_tmo.res_ns = odp_capa.max_tmo.res_ns;
528  capa->max_tmo.res_hz = odp_capa.max_tmo.res_hz;
529  capa->max_tmo.min_tmo = odp_capa.max_tmo.min_tmo;
530  capa->max_tmo.max_tmo = odp_capa.max_tmo.max_tmo;
531 
532  /* ring timer basic capability */
533  capa->ring.max_rings = odp_capa.periodic.max_pools; /* 0 if not supported */
534  capa->ring.max_num_tmo = odp_capa.periodic.max_timers;
535  capa->ring.min_base_hz.integer = odp_capa.periodic.min_base_freq_hz.integer;
536  capa->ring.min_base_hz.numer = odp_capa.periodic.min_base_freq_hz.numer;
537  capa->ring.min_base_hz.denom = odp_capa.periodic.min_base_freq_hz.denom;
538  capa->ring.max_base_hz.integer = odp_capa.periodic.max_base_freq_hz.integer;
539  capa->ring.max_base_hz.numer = odp_capa.periodic.max_base_freq_hz.numer;
540  capa->ring.max_base_hz.denom = odp_capa.periodic.max_base_freq_hz.denom;
541  return EM_OK;
542 }
543 
545 {
546  if (EM_CHECK_LEVEL > 0 && unlikely(res == NULL)) {
547  EM_LOG(EM_LOG_DBG, "%s: NULL ptr res\n", __func__);
548  return EM_ERR_BAD_POINTER;
549  }
550 
551  odp_timer_clk_src_t odp_clksrc;
552  odp_timer_res_capability_t odp_res_capa;
553  int err;
554 
555  err = timer_clksrc_em2odp(clk_src, &odp_clksrc);
556  if (unlikely(err)) {
557  EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src);
558  return EM_ERR_BAD_ARG;
559  }
560  memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t));
561  odp_res_capa.res_ns = res->res_ns;
562  odp_res_capa.res_hz = res->res_hz; /* ODP will check if both were set */
563  odp_res_capa.max_tmo = res->max_tmo;
564  err = odp_timer_res_capability(odp_clksrc, &odp_res_capa);
565  if (unlikely(err)) {
566  EM_LOG(EM_LOG_DBG, "%s: ODP res_capability failed (ret %d)!\n", __func__, err);
567  return EM_ERR_BAD_ARG;
568  }
569  res->min_tmo = odp_res_capa.min_tmo;
570  res->max_tmo = odp_res_capa.max_tmo;
571  res->res_ns = odp_res_capa.res_ns;
572  res->res_hz = odp_res_capa.res_hz;
573  res->clk_src = clk_src;
574  return EM_OK;
575 }
576 
578 {
579  odp_timer_clk_src_t odp_clksrc;
580  odp_timer_periodic_capability_t pcapa;
581 
582  if (EM_CHECK_LEVEL > 0 && unlikely(ring == NULL)) {
583  EM_LOG(EM_LOG_DBG, "%s: NULL ptr ring\n", __func__);
584  return EM_ERR_BAD_POINTER;
585  }
586 
587  if (unlikely(timer_clksrc_em2odp(ring->clk_src, &odp_clksrc))) {
588  EM_LOG(EM_LOG_DBG, "%s: Invalid clk_src %d\n", __func__, ring->clk_src);
589  return EM_ERR_BAD_ARG;
590  }
591 
592  pcapa.base_freq_hz.integer = ring->base_hz.integer;
593  pcapa.base_freq_hz.numer = ring->base_hz.numer;
594  pcapa.base_freq_hz.denom = ring->base_hz.denom;
595  pcapa.max_multiplier = ring->max_mul;
596  pcapa.res_ns = ring->res_ns;
597  int rv = odp_timer_periodic_capability(odp_clksrc, &pcapa);
598 
599  ring->base_hz.integer = pcapa.base_freq_hz.integer;
600  ring->base_hz.numer = pcapa.base_freq_hz.numer;
601  ring->base_hz.denom = pcapa.base_freq_hz.denom;
602  ring->max_mul = pcapa.max_multiplier;
603  ring->res_ns = pcapa.res_ns;
604 
605  if (unlikely(rv < 0)) {
606  EM_LOG(EM_LOG_DBG, "%s: odp failed periodic capability for clk_src %d\n",
607  __func__, ring->clk_src);
608  return EM_ERR_LIB_FAILED;
609  }
610  if (rv == 0)
611  return EM_ERR_NOT_SUPPORTED; /* no error, but no exact support */
612 
613  return EM_OK; /* meet or exceed */
614 }
615 
616 em_timer_t em_timer_create(const em_timer_attr_t *tmr_attr)
617 {
618  /* timers are initialized? */
619  if (unlikely(em_shm->timers.init_check != EM_CHECK_INIT_CALLED)) {
620  INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE,
621  "Timer is not initialized!");
622  return EM_TIMER_UNDEF;
623  }
624 
625  if (EM_CHECK_LEVEL > 0) {
626  if (check_timer_attr(tmr_attr) == false)
627  return EM_TIMER_UNDEF;
628  }
629 
630  odp_timer_pool_param_t odp_tpool_param;
631  odp_timer_clk_src_t odp_clksrc;
632 
633  odp_timer_pool_param_init(&odp_tpool_param);
634  odp_tpool_param.res_ns = tmr_attr->resparam.res_ns;
635  odp_tpool_param.res_hz = tmr_attr->resparam.res_hz;
636  odp_tpool_param.min_tmo = tmr_attr->resparam.min_tmo;
637  odp_tpool_param.max_tmo = tmr_attr->resparam.max_tmo;
638  odp_tpool_param.num_timers = tmr_attr->num_tmo;
639  odp_tpool_param.priv = tmr_attr->flags & EM_TIMER_FLAG_PRIVATE ? 1 : 0;
640  if (unlikely(timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc))) {
641  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE,
642  "Unsupported EM-timer clock source:%d",
643  tmr_attr->resparam.clk_src);
644  return EM_TIMER_UNDEF;
645  }
646  odp_tpool_param.clk_src = odp_clksrc;
647 
648  /* check queue type support */
649  odp_timer_capability_t capa;
650 
651  if (unlikely(odp_timer_capability(odp_clksrc, &capa))) {
652  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE,
653  "ODP timer capa failed for clk:%d",
654  tmr_attr->resparam.clk_src);
655  return EM_TIMER_UNDEF;
656  }
657  if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */
658  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE,
659  "ODP does not support scheduled q for clk:%d",
660  tmr_attr->resparam.clk_src);
661  return EM_TIMER_UNDEF;
662  }
663 
664  odp_ticketlock_lock(&em_shm->timers.timer_lock);
665 
666  int i = find_free_timer_index();
667 
668  if (unlikely(i >= EM_ODP_MAX_TIMERS)) {
669  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
670  INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_CREATE,
671  "No more timers available");
672  return EM_TIMER_UNDEF;
673  }
674 
675  event_timer_t *timer = &em_shm->timers.timer[i];
676  char timer_pool_name[ODP_TIMER_POOL_NAME_LEN];
677  const char *name = tmr_attr->name;
678  const char *reason = "";
679 
680  if (tmr_attr->name[0] == '\0') { /* replace NULL with default */
681  snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN,
682  "EM-timer-%d", timer->idx); /* idx initialized by timer_init */
683  name = timer_pool_name;
684  }
685 
686  TMR_DBG_PRINT("Creating ODP tmr pool: clk %d, res_ns %lu, res_hz %lu\n",
687  odp_tpool_param.clk_src, odp_tpool_param.res_ns,
688  odp_tpool_param.res_hz);
689  timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param);
690  if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) {
691  reason = "odp_timer_pool_create error";
692  goto error_locked;
693  }
694  TMR_DBG_PRINT("Created timer: %s with idx: %d\n", name, timer->idx);
695 
696  /* tmo handle pool can be per-timer or shared */
697  if (!em_shm->opt.timer.shared_tmo_pool_enable) { /* per-timer pool */
698  odp_pool_t opool = create_tmo_handle_pool(tmr_attr->num_tmo,
699  em_shm->opt.timer.tmo_pool_cache, timer);
700 
701  if (unlikely(opool == ODP_POOL_INVALID)) {
702  reason = "Tmo handle buffer pool create failed";
703  goto error_locked;
704  }
705 
706  timer->tmo_pool = opool;
707  TMR_DBG_PRINT("Created per-timer tmo handle pool\n");
708  } else {
709  if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) { /* first timer */
710  odp_pool_t opool =
711  create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size,
712  em_shm->opt.timer.tmo_pool_cache, timer);
713 
714  if (unlikely(opool == ODP_POOL_INVALID)) {
715  reason = "Shared tmo handle buffer pool create failed";
716  goto error_locked;
717  }
718  timer->tmo_pool = opool;
719  em_shm->timers.shared_tmo_pool = opool;
720  TMR_DBG_PRINT("Created shared tmo handle pool for total %u tmos\n",
721  em_shm->opt.timer.shared_tmo_pool_size);
722  } else {
723  timer->tmo_pool = em_shm->timers.shared_tmo_pool;
724  }
725  }
726 
727  timer->num_tmo_reserve = tmr_attr->num_tmo;
728  if (em_shm->opt.timer.shared_tmo_pool_enable) { /* check reservation */
729  uint32_t left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved;
730 
731  if (timer->num_tmo_reserve > left) {
732  TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left);
733  reason = "Not enough tmos left in shared pool";
734  goto error_locked;
735  }
736  em_shm->timers.reserved += timer->num_tmo_reserve;
737  TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n",
738  timer->num_tmo_reserve, em_shm->timers.reserved);
739  }
740  timer->flags = tmr_attr->flags;
741  timer->plain_q_ok = capa.queue_type_plain;
742  timer->is_ring = false;
743 
744 #if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
745  if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) {
746  reason = "odp_timer_pool_start_multi failed";
747  goto error_locked;
748  }
749 #else
750  odp_timer_pool_start();
751 #endif
752  em_shm->timers.num_timers++;
753  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
754 
755  TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i), em_shm->timers.num_timers);
756  return TMR_I2H(i);
757 
758 error_locked:
759  cleanup_timer_create_fail(timer);
760  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
761 
762  TMR_DBG_PRINT("ERR odp tmr pool in: clk %u, res %lu, min %lu, max %lu, num %u\n",
763  odp_tpool_param.clk_src, odp_tpool_param.res_ns,
764  odp_tpool_param.min_tmo, odp_tpool_param.max_tmo, odp_tpool_param.num_timers);
765  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE,
766  "Timer pool create failed, reason: ", reason);
767  return EM_TIMER_UNDEF;
768 }
769 
770 em_timer_t em_timer_ring_create(const em_timer_attr_t *ring_attr)
771 {
772  /* timers are initialized? */
773  if (unlikely(em_shm->timers.init_check != EM_CHECK_INIT_CALLED)) {
774  INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE,
775  "Timer is disabled!");
776  return EM_TIMER_UNDEF;
777  }
778 
779  if (EM_CHECK_LEVEL > 0 && unlikely(check_timer_attr_ring(ring_attr) == false)) {
780  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE,
781  "NULL or incorrect attribute");
782  return EM_TIMER_UNDEF;
783  }
784 
785  odp_timer_pool_param_t odp_tpool_param;
786  odp_timer_clk_src_t odp_clksrc;
787 
788  odp_timer_pool_param_init(&odp_tpool_param);
789  odp_tpool_param.timer_type = ODP_TIMER_TYPE_PERIODIC;
790  odp_tpool_param.exp_mode = ODP_TIMER_EXP_AFTER;
791  odp_tpool_param.num_timers = ring_attr->num_tmo;
792  odp_tpool_param.priv = ring_attr->flags & EM_TIMER_FLAG_PRIVATE ? 1 : 0;
793  if (unlikely(timer_clksrc_em2odp(ring_attr->ringparam.clk_src, &odp_clksrc))) {
794  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_RING_CREATE,
795  "Unsupported EM-timer clock source:%d",
796  ring_attr->ringparam.clk_src);
797  return EM_TIMER_UNDEF;
798  }
799  odp_tpool_param.clk_src = odp_clksrc;
800  odp_tpool_param.periodic.base_freq_hz.integer = ring_attr->ringparam.base_hz.integer;
801  odp_tpool_param.periodic.base_freq_hz.numer = ring_attr->ringparam.base_hz.numer;
802  odp_tpool_param.periodic.base_freq_hz.denom = ring_attr->ringparam.base_hz.denom;
803  odp_tpool_param.periodic.max_multiplier = ring_attr->ringparam.max_mul;
804  odp_tpool_param.res_hz = 0;
805  odp_tpool_param.res_ns = ring_attr->ringparam.res_ns;
806 
807  /* check queue type support */
808  odp_timer_capability_t capa;
809 
810  if (unlikely(odp_timer_capability(odp_clksrc, &capa))) {
811  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
812  "ODP timer capa failed for clk:%d",
813  ring_attr->ringparam.clk_src);
814  return EM_TIMER_UNDEF;
815  }
816  if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */
817  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
818  "ODP does not support scheduled q for clk:%d",
819  ring_attr->ringparam.clk_src);
820  return EM_TIMER_UNDEF;
821  }
822 
823  /* lock context to find free slot and update it */
824  timer_storage_t *const tmrs = &em_shm->timers;
825 
826  odp_ticketlock_lock(&tmrs->timer_lock);
827 
828  /* is there enough events left in shared pool ? */
829  uint32_t left = em_shm->opt.timer.ring.timer_event_pool_size - tmrs->ring_reserved;
830 
831  if (ring_attr->num_tmo > left) {
832  odp_ticketlock_unlock(&tmrs->timer_lock);
833  INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
834  "Too few ring timeout events left (req %u/%u)",
835  ring_attr->num_tmo, left);
836  return EM_TIMER_UNDEF;
837  }
838 
839  /* allocate timer */
840  int i = find_free_timer_index();
841 
842  if (unlikely(i >= EM_ODP_MAX_TIMERS)) {
843  odp_ticketlock_unlock(&tmrs->timer_lock);
844  INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
845  "No more timers available");
846  return EM_TIMER_UNDEF;
847  }
848 
849  event_timer_t *timer = &tmrs->timer[i];
850 
851  /* then timer pool */
852  char timer_pool_name[ODP_TIMER_POOL_NAME_LEN];
853  const char *name = ring_attr->name;
854  const char *reason = "";
855 
856  if (ring_attr->name[0] == '\0') { /* replace NULL with default */
857  snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN,
858  "EM-timer-%d", timer->idx); /* idx initialized by timer_init */
859  name = timer_pool_name;
860  }
861 
862  TMR_DBG_PRINT("Creating ODP periodic tmr pool: clk %d, res_ns %lu, base_hz %lu\n",
863  odp_tpool_param.clk_src, odp_tpool_param.res_ns,
864  odp_tpool_param.periodic.base_freq_hz.integer);
865  timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param);
866  if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) {
867  reason = "odp_timer_pool_create failed";
868  goto error_locked;
869  }
870  TMR_DBG_PRINT("Created ring timer: %s with idx: %d\n", name, timer->idx);
871 
872  /* tmo handle pool can be per-timer or shared */
873  if (!em_shm->opt.timer.shared_tmo_pool_enable) { /* per-timer pool */
874  odp_pool_t opool = create_tmo_handle_pool(ring_attr->num_tmo,
875  em_shm->opt.timer.tmo_pool_cache, timer);
876 
877  if (unlikely(opool == ODP_POOL_INVALID)) {
878  reason = "tmo handle pool creation failed";
879  goto error_locked;
880  }
881 
882  timer->tmo_pool = opool;
883  TMR_DBG_PRINT("Created per-timer tmo handle pool %p\n", opool);
884  } else {
885  if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) { /* first timer */
886  odp_pool_t opool =
887  create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size,
888  em_shm->opt.timer.tmo_pool_cache, timer);
889 
890  if (unlikely(opool == ODP_POOL_INVALID)) {
891  reason = "Shared tmo handle pool creation failed";
892  goto error_locked;
893  }
894 
895  timer->tmo_pool = opool;
896  em_shm->timers.shared_tmo_pool = opool;
897  TMR_DBG_PRINT("Created shared tmo handle pool %p\n", opool);
898  } else {
899  timer->tmo_pool = em_shm->timers.shared_tmo_pool;
900  }
901  }
902 
903  timer->num_tmo_reserve = ring_attr->num_tmo;
904  if (em_shm->opt.timer.shared_tmo_pool_enable) { /* check reservation */
905  left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved;
906 
907  if (timer->num_tmo_reserve > left) {
908  TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left);
909  reason = "Not enough tmos left in shared pool";
910  goto error_locked;
911  }
912  em_shm->timers.reserved += timer->num_tmo_reserve;
913  TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n",
914  timer->num_tmo_reserve, em_shm->timers.reserved);
915  }
916 
917  /* odp timeout event pool for ring tmo events is always shared for all ring timers*/
918  if (tmrs->ring_tmo_pool == ODP_POOL_INVALID) {
919  odp_pool_param_t odp_tmo_pool_param;
920  char pool_name[ODP_POOL_NAME_LEN];
921 
922  odp_pool_param_init(&odp_tmo_pool_param);
923  odp_tmo_pool_param.type = ODP_POOL_TIMEOUT;
924  odp_tmo_pool_param.tmo.cache_size = em_shm->opt.timer.ring.timer_event_pool_cache;
925  TMR_DBG_PRINT("ring tmo event pool cache %u\n", odp_tmo_pool_param.tmo.cache_size);
926  odp_tmo_pool_param.tmo.num = em_shm->opt.timer.ring.timer_event_pool_size;
927  TMR_DBG_PRINT("ring tmo event pool size %u\n", odp_tmo_pool_param.tmo.num);
928  odp_tmo_pool_param.tmo.uarea_size = sizeof(event_hdr_t);
929  odp_tmo_pool_param.stats.all = 0;
930  snprintf(pool_name, ODP_POOL_NAME_LEN, "Ring-%d-tmo-pool", timer->idx);
931  tmrs->ring_tmo_pool = odp_pool_create(pool_name, &odp_tmo_pool_param);
932  if (unlikely(tmrs->ring_tmo_pool == ODP_POOL_INVALID)) {
933  reason = "odp timeout event pool creation failed";
934  goto error_locked;
935  }
936  TMR_DBG_PRINT("Created ODP-timeout event pool %p: '%s'\n",
937  tmrs->ring_tmo_pool, pool_name);
938  }
939 
940  tmrs->ring_reserved += ring_attr->num_tmo;
941  TMR_DBG_PRINT("Updated ring reserve by +%u to %u\n", ring_attr->num_tmo,
942  tmrs->ring_reserved);
943  tmrs->num_rings++;
944  tmrs->num_timers++;
945  timer->num_ring_reserve = ring_attr->num_tmo;
946  timer->flags = ring_attr->flags;
947  timer->plain_q_ok = capa.queue_type_plain;
948  timer->is_ring = true;
949  tmrs->num_ring_create_calls++;
950 
951 #if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
952  if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) {
953  reason = "odp_timer_pool_start_multi failed";
954  goto error_locked;
955  }
956 #else
957  odp_timer_pool_start();
958 #endif
959 
960  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
961 
962  TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i), tmrs->num_timers);
963  return TMR_I2H(i);
964 
965 error_locked:
966  cleanup_timer_create_fail(timer);
967  odp_ticketlock_unlock(&tmrs->timer_lock);
968 
969  TMR_DBG_PRINT("ERR odp tmr ring pool in: clk %u, res %lu, base_hz %lu, max_mul %lu, num tmo %u\n",
970  ring_attr->ringparam.clk_src,
971  ring_attr->ringparam.res_ns,
972  ring_attr->ringparam.base_hz.integer,
973  ring_attr->ringparam.max_mul,
974  ring_attr->num_tmo);
975  INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_RING_CREATE,
976  "Ring timer create failed, reason: ", reason);
977  return EM_TIMER_UNDEF;
978 }
979 
/*
 * em_timer_delete (scope: EM_ESCOPE_TIMER_DELETE): delete one EM timer.
 *
 * Destroys the timer's tmo handle pool (unless it is the shared one),
 * destroys its ODP timer pool and releases the reservation bookkeeping.
 * The whole sequence runs under timers.timer_lock. The shared tmo pool
 * is destroyed only when the last timer goes; the shared ring timeout
 * event pool is intentionally never destroyed here.
 */
{
	timer_storage_t *const tmrs = &em_shm->timers;
	int i = TMR_H2I(tmr);
	em_status_t rv = EM_OK;
	/* remembers which odp pool failed to destroy, for the error report */
	odp_pool_t pool_fail = ODP_POOL_INVALID;

	/* take lock before checking so nothing can change */
	odp_ticketlock_lock(&tmrs->timer_lock);
	if (unlikely(!is_timer_valid(tmr))) {
		odp_ticketlock_unlock(&tmrs->timer_lock);
		return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_DELETE,
				      "Invalid timer:%" PRI_TMR "", tmr);
	}

	/* per-timer tmo handle pool is deleted here, shared one only below */
	if (tmrs->timer[i].tmo_pool != tmrs->shared_tmo_pool) { /* don't delete shared pool */
		if (unlikely(odp_pool_destroy(tmrs->timer[i].tmo_pool) != 0)) {
			rv = EM_ERR_LIB_FAILED;
			pool_fail = tmrs->timer[i].tmo_pool;
		} else {
			TMR_DBG_PRINT("Deleted odp pool %p\n", tmrs->timer[i].tmo_pool);
		}
	}
	tmrs->timer[i].tmo_pool = ODP_POOL_INVALID;
	odp_timer_pool_destroy(tmrs->timer[i].odp_tmr_pool);
	tmrs->timer[i].odp_tmr_pool = ODP_TIMER_POOL_INVALID;

	/* Ring delete. Don't remove shared event pool as user could still have event */
	if (tmrs->timer[i].is_ring && tmrs->num_rings) {
		tmrs->num_rings--;
		if (tmrs->num_rings < 1)
			TMR_DBG_PRINT("Last ring deleted");
		tmrs->ring_reserved -= tmrs->timer[i].num_ring_reserve;
		TMR_DBG_PRINT("Updated ring reserve by -%u to %u\n",
			      tmrs->timer[i].num_ring_reserve, tmrs->ring_reserved);
		tmrs->timer[i].num_ring_reserve = 0;
	}

	tmrs->num_timers--;
	if (tmrs->shared_tmo_pool != ODP_POOL_INVALID) { /* shared pool in use */
		tmrs->reserved -= tmrs->timer[i].num_tmo_reserve;
		TMR_DBG_PRINT("Updated tmo reserve by -%u to %u\n",
			      tmrs->timer[i].num_tmo_reserve, tmrs->reserved);
		tmrs->timer[i].num_tmo_reserve = 0;
	}
	if (tmrs->num_timers == 0 && tmrs->shared_tmo_pool != ODP_POOL_INVALID) {
		/* no more timers, delete shared tmo pool */
		if (unlikely(odp_pool_destroy(tmrs->shared_tmo_pool) != 0)) {
			rv = EM_ERR_LIB_FAILED;
			pool_fail = tmrs->shared_tmo_pool;
		} else {
			TMR_DBG_PRINT("Deleted shared tmo pool %p\n", tmrs->shared_tmo_pool);
			tmrs->shared_tmo_pool = ODP_POOL_INVALID;
		}
	}

	odp_ticketlock_unlock(&tmrs->timer_lock);
	if (unlikely(rv != EM_OK)) {
		return INTERNAL_ERROR(rv, EM_ESCOPE_TIMER_DELETE,
				      "timer %p delete fail, odp pool %p fail\n", tmr, pool_fail);
	}
	TMR_DBG_PRINT("ok, deleted timer %p, num_timers %u\n", tmr, tmrs->num_timers);
	return rv;
}
1044 
/*
 * Read the timer's current tick directly from its ODP timer pool.
 * Returns 0 for an invalid timer handle (check only done when
 * EM_CHECK_LEVEL > 0); no error handler is invoked on that path.
 */
{
	const timer_storage_t *const tmrs = &em_shm->timers;
	int i = TMR_H2I(tmr);

	/* 0 doubles as the error value here */
	if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr))
		return 0;

	return odp_timer_current_tick(tmrs->timer[i].odp_tmr_pool);
}
1055 
1056 em_tmo_t em_tmo_create(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue)
1057 {
1058  return em_tmo_create_arg(tmr, flags, queue, NULL);
1059 }
1060 
/*
 * em_tmo_create_arg (scope: EM_ESCOPE_TMO_CREATE): allocate and
 * initialize one timeout (tmo) handle for the given timer/queue.
 * The handle lives in a buffer from the timer's tmo pool; an ODP timer
 * is allocated to deliver into the queue. For ring timers a timeout
 * event is pre-allocated so a later start does not need to allocate.
 * Returns EM_TMO_UNDEF on any failure (error handler already called).
 */
		      em_queue_t queue, em_tmo_args_t *args)
{
	const queue_elem_t *const q_elem = queue_elem_get(queue);

	/* argument sanity checks, active only when EM_CHECK_LEVEL > 0 */
	if (EM_CHECK_LEVEL > 0) {
		if (unlikely(!is_timer_valid(tmr))) {
			INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
				       "Invalid timer:%" PRI_TMR "", tmr);
			return EM_TMO_UNDEF;
		}
		if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) {
			INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
				       "Tmr:%" PRI_TMR ": inv.Q:%" PRI_QUEUE "",
				       tmr, queue);
			return EM_TMO_UNDEF;
		}
		if (unlikely(!is_queue_valid_type(tmr, q_elem))) {
			INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
				       "Tmr:%" PRI_TMR ": inv.Q (type):%" PRI_QUEUE "",
				       tmr, queue);
			return EM_TMO_UNDEF;
		}
		if (unlikely(!check_tmo_flags(flags))) {
			INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
				       "Tmr:%" PRI_TMR ": inv. tmo-flags:0x%x",
				       tmr, flags);
			return EM_TMO_UNDEF;
		}
	}

	int i = TMR_H2I(tmr);

	/* ring timers only support periodic timeouts */
	if (EM_CHECK_LEVEL > 1 &&
	    em_shm->timers.timer[i].is_ring &&
	    !(flags & EM_TMO_FLAG_PERIODIC)) {
		INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE,
			       "Tmr:%" PRI_TMR ": asking oneshot with ring timer!",
			       tmr);
		return EM_TMO_UNDEF;
	}

	/* the tmo handle itself is a buffer from the timer's tmo pool */
	odp_buffer_t tmo_buf = odp_buffer_alloc(em_shm->timers.timer[i].tmo_pool);

	if (unlikely(tmo_buf == ODP_BUFFER_INVALID)) {
		INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE,
			       "Tmr:%" PRI_TMR ": tmo pool exhausted", tmr);
		return EM_TMO_UNDEF;
	}

	em_timer_timeout_t *tmo = odp_buffer_addr(tmo_buf);
	odp_timer_pool_t odptmr = em_shm->timers.timer[i].odp_tmr_pool;

	/* optional user context pointer passed through to the ODP timer */
	const void *userptr = NULL;

	if (args != NULL)
		userptr = args->userptr;

	tmo->odp_timer = odp_timer_alloc(odptmr, q_elem->odp_queue, userptr);
	if (unlikely(tmo->odp_timer == ODP_TIMER_INVALID)) {
		INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CREATE,
			       "Tmr:%" PRI_TMR ": odp_timer_alloc() failed", tmr);
		odp_buffer_free(tmo_buf);
		return EM_TMO_UNDEF;
	}

	/* OK, init state. Some values copied for faster access runtime */
	tmo->period = 0;
	tmo->odp_timer_pool = odptmr;
	tmo->timer = tmr;
	tmo->odp_buffer = tmo_buf;
	tmo->flags = flags;
	tmo->queue = queue;
	tmo->is_ring = em_shm->timers.timer[i].is_ring;
	tmo->odp_timeout = ODP_EVENT_INVALID;
	tmo->ring_tmo_pool = em_shm->timers.ring_tmo_pool;

	if (tmo->is_ring) { /* pre-allocate timeout event to save time at start */
		odp_event_t odp_tmo_event = alloc_odp_timeout(tmo);

		if (unlikely(odp_tmo_event == ODP_EVENT_INVALID)) {
			INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE,
				       "Ring: odp timeout event allocation failed");
			/* roll back the timer and handle buffer allocations */
			odp_timer_free(tmo->odp_timer);
			odp_buffer_free(tmo_buf);
			return EM_TMO_UNDEF;
		}
		tmo->odp_timeout = odp_tmo_event;
		TMR_DBG_PRINT("Ring: allocated odp timeout ev %p\n", tmo->odp_timeout);
	}

	if (EM_TIMER_TMO_STATS)
		memset(&tmo->stats, 0, sizeof(em_tmo_stats_t));

	/* a new tmo starts in IDLE state, armed later by a set-call */
	odp_atomic_init_u32(&tmo->state, EM_TMO_STATE_IDLE);
	TMR_DBG_PRINT("ODP timer %p allocated\n", tmo->odp_timer);
	TMR_DBG_PRINT("tmo %p created\n", tmo);
	return tmo;
}
1160 
/*
 * em_tmo_delete: free a timeout handle; an active one is canceled first.
 * If the cancel (or pre-1.43 odp_timer_free()) yields back the timeout
 * event, it is returned to the caller via cur_event, otherwise
 * *cur_event is set to EM_EVENT_UNDEF.
 */
em_status_t em_tmo_delete(em_tmo_t tmo, em_event_t *cur_event)
{
	if (EM_CHECK_LEVEL > 0) {
		RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || cur_event == NULL,
				EM_ERR_BAD_ARG, EM_ESCOPE_TMO_DELETE,
				"Invalid args: tmo:%" PRI_TMO " cur_event:%p",
				tmo, cur_event);
	}

	*cur_event = EM_EVENT_UNDEF;
	em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);

	if (EM_CHECK_LEVEL > 1) {
		/* check that tmo buf is valid before accessing other struct members */
		RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer),
				EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE,
				"Invalid tmo buffer");

		RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
				EM_ERR_BAD_STATE, EM_ESCOPE_TMO_DELETE,
				"Invalid tmo state:%d", tmo_state);

		RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID,
				EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE,
				"Invalid tmo odp_timer, deleted?");
	}

	TMR_DBG_PRINT("ODP timer %p\n", tmo->odp_timer);

	/* change this first to increase probability to catch e.g. double delete */
	odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_UNKNOWN);

	odp_event_t odp_evt;

#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
	/* ODP 1.43 does not allow to delete active timer, do it first */
	odp_evt = ODP_EVENT_INVALID;
	if (tmo_state == EM_TMO_STATE_ACTIVE) {
		int cret;

		if (tmo->is_ring) {
			cret = odp_timer_periodic_cancel(tmo->odp_timer);
			/* NOTE(review): the error-check macro opening here is
			 * not visible in this copy — verify against upstream */
					EM_ESCOPE_TMO_DELETE,
					"ring active but odp timer cancel failed, rv %d\n", cret);
		} else {
			cret = odp_timer_cancel(tmo->odp_timer, &odp_evt);
			RETURN_ERROR_IF(cret == ODP_TIMER_FAIL, EM_ERR_LIB_FAILED,
					EM_ESCOPE_TMO_DELETE,
					"was active but odp timer cancel failed, rv %d\n", cret);
		}

		TMR_DBG_PRINT("tmo cancelled first, odp rv %d\n", cret);
	}

	int fret = odp_timer_free(tmo->odp_timer);

	RETURN_ERROR_IF(fret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE,
			"odp timer free failed!?, rv %d\n", fret);
#else
	/* older ODP: free returns the event if the timer was still armed */
	odp_evt = odp_timer_free(tmo->odp_timer);
#endif
	odp_buffer_t tmp = tmo->odp_buffer;
	em_event_t tmo_ev = EM_EVENT_UNDEF;

	/* invalidate handle contents before freeing the backing buffer */
	tmo->odp_timer = ODP_TIMER_INVALID;
	tmo->odp_buffer = ODP_BUFFER_INVALID;
	tmo->timer = EM_TIMER_UNDEF;

	if (tmo->is_ring && tmo->odp_timeout != ODP_EVENT_INVALID) {
		TMR_DBG_PRINT("ring: free unused ODP timeout ev %p\n", tmo->odp_timeout);
		free_odp_timeout(tmo->odp_timeout);
		tmo->odp_timeout = ODP_EVENT_INVALID;
	}

	if (odp_evt != ODP_EVENT_INVALID) {
		/* these errors do not free buffer to prevent potential further corruption */
		RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && !odp_event_is_valid(odp_evt),
				EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE,
				"Corrupted tmo event returned");
		RETURN_ERROR_IF(tmo->is_ring, EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE,
				"odp_timer_free returned event %p for a ring!\n", odp_evt);

		tmo_ev = event_odp2em(odp_evt);
		if (esv_enabled())
			tmo_ev = evstate_em2usr(tmo_ev, event_to_hdr(tmo_ev), EVSTATE__TMO_DELETE);
	}

	odp_buffer_free(tmp);
	*cur_event = tmo_ev;
	TMR_DBG_PRINT("tmo %p delete ok, event returned %p\n", tmo, tmo_ev);
	return EM_OK;
}
1254 
/*
 * em_tmo_set_abs (scope: EM_ESCOPE_TMO_SET_ABS): arm a oneshot tmo to
 * expire at an absolute tick. On success the event is owned by the
 * timer until it expires or the tmo is canceled. EM_ERR_TOONEAR is
 * returned without calling the error handler if the tick is too close.
 */
		em_event_t tmo_ev)
{
		(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF),
			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS,
			"Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
			tmo, tmo_ev);
	/* check that tmo buf is valid before accessing other struct members */
	RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
			EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS,
			"Invalid tmo buffer");
		(tmo->flags & EM_TMO_FLAG_PERIODIC),
			EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_ABS,
			"Cannot set periodic tmo, use _set_periodic()");
		!is_event_type_valid(tmo_ev),
			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS,
			"invalid event type");
	if (EM_CHECK_LEVEL > 1) {
		em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);

		RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
				EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_ABS,
				"Invalid tmo state:%d", tmo_state);
	}
		tmo->odp_timer == ODP_TIMER_INVALID,
			EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS,
			"Invalid tmo odp_timer");

	event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
	odp_event_t odp_ev = event_em2odp(tmo_ev);
	bool esv_ena = esv_enabled();
	odp_timer_start_t startp;

			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS,
			"Invalid event type: timer-ring");

	if (esv_ena)
		evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS);

	/* set tmo active and arm with absolute time */
	startp.tick_type = ODP_TIMER_TICK_ABS;
	startp.tick = ticks_abs;
	startp.tmo_ev = odp_ev;
	ev_hdr->tmo = tmo;
	odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
	int odpret = odp_timer_start(tmo->odp_timer, &startp);

	if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
		/* arming failed: revert event ownership, state and esv */
		ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
		ev_hdr->tmo = EM_TMO_UNDEF;
		odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
		if (esv_ena)
			evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS__FAIL);

		em_status_t retval = timer_rv_odp2em(odpret);

		if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
			TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
			return retval;
		}

		return INTERNAL_ERROR(retval, EM_ESCOPE_TMO_SET_ABS,
				      "odp_timer_start():%d", odpret);
	}
	TMR_DBG_PRINT("OK\n");
	return EM_OK;
}
1329 
/*
 * em_tmo_set_rel (scope: EM_ESCOPE_TMO_SET_REL): arm a oneshot tmo to
 * expire ticks_rel ticks from now. Periodic use of this call is
 * rejected (no longer supported). EM_ERR_TOONEAR is returned without
 * the error handler if the relative time is too short.
 */
		em_event_t tmo_ev)
{
	if (EM_CHECK_LEVEL > 0) {
		RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF,
				EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL,
				"Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
				tmo, tmo_ev);

				EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL,
				"%s: Periodic no longer supported", __func__);
	}
	if (EM_CHECK_LEVEL > 1) {
		/* check that tmo buf is valid before accessing other struct members */
		RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer),
				EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_REL,
				"Invalid tmo buffer");

		em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);

		RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
				EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_REL,
				"Invalid tmo state:%d", tmo_state);
	}
		!is_event_type_valid(tmo_ev),
			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL,
			"invalid event type");

	event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
	odp_event_t odp_ev = event_em2odp(tmo_ev);
	bool esv_ena = esv_enabled();
	odp_timer_start_t startp;

			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL,
			"Invalid event type: timer-ring");

	if (esv_ena)
		evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL);

	/* set tmo active and arm with relative time */
	startp.tick_type = ODP_TIMER_TICK_REL;
	startp.tick = ticks_rel;
	startp.tmo_ev = odp_ev;
	ev_hdr->tmo = tmo;
	odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
	int odpret = odp_timer_start(tmo->odp_timer, &startp);

	if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
		/* arming failed: revert event ownership, state and esv */
		ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
		ev_hdr->tmo = EM_TMO_UNDEF;
		odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
		if (esv_ena)
			evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL__FAIL);

		em_status_t retval = timer_rv_odp2em(odpret);

		if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
			TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
			return retval;
		}
		return INTERNAL_ERROR(retval, EM_ESCOPE_TMO_SET_REL,
				      "odp_timer_start():%d", odpret);
	}
	TMR_DBG_PRINT("OK\n");
	return EM_OK;
}
1401 
/*
 * em_tmo_set_periodic (scope: EM_ESCOPE_TMO_SET_PERIODIC): start a
 * periodic (non-ring) tmo. start_abs == 0 means "now + period".
 * tmo->last_tick tracks the absolute target tick; em_tmo_ack()
 * advances it by tmo->period for each round.
 */
		em_timer_tick_t start_abs,
		em_timer_tick_t period,
		em_event_t tmo_ev)
{
		(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF),
			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC,
			"Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
			tmo, tmo_ev);
	/* check that tmo buf is valid before accessing other struct members */
	RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
			EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC,
			"Invalid tmo buffer");
	RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC),
			EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC,
			"Not periodic tmo");
	if (EM_CHECK_LEVEL > 1) {
		em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);

		RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
				EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_PERIODIC,
				"Invalid tmo state:%d", tmo_state);
	}
		!is_event_type_valid(tmo_ev),
			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC,
			"invalid event type");

	event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
	odp_event_t odp_ev = event_em2odp(tmo_ev);
	bool esv_ena = esv_enabled();
	odp_timer_start_t startp;

			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC,
			"Invalid event type: timer-ring");

	if (esv_ena)
		evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC);

	TMR_DBG_PRINT("start %lu, period %lu\n", start_abs, period);

	tmo->period = period;
	/* 0 means start one period from now */
	if (start_abs == 0)
		start_abs = odp_timer_current_tick(tmo->odp_timer_pool) + period;
	tmo->last_tick = start_abs;
	TMR_DBG_PRINT("last_tick %lu, now %lu\n", tmo->last_tick,
		      odp_timer_current_tick(tmo->odp_timer_pool));

	/* set tmo active and arm with absolute time */
	startp.tick_type = ODP_TIMER_TICK_ABS;
	startp.tick = start_abs;
	startp.tmo_ev = odp_ev;
	ev_hdr->tmo = tmo;
	odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
	int odpret = odp_timer_start(tmo->odp_timer, &startp);

	if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
		/* arming failed: revert event ownership, state and esv */
		ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
		ev_hdr->tmo = EM_TMO_UNDEF;
		odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
		if (esv_ena)
			evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC__FAIL);

		TMR_DBG_PRINT("diff to tmo %ld\n",
			      (int64_t)tmo->last_tick -
			      (int64_t)odp_timer_current_tick(tmo->odp_timer_pool));

		em_status_t retval = timer_rv_odp2em(odpret);

		if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
			TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
			return retval;
		}
		return INTERNAL_ERROR(retval,
				      EM_ESCOPE_TMO_SET_PERIODIC,
				      "odp_timer_start():%d", odpret);
	}
	TMR_DBG_PRINT("OK\n");
	return EM_OK;
}
1486 
/*
 * em_tmo_set_periodic_ring (scope: EM_ESCOPE_TMO_SET_PERIODIC_RING):
 * start a periodic ring tmo. The timeout event is either one the user
 * passes in (must be an ODP TIMEOUT event), the one pre-allocated at
 * tmo create, or a freshly allocated one on re-start.
 */
		em_timer_tick_t start_abs,
		uint64_t multiplier,
		em_event_t tmo_ev)
{
			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC_RING,
			"Inv.args: tmo UNDEF");
	/* check that tmo buf is valid before accessing other struct members */
	RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
			EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC_RING,
			"Invalid tmo buffer");
	RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC),
			EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC_RING,
			"Not periodic tmo");
	if (EM_CHECK_LEVEL > 1) {
		em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);

		RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN,
				EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_PERIODIC_RING,
				"Invalid tmo state:%d", tmo_state);
	}

	odp_timer_periodic_start_t startp;
	odp_event_t odp_ev = tmo->odp_timeout; /* pre-allocated */

	if (tmo_ev != EM_EVENT_UNDEF) { /* user gave event to (re-)use */
		odp_ev = event_em2odp(tmo_ev);
			odp_event_type(odp_ev) != ODP_EVENT_TIMEOUT,
			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC_RING,
			"Inv.args: not TIMER event given");
		odp_timeout_t odp_tmo = odp_timeout_from_event(odp_ev);
		/* ring timeout events keep the EM header in the user area */
		event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo);

		ev_hdr->tmo = tmo;
		TMR_DBG_PRINT("user event %p\n", tmo_ev);
	} else {
		tmo->odp_timeout = ODP_EVENT_INVALID; /* now used */
	}

	if (odp_ev == ODP_EVENT_INVALID) { /* re-start, pre-alloc used */
		odp_event_t odp_tmo_event = alloc_odp_timeout(tmo);

		if (unlikely(odp_tmo_event == ODP_EVENT_INVALID))
			return INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_SET_PERIODIC_RING,
					      "Ring: odp timeout event allocation failed");
		odp_ev = odp_tmo_event;
	}

	TMR_DBG_PRINT("ring tmo start_abs %lu, M=%lu, odp ev=%p\n", start_abs, multiplier, odp_ev);
	/* period is base_hz * multiplier, handled inside ODP */
	startp.first_tick = start_abs;
	startp.freq_multiplier = multiplier;
	startp.tmo_ev = odp_ev;
	odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE);
	int odpret = odp_timer_periodic_start(tmo->odp_timer, &startp);

	if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
		odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);

		em_status_t retval = timer_rv_odp2em(odpret);

		if (retval == EM_ERR_TOONEAR) { /* skip errorhandler */
			TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
			return retval;
		}
		return INTERNAL_ERROR(retval,
				      EM_ESCOPE_TMO_SET_PERIODIC_RING,
				      "odp_timer_periodic_start(): ret %d", odpret);
	}
	/* ok */
	TMR_DBG_PRINT("OK\n");
	return EM_OK;
}
1562 
1563 em_status_t em_tmo_cancel(em_tmo_t tmo, em_event_t *cur_event)
1564 {
1565  if (EM_CHECK_LEVEL > 0) {
1566  RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || cur_event == NULL,
1567  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CANCEL,
1568  "Invalid args: tmo:%" PRI_TMO " cur_event:%p",
1569  tmo, cur_event);
1570  }
1571  *cur_event = EM_EVENT_UNDEF;
1572  if (EM_CHECK_LEVEL > 1) {
1573  RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer),
1574  EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL,
1575  "Invalid tmo buffer");
1576  RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID,
1577  EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL,
1578  "Invalid tmo odp_timer");
1579  }
1580 
1581  /* check state: EM_TMO_STATE_UNKNOWN | EM_TMO_STATE_IDLE | EM_TMO_STATE_ACTIVE */
1582  em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
1583 
1584  RETURN_ERROR_IF(tmo_state != EM_TMO_STATE_ACTIVE,
1585  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL,
1586  "Invalid tmo state:%d (!%d)", tmo_state, EM_TMO_STATE_ACTIVE);
1587 
1588  TMR_DBG_PRINT("ODP tmo %p\n", tmo->odp_timer);
1589 
1590  odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE);
1591 
1592  if (tmo->is_ring) { /* periodic ring never returns event here */
1593  RETURN_ERROR_IF(odp_timer_periodic_cancel(tmo->odp_timer) != 0,
1594  EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CANCEL,
1595  "odp periodic cancel fail");
1596  return EM_ERR_TOONEAR; /* ack will tell when no more coming */
1597  }
1598 
1599  /* not ring, cancel*/
1600  odp_event_t odp_ev = ODP_EVENT_INVALID;
1601  int ret = odp_timer_cancel(tmo->odp_timer, &odp_ev);
1602 
1603  if (ret != 0) { /* speculative, odp does not today separate fail and too late */
1604  if (EM_CHECK_LEVEL > 1) {
1605  RETURN_ERROR_IF(odp_ev != ODP_EVENT_INVALID,
1606  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL,
1607  "Bug? ODP timer cancel fail but return event!");
1608  }
1609  TMR_DBG_PRINT("fail, odpret %d. Assume TOONEAR\n", ret);
1610  return EM_ERR_TOONEAR; /* expired, other cases caught above */
1611  }
1612 
1613  /*
1614  * Cancel successful (ret == 0): odp_ev contains the canceled tmo event
1615  */
1616 
1617  if (EM_CHECK_LEVEL > 2) {
1618  RETURN_ERROR_IF(!odp_event_is_valid(odp_ev),
1619  EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CANCEL,
1620  "Invalid tmo event from odp_timer_cancel");
1621  }
1622 
1623  em_event_t tmo_ev = event_odp2em(odp_ev);
1624  event_hdr_t *ev_hdr = event_to_hdr(tmo_ev);
1625 
1626  /* successful cancel also resets the event tmo type */
1627  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
1628  ev_hdr->tmo = EM_TMO_UNDEF;
1629 
1630  if (esv_enabled())
1631  tmo_ev = evstate_em2usr(tmo_ev, ev_hdr, EVSTATE__TMO_CANCEL);
1632 
1633  *cur_event = tmo_ev;
1634  TMR_DBG_PRINT("OK\n");
1635  return EM_OK;
1636 }
1637 
/*
 * em_tmo_ack: acknowledge a received periodic timeout and re-arm the
 * next period with next_tmo_ev. The periodic timer silently stops if
 * this fails, hence the retry/skip handling below. Ring timers are
 * delegated to ack_ring_timeout_event(). Returns EM_ERR_CANCELED
 * (without error handler) when the tmo was canceled meanwhile.
 */
em_status_t em_tmo_ack(em_tmo_t tmo, em_event_t next_tmo_ev)
{
		(tmo == EM_TMO_UNDEF || next_tmo_ev == EM_EVENT_UNDEF),
			EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK,
			"Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "",
			tmo, next_tmo_ev);
	/* check that tmo buf is valid before accessing other struct members */
	RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
			EM_ERR_BAD_ID, EM_ESCOPE_TMO_ACK,
			"Tmo ACK: invalid tmo buffer");
	RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC),
			EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_ACK,
			"Tmo ACK: Not a periodic tmo");

	if (EM_TIMER_TMO_STATS)
		tmo->stats.num_acks++;

	em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state);
	event_hdr_t *ev_hdr = event_to_hdr(next_tmo_ev);
	odp_event_t odp_ev = event_em2odp(next_tmo_ev);

	if (tmo->is_ring) /* ring timer */
		return ack_ring_timeout_event(tmo, next_tmo_ev, tmo_state, ev_hdr, odp_ev);

	/* not periodic ring, set next timeout */
	if (unlikely(tmo_state != EM_TMO_STATE_ACTIVE)) {
		/* detach the event from the tmo before bailing out */
		ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
		ev_hdr->tmo = EM_TMO_UNDEF;

		if (tmo_state == EM_TMO_STATE_IDLE) /* canceled, skip errorhandler */
			return EM_ERR_CANCELED;

		return INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_TMO_ACK,
				      "Tmo ACK: invalid tmo state:%d", tmo_state);
	}

	bool esv_ena = esv_enabled();

	if (esv_ena)
		evstate_usr2em(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK);
	/*
	 * The periodic timer will silently stop if ack fails! Attempt to
	 * handle exceptions and if the tmo cannot be renewed, call
	 * the errorhandler so the application may recover.
	 */
	tmo->last_tick += tmo->period; /* maintain absolute time */
	int ret;
	int tries = EM_TIMER_ACK_TRIES;
	em_status_t err;
	odp_timer_start_t startp;

	startp.tick_type = ODP_TIMER_TICK_ABS;
	startp.tmo_ev = odp_ev;
	ev_hdr->flags.tmo_type = EM_TMO_TYPE_PERIODIC; /* could be new event */
	ev_hdr->tmo = tmo;

	/* try to set tmo EM_TIMER_ACK_TRIES times */
	do {
		/* ask new timeout for next period */
		startp.tick = tmo->last_tick;
		ret = odp_timer_start(tmo->odp_timer, &startp);
		/*
		 * Calling ack() was delayed over next period if 'ret' is
		 * ODP_TIMER_TOO_NEAR, i.e. now in past. Other errors
		 * should not happen, fatal for this tmo
		 */
		if (likely(ret != ODP_TIMER_TOO_NEAR)) {
			if (ret != ODP_TIMER_SUCCESS) {
				TMR_DBG_PRINT("ODP return %d\n"
					      "tmo tgt/tick now %lu/%lu\n",
					      ret, tmo->last_tick,
					      odp_timer_current_tick(tmo->odp_timer_pool));
			}
			break; /* ok */
		}

		/* ODP_TIMER_TOO_NEAR: ack() delayed beyond next time slot */
		if (EM_TIMER_TMO_STATS)
			tmo->stats.num_late_ack++;
		TMR_DBG_PRINT("late, tgt/now %lu/%lu\n", tmo->last_tick,
			      odp_timer_current_tick(tmo->odp_timer_pool));

		if (tmo->flags & EM_TMO_FLAG_NOSKIP) /* not allowed to skip, send immediately */
			return handle_ack_noskip(next_tmo_ev, ev_hdr, tmo->queue);

		/* skip already passed periods and try again */
		handle_ack_skip(tmo);

		tries--;
		if (unlikely(tries < 1)) {
			/* NOTE(review): the error-call opening here is not
			 * visible in this copy — verify against upstream */
				      EM_ESCOPE_TMO_ACK,
				      "Tmo ACK: too many retries:%u",
				      EM_TIMER_ACK_TRIES);
			goto ack_err;
		}
	} while (ret != ODP_TIMER_SUCCESS);

	if (unlikely(ret != ODP_TIMER_SUCCESS)) {
		err = INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK,
				     "Tmo ACK: failed to renew tmo (odp ret %d)",
				     ret);
		goto ack_err;
	}
	return EM_OK;

ack_err:
	/* fail, restore event state */
	ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
	ev_hdr->tmo = EM_TMO_UNDEF;
	if (esv_ena)
		evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__FAIL);
	return err;
}
1753 
1754 int em_timer_get_all(em_timer_t *tmr_list, int max)
1755 {
1756  odp_ticketlock_lock(&em_shm->timers.timer_lock);
1757 
1758  const uint32_t num_timers = em_shm->timers.num_timers;
1759 
1760  if (tmr_list && max > 0 && num_timers > 0) {
1761  int num = 0;
1762 
1763  for (int i = 0; i < EM_ODP_MAX_TIMERS; i++) {
1764  if (em_shm->timers.timer[i].odp_tmr_pool != ODP_TIMER_POOL_INVALID) {
1765  tmr_list[num] = TMR_I2H(i);
1766  num++;
1767  if (num >= max)
1768  break;
1769  }
1770  }
1771  }
1772 
1773  odp_ticketlock_unlock(&em_shm->timers.timer_lock);
1774 
1775  return num_timers;
1776 }
1777 
/*
 * em_timer_get_attr (scope: EM_ESCOPE_TIMER_GET_ATTR): read back the
 * timer's current attributes from the ODP timer pool info. A SINGLE
 * (oneshot-capable) pool fills resparam, a periodic pool fills
 * ringparam; the unused half is zeroed.
 * NOTE(review): the declaration of 'clk' (presumably em_timer_clksrc_t)
 * is not visible in this copy — verify against upstream.
 */
{
	odp_timer_pool_info_t poolinfo;
	int i = TMR_H2I(tmr);
	int ret;

	if (EM_CHECK_LEVEL > 0)
		RETURN_ERROR_IF(!is_timer_valid(tmr) || tmr_attr == NULL,
				EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_ATTR,
				"Inv.args: timer:%" PRI_TMR " tmr_attr:%p",
				tmr, tmr_attr);

	/* get current values from ODP */
	ret = odp_timer_pool_info(em_shm->timers.timer[i].odp_tmr_pool, &poolinfo);
	RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_GET_ATTR,
			"ODP timer pool info failed");

	/* map ODP clock source back to the EM enumeration */
	timer_clksrc_odp2em(poolinfo.param.clk_src, &clk);

	if (poolinfo.param.timer_type == ODP_TIMER_TYPE_SINGLE) {
		tmr_attr->resparam.res_ns = poolinfo.param.res_ns;
		tmr_attr->resparam.res_hz = poolinfo.param.res_hz;
		tmr_attr->resparam.max_tmo = poolinfo.param.max_tmo;
		tmr_attr->resparam.min_tmo = poolinfo.param.min_tmo;
		tmr_attr->resparam.clk_src = clk;
		memset(&tmr_attr->ringparam, 0, sizeof(em_timer_ring_param_t));
	} else {
		tmr_attr->ringparam.base_hz.integer = poolinfo.param.periodic.base_freq_hz.integer;
		tmr_attr->ringparam.base_hz.numer = poolinfo.param.periodic.base_freq_hz.numer;
		tmr_attr->ringparam.base_hz.denom = poolinfo.param.periodic.base_freq_hz.denom;
		tmr_attr->ringparam.max_mul = poolinfo.param.periodic.max_multiplier;
		tmr_attr->ringparam.res_ns = poolinfo.param.res_ns;
		tmr_attr->ringparam.clk_src = clk;
		memset(&tmr_attr->resparam, 0, sizeof(em_timer_res_param_t));
	}

	tmr_attr->num_tmo = poolinfo.param.num_timers;
	tmr_attr->flags = em_shm->timers.timer[i].flags;

	/* copy name, guaranteeing NUL termination */
	strncpy(tmr_attr->name, poolinfo.name, EM_TIMER_NAME_LEN - 1);
	tmr_attr->name[EM_TIMER_NAME_LEN - 1] = '\0';
	return EM_OK;
}
1822 
1823 uint64_t em_timer_get_freq(em_timer_t tmr)
1824 {
1825  const timer_storage_t *const tmrs = &em_shm->timers;
1826 
1827  if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) {
1828  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_FREQ,
1829  "Invalid timer:%" PRI_TMR "", tmr);
1830  return 0;
1831  }
1832 
1833  return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool,
1834  1000ULL * 1000ULL * 1000ULL); /* 1 sec */
1835 }
1836 
1837 uint64_t em_timer_tick_to_ns(em_timer_t tmr, em_timer_tick_t ticks)
1838 {
1839  const timer_storage_t *const tmrs = &em_shm->timers;
1840 
1841  if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) {
1842  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_TICK_TO_NS,
1843  "Invalid timer:%" PRI_TMR "", tmr);
1844  return 0;
1845  }
1846 
1847  return odp_timer_tick_to_ns(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ticks);
1848 }
1849 
1850 em_timer_tick_t em_timer_ns_to_tick(em_timer_t tmr, uint64_t ns)
1851 {
1852  const timer_storage_t *const tmrs = &em_shm->timers;
1853 
1854  if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) {
1855  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_NS_TO_TICK,
1856  "Invalid timer:%" PRI_TMR "", tmr);
1857  return 0;
1858  }
1859 
1860  return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ns);
1861 }
1862 
1864 {
1865  if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) {
1866  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo");
1867  return EM_TMO_STATE_UNKNOWN;
1868  }
1869  if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) {
1870  INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo buffer");
1871  return EM_TMO_STATE_UNKNOWN;
1872  }
1873 
1874  return odp_atomic_load_acq_u32(&tmo->state);
1875 }
1876 
1878 {
1880  EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATS,
1881  "Invalid tmo");
1882  /* check that tmo buf is valid before accessing other struct members */
1883  RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer),
1884  EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATS,
1885  "Invalid tmo buffer");
1886  RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo->odp_timer == ODP_TIMER_INVALID,
1887  EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_STATS,
1888  "tmo deleted?");
1889 
1890  if (EM_TIMER_TMO_STATS) {
1891  if (stat)
1892  *stat = tmo->stats;
1893  } else {
1894  return EM_ERR_NOT_IMPLEMENTED;
1895  }
1896 
1897  return EM_OK;
1898 }
1899 
1900 em_tmo_type_t em_tmo_get_type(em_event_t event, em_tmo_t *tmo, bool reset)
1901 {
1902  if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) {
1903  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATE, "Invalid event given");
1904  return EM_TMO_TYPE_NONE;
1905  }
1906 
1907  event_hdr_t *ev_hdr = event_to_hdr(event);
1908  em_tmo_type_t type = (em_tmo_type_t)ev_hdr->flags.tmo_type;
1909 
1910  if (EM_CHECK_LEVEL > 1 && unlikely(!can_have_tmo_type(event))) {
1911  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATE,
1912  "Invalid event type");
1913  return EM_TMO_TYPE_NONE;
1914  }
1915 
1916  if (EM_CHECK_LEVEL > 2 && unlikely(type > EM_TMO_TYPE_PERIODIC)) {
1917  INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_STATE,
1918  "Invalid tmo event type, header corrupted?");
1919  return EM_TMO_TYPE_NONE;
1920  }
1921 
1922  if (tmo)
1923  *tmo = (type == EM_TMO_TYPE_NONE) ? EM_TMO_UNDEF : ev_hdr->tmo;
1924 
1925  if (reset && ev_hdr->event_type != EM_EVENT_TYPE_TIMER_IND) {
1926  ev_hdr->flags.tmo_type = EM_TMO_TYPE_NONE;
1927  ev_hdr->tmo = EM_TMO_UNDEF;
1928  }
1929 
1930  return type;
1931 }
1932 
1933 void *em_tmo_get_userptr(em_event_t event, em_tmo_t *tmo)
1934 {
1935  if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) {
1936  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_USERPTR, "Invalid event given");
1937  return NULL;
1938  }
1939 
1940  odp_event_t odp_event = event_em2odp(event);
1941  odp_event_type_t evtype = odp_event_type(odp_event);
1942 
1943  if (unlikely(evtype != ODP_EVENT_TIMEOUT)) /* no errorhandler for other events */
1944  return NULL;
1945 
1946  event_hdr_t *ev_hdr = event_to_hdr(event); /* will not return on error */
1947 
1948  if (tmo) /* always periodic timeout here */
1949  *tmo = ev_hdr->tmo;
1950 
1951  return odp_timeout_user_ptr(odp_timeout_from_event(odp_event));
1952 }
1953 
1955 {
1956  if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) {
1957  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TIMER, "Invalid tmo given");
1958  return EM_TIMER_UNDEF;
1959  }
1960  if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) {
1961  INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_TIMER, "Corrupted tmo?");
1962  return EM_TIMER_UNDEF;
1963  }
1964 
1965  return tmo->timer;
1966 }
1967 
1968 uint64_t em_timer_to_u64(em_timer_t timer)
1969 {
1970  return (uint64_t)timer;
1971 }
1972 
1974 {
1975  return (uint64_t)tmo;
1976 }
EM_TMO_TYPE_NONE
@ EM_TMO_TYPE_NONE
Definition: api/event_machine_timer.h:251
em_timer_ring_capability
em_status_t em_timer_ring_capability(em_timer_ring_param_t *ring)
Check periodic ring timer capability.
Definition: event_machine_timer.c:577
em_tmo_set_abs
em_status_t em_tmo_set_abs(em_tmo_t tmo, em_timer_tick_t ticks_abs, em_event_t tmo_ev)
Definition: event_machine_timer.c:1255
em_timer_current_tick
em_timer_tick_t em_timer_current_tick(em_timer_t tmr)
Definition: event_machine_timer.c:1045
em_timer_capability_t::max_timers
uint32_t max_timers
Definition: api/event_machine_timer.h:382
em_tmo_stats_t
Definition: api/event_machine_timer.h:368
EM_OK
#define EM_OK
Definition: event_machine_types.h:329
EM_TMO_FLAG_PERIODIC
@ EM_TMO_FLAG_PERIODIC
Definition: event_machine_timer_hw_specific.h:82
em_timer_attr_t::resparam
em_timer_res_param_t resparam
Definition: api/event_machine_timer.h:339
em_tmo_create_arg
em_tmo_t em_tmo_create_arg(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue, em_tmo_args_t *args)
Definition: event_machine_timer.c:1061
event_hdr::event_size
uint32_t event_size
Definition: em_event_types.h:255
EM_EVENT_TYPE_SW
@ EM_EVENT_TYPE_SW
Definition: event_machine_hw_types.h:72
em_tmo_set_rel
em_status_t em_tmo_set_rel(em_tmo_t tmo, em_timer_tick_t ticks_rel, em_event_t tmo_ev)
Definition: event_machine_timer.c:1330
em_timer_capability_t::max_num_tmo
uint32_t max_num_tmo
Definition: api/event_machine_timer.h:384
EM_ERR_TOOFAR
@ EM_ERR_TOOFAR
Definition: event_machine_hw_types.h:307
em_tmo_delete
em_status_t em_tmo_delete(em_tmo_t tmo, em_event_t *cur_event)
Definition: event_machine_timer.c:1161
em_tmo_flag_t
em_tmo_flag_t
Definition: event_machine_timer_hw_specific.h:80
EM_ERR_NOT_INITIALIZED
@ EM_ERR_NOT_INITIALIZED
Definition: event_machine_hw_types.h:280
EM_TIMER_FLAG_RING
@ EM_TIMER_FLAG_RING
Definition: event_machine_timer_hw_specific.h:63
em_timer_tick_to_ns
uint64_t em_timer_tick_to_ns(em_timer_t tmr, em_timer_tick_t ticks)
Definition: event_machine_timer.c:1837
event_hdr_t
struct event_hdr event_hdr_t
em_tmo_get_state
em_tmo_state_t em_tmo_get_state(em_tmo_t tmo)
Definition: event_machine_timer.c:1863
EM_TMO_FLAG_NOSKIP
@ EM_TMO_FLAG_NOSKIP
Definition: event_machine_timer_hw_specific.h:83
PRI_TMO
#define PRI_TMO
Definition: event_machine_timer_hw_specific.h:72
em_timer_capability_t::max_res
em_timer_res_param_t max_res
Definition: api/event_machine_timer.h:386
em_timer_attr_t::ringparam
em_timer_ring_param_t ringparam
Definition: api/event_machine_timer.h:351
em_timer_ring_param_t
Definition: api/event_machine_timer.h:306
em_tmo_stats_t::num_period_skips
uint64_t num_period_skips
Definition: api/event_machine_timer.h:374
EM_EVENT_UNDEF
#define EM_EVENT_UNDEF
Definition: event_machine_types.h:62
em_tmo_ack
em_status_t em_tmo_ack(em_tmo_t tmo, em_event_t next_tmo_ev)
Definition: event_machine_timer.c:1638
EM_TMO_TYPE_ONESHOT
@ EM_TMO_TYPE_ONESHOT
Definition: api/event_machine_timer.h:252
PRI_TMR
#define PRI_TMR
Definition: event_machine_timer_hw_specific.h:49
em_timer_capability_t
Definition: api/event_machine_timer.h:380
EM_QUEUE_TYPE_PARALLEL
@ EM_QUEUE_TYPE_PARALLEL
Definition: event_machine_hw_types.h:117
em_timer_tick_t
uint64_t em_timer_tick_t
Definition: api/event_machine_timer.h:261
em_timer_capability
em_status_t em_timer_capability(em_timer_capability_t *capa, em_timer_clksrc_t clk_src)
Definition: event_machine_timer.c:502
em_timer_get_attr
em_status_t em_timer_get_attr(em_timer_t tmr, em_timer_attr_t *tmr_attr)
Definition: event_machine_timer.c:1778
event_hdr::event_type
em_event_type_t event_type
Definition: em_event_types.h:241
queue_elem_t::type
uint8_t type
Definition: em_queue_types.h:216
em_timer_delete
em_status_t em_timer_delete(em_timer_t tmr)
Definition: event_machine_timer.c:980
evstate_usr2em_revert
void evstate_usr2em_revert(em_event_t event, event_hdr_t *const ev_hdr, const uint16_t api_op)
Definition: em_event_state.c:999
em_timer_attr_t::name
char name[EM_TIMER_NAME_LEN]
Definition: api/event_machine_timer.h:345
em_timer_ring_create
em_timer_t em_timer_ring_create(const em_timer_attr_t *ring_attr)
Definition: event_machine_timer.c:770
em_timer_attr_t::num_tmo
uint32_t num_tmo
Definition: api/event_machine_timer.h:341
em_send
em_status_t em_send(em_event_t event, em_queue_t queue)
Definition: event_machine_event.c:661
EM_ERR_ALLOC_FAILED
@ EM_ERR_ALLOC_FAILED
Definition: event_machine_hw_types.h:287
EM_ERR_LIB_FAILED
@ EM_ERR_LIB_FAILED
Definition: event_machine_hw_types.h:291
timer_storage_t
Definition: em_timer_types.h:50
em_timer_capability_t::max_base_hz
em_fract_u64_t max_base_hz
Definition: api/event_machine_timer.h:399
EM_ERR_NOT_SUPPORTED
@ EM_ERR_NOT_SUPPORTED
Definition: event_machine_hw_types.h:284
em_timer_ring_param_t::base_hz
em_fract_u64_t base_hz
Definition: api/event_machine_timer.h:310
EM_QUEUE_TYPE_ATOMIC
@ EM_QUEUE_TYPE_ATOMIC
Definition: event_machine_hw_types.h:112
evstate_free
void evstate_free(em_event_t event, event_hdr_t *const ev_hdr, const uint16_t api_op)
Definition: em_event_state.c:914
EM_EVENT_TYPE_TIMER
@ EM_EVENT_TYPE_TIMER
Definition: event_machine_hw_types.h:78
em_tmo_stats_t::num_acks
uint64_t num_acks
Definition: api/event_machine_timer.h:370
em_timer_res_param_t::max_tmo
uint64_t max_tmo
Definition: api/event_machine_timer.h:299
EM_ERR_TOONEAR
@ EM_ERR_TOONEAR
Definition: event_machine_hw_types.h:305
em_tmo_create
em_tmo_t em_tmo_create(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue)
Definition: event_machine_timer.c:1056
event_hdr::flags
union event_hdr::@34 flags
PRI_EVENT
#define PRI_EVENT
Definition: event_machine_types.h:64
em_timer_capability_t::ring
struct em_timer_capability_t::@2 ring
em_timer_timeout_t
Definition: em_timer_types.h:76
em_core_count
int em_core_count(void)
Definition: event_machine_core.c:40
em_tmo_get_userptr
void * em_tmo_get_userptr(em_event_t event, em_tmo_t *tmo)
Definition: event_machine_timer.c:1933
event_hdr::event
em_event_t event
Definition: em_event_types.h:246
em_fract_u64_t::denom
uint64_t denom
Definition: api/event_machine_timer.h:275
EM_QUEUE_TYPE_UNSCHEDULED
@ EM_QUEUE_TYPE_UNSCHEDULED
Definition: event_machine_hw_types.h:127
em_tmo_args_t::userptr
void * userptr
Definition: api/event_machine_timer.h:410
event_hdr
Definition: em_event_types.h:184
em_timer_ring_param_t::clk_src
em_timer_clksrc_t clk_src
Definition: api/event_machine_timer.h:308
em_timer_capability_t::max_rings
uint32_t max_rings
Definition: api/event_machine_timer.h:393
EM_ERR_BAD_CONTEXT
@ EM_ERR_BAD_CONTEXT
Definition: event_machine_hw_types.h:269
em_timer_res_capability
em_status_t em_timer_res_capability(em_timer_res_param_t *res, em_timer_clksrc_t clk_src)
Definition: event_machine_timer.c:544
queue_elem_t::odp_queue
odp_queue_t odp_queue
Definition: em_queue_types.h:228
em_timer_attr_t
Definition: api/event_machine_timer.h:335
em_event_get_type
em_event_type_t em_event_get_type(em_event_t event)
Definition: event_machine_event.c:968
RETURN_ERROR_IF
#define RETURN_ERROR_IF(cond, error, escope, fmt,...)
Definition: em_error.h:50
em_timer_res_param_t::min_tmo
uint64_t min_tmo
Definition: api/event_machine_timer.h:297
em_timer_clksrc_t
em_timer_clksrc_t
Definition: event_machine_timer_hw_specific.h:95
EM_ERR_BAD_ID
@ EM_ERR_BAD_ID
Definition: event_machine_hw_types.h:265
INTERNAL_ERROR
#define INTERNAL_ERROR(error, escope, fmt,...)
Definition: em_error.h:43
EM_TMO_STATE_IDLE
@ EM_TMO_STATE_IDLE
Definition: api/event_machine_timer.h:242
EM_TIMER_CLKSRC_DEFAULT
#define EM_TIMER_CLKSRC_DEFAULT
Definition: event_machine_timer_hw_specific.h:106
EM_TMO_TYPE_PERIODIC
@ EM_TMO_TYPE_PERIODIC
Definition: api/event_machine_timer.h:253
em_timer_attr_init
void em_timer_attr_init(em_timer_attr_t *tmr_attr)
Definition: event_machine_timer.c:378
em_tmo_stats_t::num_late_ack
uint64_t num_late_ack
Definition: api/event_machine_timer.h:372
EM_TIMER_FLAG_NONE
@ EM_TIMER_FLAG_NONE
Definition: event_machine_timer_hw_specific.h:59
event_hdr::user_area
ev_hdr_user_area_t user_area
Definition: em_event_types.h:277
evstate_alloc_tmo
em_event_t evstate_alloc_tmo(const em_event_t event, event_hdr_t *const ev_hdr)
Definition: em_event_state.c:750
em_timer_capability_t::max_tmo
em_timer_res_param_t max_tmo
Definition: api/event_machine_timer.h:388
em_status_t
uint32_t em_status_t
Definition: event_machine_types.h:321
EM_ERR_CANCELED
@ EM_ERR_CANCELED
Definition: event_machine_hw_types.h:309
em_tmo_set_periodic
em_status_t em_tmo_set_periodic(em_tmo_t tmo, em_timer_tick_t start_abs, em_timer_tick_t period, em_event_t tmo_ev)
Definition: event_machine_timer.c:1402
PRI_QUEUE
#define PRI_QUEUE
Definition: event_machine_types.h:109
em_tmo_get_type
em_tmo_type_t em_tmo_get_type(em_event_t event, em_tmo_t *tmo, bool reset)
Definition: event_machine_timer.c:1900
EM_CHECK_LEVEL
#define EM_CHECK_LEVEL
Definition: event_machine_config.h:253
EM_ERR_OPERATION_FAILED
@ EM_ERR_OPERATION_FAILED
Definition: event_machine_hw_types.h:289
em_fract_u64_t::numer
uint64_t numer
Definition: api/event_machine_timer.h:272
event_hdr::egrp
em_event_group_t egrp
Definition: em_event_types.h:265
evstate_em2usr
em_event_t evstate_em2usr(em_event_t event, event_hdr_t *const ev_hdr, const uint16_t api_op)
Definition: em_event_state.c:952
EM_TMO_FLAG_ONESHOT
@ EM_TMO_FLAG_ONESHOT
Definition: event_machine_timer_hw_specific.h:81
em_event_type_t
uint32_t em_event_type_t
Definition: event_machine_types.h:85
em_shm
em_shm_t * em_shm
Definition: event_machine_init.c:41
EM_TMO_UNDEF
#define EM_TMO_UNDEF
Definition: event_machine_timer_hw_specific.h:125
event_timer_t
Definition: em_timer_types.h:38
EM_EVENT_TYPE_TIMER_IND
@ EM_EVENT_TYPE_TIMER_IND
Definition: event_machine_hw_types.h:91
em_include.h
EM_TIMER_UNDEF
#define EM_TIMER_UNDEF
Definition: event_machine_timer_hw_specific.h:120
EM_QUEUE_TYPE_PARALLEL_ORDERED
@ EM_QUEUE_TYPE_PARALLEL_ORDERED
Definition: event_machine_hw_types.h:122
em_timer_ring_param_t::max_mul
uint64_t max_mul
Definition: api/event_machine_timer.h:312
em_tmo_state_t
em_tmo_state_t
Definition: api/event_machine_timer.h:240
em_timer_res_param_t::res_ns
uint64_t res_ns
Definition: api/event_machine_timer.h:293
EM_CHECK_INIT_CALLED
#define EM_CHECK_INIT_CALLED
Definition: em_include.h:69
EM_EVENT_TYPE_PACKET
@ EM_EVENT_TYPE_PACKET
Definition: event_machine_hw_types.h:75
event_hdr::tmo_type
uint8_t tmo_type
Definition: em_event_types.h:225
EM_TIMER_FLAG_PRIVATE
@ EM_TIMER_FLAG_PRIVATE
Definition: event_machine_timer_hw_specific.h:61
em_tmo_get_timer
em_timer_t em_tmo_get_timer(em_tmo_t tmo)
Definition: event_machine_timer.c:1954
EM_ERR_BAD_POINTER
@ EM_ERR_BAD_POINTER
Definition: event_machine_hw_types.h:271
em_fract_u64_t::integer
uint64_t integer
Definition: api/event_machine_timer.h:269
em_timer_ring_param_t::res_ns
uint64_t res_ns
Definition: api/event_machine_timer.h:314
em_timer_get_all
int em_timer_get_all(em_timer_t *tmr_list, int max)
Definition: event_machine_timer.c:1754
EM_ERR_BAD_ARG
@ EM_ERR_BAD_ARG
Definition: event_machine_hw_types.h:261
EM_ERR_NOT_IMPLEMENTED
@ EM_ERR_NOT_IMPLEMENTED
Definition: event_machine_hw_types.h:282
em_timer_ring_attr_init
em_status_t em_timer_ring_attr_init(em_timer_attr_t *ring_attr, em_timer_clksrc_t clk_src, uint64_t base_hz, uint64_t max_mul, uint64_t res_ns)
Definition: event_machine_timer.c:444
em_timer_to_u64
uint64_t em_timer_to_u64(em_timer_t timer)
Definition: event_machine_timer.c:1968
em_timer_get_freq
uint64_t em_timer_get_freq(em_timer_t tmr)
Definition: event_machine_timer.c:1823
evstate_usr2em
void evstate_usr2em(em_event_t event, event_hdr_t *const ev_hdr, const uint16_t api_op)
Definition: em_event_state.c:990
em_timer_res_param_t
Definition: api/event_machine_timer.h:289
em_timer_ns_to_tick
em_timer_tick_t em_timer_ns_to_tick(em_timer_t tmr, uint64_t ns)
Definition: event_machine_timer.c:1850
event_hdr::tmo
em_tmo_t tmo
Definition: em_event_types.h:272
EM_ERR_BAD_STATE
@ EM_ERR_BAD_STATE
Definition: event_machine_hw_types.h:263
em_tmo_set_periodic_ring
em_status_t em_tmo_set_periodic_ring(em_tmo_t tmo, em_timer_tick_t start_abs, uint64_t multiplier, em_event_t tmo_ev)
Definition: event_machine_timer.c:1487
em_tmo_get_stats
em_status_t em_tmo_get_stats(em_tmo_t tmo, em_tmo_stats_t *stat)
Definition: event_machine_timer.c:1877
EM_EVENT_GROUP_UNDEF
#define EM_EVENT_GROUP_UNDEF
Definition: event_machine_types.h:141
em_timer_capability_t::min_base_hz
em_fract_u64_t min_base_hz
Definition: api/event_machine_timer.h:397
em_timer_create
em_timer_t em_timer_create(const em_timer_attr_t *tmr_attr)
Definition: event_machine_timer.c:616
em_tmo_args_t
Definition: api/event_machine_timer.h:408
queue_elem_t
Definition: em_queue_types.h:180
em_timer_attr_t::__internal_check
uint32_t __internal_check
Definition: api/event_machine_timer.h:359
em_tmo_type_t
em_tmo_type_t
Definition: api/event_machine_timer.h:250
em_timer_res_param_t::res_hz
uint64_t res_hz
Definition: api/event_machine_timer.h:295
ev_hdr_user_area_t::isinit
uint32_t isinit
Definition: em_event_types.h:168
em_timer_res_param_t::clk_src
em_timer_clksrc_t clk_src
Definition: api/event_machine_timer.h:291
em_tmo_to_u64
uint64_t em_tmo_to_u64(em_tmo_t tmo)
Definition: event_machine_timer.c:1973
em_tmo_cancel
em_status_t em_tmo_cancel(em_tmo_t tmo, em_event_t *cur_event)
Definition: event_machine_timer.c:1563
em_timer_attr_t::flags
em_timer_flag_t flags
Definition: api/event_machine_timer.h:343
EM_TMO_STATE_ACTIVE
@ EM_TMO_STATE_ACTIVE
Definition: api/event_machine_timer.h:243