#define TMR_I2H(x) ((em_timer_t)(uintptr_t)((x) + 1))
#define TMR_H2I(x) ((int)((uintptr_t)(x) - 1))
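/*
 * TMR_I2H()/TMR_H2I() convert between a timer-table index and the opaque
 * em_timer_t handle. The +1/-1 offset keeps index 0 from mapping to the
 * undefined handle value (assumption: EM_TIMER_UNDEF is the zero handle).
 */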
static inline em_status_t timer_rv_odp2em(int odpret)
{
    switch (odpret) {
    case ODP_TIMER_SUCCESS:
        return EM_OK;
    case ODP_TIMER_TOO_NEAR:
        return EM_ERR_TOONEAR;
    case ODP_TIMER_TOO_FAR:
        return EM_ERR_TOOFAR;
    }
    return EM_ERR_LIB_FAILED;
}
static inline int is_queue_valid_type(em_timer_t tmr, const queue_elem_t *q_elem)
{
    unsigned int tmridx = (unsigned int)TMR_H2I(tmr);

static inline bool is_event_type_valid(em_event_t event)

static inline bool can_have_tmo_type(em_event_t event)
static inline int is_timer_valid(em_timer_t tmr)
{
    i = (unsigned int)TMR_H2I(tmr);
    if (unlikely(i >= EM_ODP_MAX_TIMERS))
        return 0;

    if (unlikely(tmrs->timer[i].odp_tmr_pool == ODP_TIMER_POOL_INVALID ||
                 tmrs->timer[i].tmo_pool == ODP_POOL_INVALID))
        return 0;
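/*
 * ack_ring_timeout_event(): acknowledge a ring (periodic) timeout via
 * odp_timer_periodic_ack(); a return value of 2 is handled as the final
 * event of a cancelled periodic timer (see the "last periodic event"
 * debug print below).
 */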
144 "Invalid event type:%u, expected timer-ring:%u",
149 "Wrong event returned? tmo %p->%p", tmo, ev_hdr->
tmo);
151 int ret = odp_timer_periodic_ack(tmo->odp_timer, odp_ev);
153 if (unlikely(ret < 0)) {
156 "Tmo ACK: ring timer odp ack fail, rv %d", ret);
159 if (unlikely(ret == 2)) {
162 atomic_thread_fence(memory_order_release);
163 TMR_DBG_PRINT(
"last periodic event %p\n", odp_ev);
    if (timer->tmo_pool != ODP_POOL_INVALID &&
        timer->tmo_pool != em_shm->timers.shared_tmo_pool)
        odp_pool_destroy(timer->tmo_pool);
    if (timer->odp_tmr_pool != ODP_TIMER_POOL_INVALID)
        odp_timer_pool_destroy(timer->odp_tmr_pool);
    timer->tmo_pool = ODP_POOL_INVALID;
    timer->odp_tmr_pool = ODP_TIMER_POOL_INVALID;
    TMR_DBG_PRINT("cleaned up failed timer create\n");
static odp_pool_t create_tmo_handle_pool(uint32_t num_buf, uint32_t cache,
                                         const event_timer_t *tmr)
{
    odp_pool_param_t odp_pool_param;
    odp_pool_t pool;
    char tmo_pool_name[ODP_POOL_NAME_LEN];

    odp_pool_param_init(&odp_pool_param);
    odp_pool_param.type = ODP_POOL_BUFFER;
    odp_pool_param.buf.align = ODP_CACHE_LINE_SIZE;
    odp_pool_param.buf.cache_size = cache;
    odp_pool_param.stats.all = 0;
    TMR_DBG_PRINT("tmo handle pool cache %d\n", odp_pool_param.buf.cache_size);

    /* grow the pool so that per-core local caches cannot exhaust it */
    uint32_t num = num_buf + ((em_core_count() - 1) * odp_pool_param.buf.cache_size);

    if (num_buf != num) {
        TMR_DBG_PRINT("Adjusted pool size %d->%d due to local caching (%d)\n",
                      num_buf, num, odp_pool_param.buf.cache_size);
    }
    odp_pool_param.buf.num = num;
    snprintf(tmo_pool_name, ODP_POOL_NAME_LEN, "Tmo-pool-%d", tmr->idx);
    pool = odp_pool_create(tmo_pool_name, &odp_pool_param);
    if (pool != ODP_POOL_INVALID) {
        TMR_DBG_PRINT("Created ODP-pool: %s for %d timeouts\n",
                      tmo_pool_name, odp_pool_param.buf.num);
    }
    return pool;
}
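/*
 * Sizing example for create_tmo_handle_pool() (hypothetical numbers): with
 * num_buf = 1000, cache = 32 and 4 EM cores, the pool is created with
 * 1000 + 3 * 32 = 1096 buffers so the requested 1000 tmo handles stay
 * allocatable even when every other core holds a full local cache.
 *
 * alloc_odp_timeout() below allocates an ODP timeout event from the ring
 * timer's timeout pool and clears its EM event header flags.
 */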
static inline odp_event_t alloc_odp_timeout(em_tmo_t tmo)
{
    odp_timeout_t odp_tmo = odp_timeout_alloc(tmo->ring_tmo_pool);

    if (unlikely(odp_tmo == ODP_TIMEOUT_INVALID))
        return ODP_EVENT_INVALID;

    event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo);
    odp_event_t odp_event = odp_timeout_to_event(odp_tmo);
    em_event_t event = event_odp2em(odp_event);

    if (unlikely(!ev_hdr)) {
        odp_timeout_free(odp_tmo);
        return ODP_EVENT_INVALID;
    }

    ev_hdr->flags.all = 0;
static inline void free_odp_timeout(odp_event_t odp_event)
{
    em_event_t event = event_odp2em(odp_event);

    event = ev_hdr->event;

    odp_event_free(odp_event);
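/*
 * handle_ack_noskip(): NOSKIP handling for a late periodic tmo; the next
 * timeout event is sent to the tmo queue immediately (em_send) so that no
 * period is lost, instead of skipping ahead.
 */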
static inline em_status_t handle_ack_noskip(em_event_t next_tmo_ev,
                                            event_hdr_t *ev_hdr, em_queue_t queue)

    if (unlikely(err != EM_OK)) {
        err = INTERNAL_ERROR(err, EM_ESCOPE_TMO_ACK,
                             "Tmo ACK: noskip em_send fail");
static inline void handle_ack_skip(em_tmo_t tmo)
{
    uint64_t odpt = odp_timer_current_tick(tmo->odp_timer_pool);
    uint64_t skips;

    if (odpt > tmo->last_tick) /* late by at least one full period */
        skips = ((odpt - tmo->last_tick) / tmo->period) + 1;
    else
        skips = 1;

    tmo->last_tick += skips * tmo->period;
    TMR_DBG_PRINT("%lu skips * %lu ticks => new tgt %lu\n",
                  skips, tmo->period, tmo->last_tick);
    if (EM_TIMER_TMO_STATS)
        tmo->stats.num_period_skips += skips;
    if (unlikely(flags & inv_flags))

    if (unlikely(tmr_attr == NULL)) {
               "Not initialized: em_timer_attr_init(tmr_attr) not called");
           "Only res_ns OR res_hz allowed");
static inline bool check_timer_attr_ring(const em_timer_attr_t *ring_attr)
{
    if (unlikely(ring_attr == NULL)) {
               "Not initialized: em_timer_ring_attr_init(ring_attr) not called");
           "Invalid attr values for ring timer");
static inline int find_free_timer_index(void)
{
    for (i = 0; i < EM_ODP_MAX_TIMERS; i++) {
        if (timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)
            break;
    }
    return i;
}
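/*
 * em_timer_attr_init(): fill tmr_attr with default values, clamped to what
 * odp_timer_capability() and odp_timer_res_capability() report for the
 * selected clock source.
 */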
    odp_timer_clk_src_t odp_clksrc;
    odp_timer_capability_t odp_capa;
    odp_timer_res_capability_t odp_res_capa;

               "Unsupported EM-timer clock source:%d",

    err = odp_timer_capability(odp_clksrc, &odp_capa);
               "Timer capability: ret %d, odp-clksrc:%d",

    TMR_DBG_PRINT("odp says highest res %lu\n", odp_capa.highest_res_ns);
    if (unlikely(odp_capa.highest_res_ns > tmr_attr->resparam.res_ns)) {
               "Timer capability: maxres %lu req %lu, odp-clksrc:%d!",

    memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t));
    err = odp_timer_res_capability(odp_clksrc, &odp_res_capa);
               "Timer res capability failed: ret %d, odp-clksrc:%d, res %lu",

    TMR_DBG_PRINT("res %lu -> ODP says min %lu, max %lu\n",
                  odp_res_capa.max_tmo);

    tmr_attr->num_tmo = EM_ODP_DEFAULT_TMOS;
    if (odp_capa.max_timers && odp_capa.max_timers < EM_ODP_DEFAULT_TMOS)
        tmr_attr->num_tmo = odp_capa.max_timers;

    tmr_attr->name[0] = 0;
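/*
 * em_timer_ring_attr_init(): fill ring_attr with defaults for a periodic
 * ring timer and clamp num_tmo to the periodic timer capability of the
 * selected clock source.
 */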
    ring_attr->num_tmo = EM_ODP_DEFAULT_RING_TMOS;

    ring_attr->name[0] = 0;

    odp_timer_clk_src_t odp_clksrc;
    odp_timer_capability_t capa;

    if (unlikely(odp_timer_capability(odp_clksrc, &capa) != 0)) {
        TMR_DBG_PRINT("odp_timer_capability returned error for clk_src %u\n", odp_clksrc);

    if (capa.periodic.max_pools == 0)

    if (capa.periodic.max_timers < ring_attr->num_tmo)
        ring_attr->num_tmo = capa.periodic.max_timers;

    odp_timer_periodic_capability_t pcapa;

    rv = odp_timer_periodic_capability(odp_clksrc, &pcapa);
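/*
 * em_timer_capability(): report EM timer capability for the given clock
 * source; the number of EM timers is limited by the ODP timer pool count
 * and capped at EM_ODP_MAX_TIMERS.
 */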
        EM_LOG(EM_LOG_DBG, "%s(): NULL capa ptr!\n", __func__);

    odp_timer_clk_src_t odp_clksrc;
    odp_timer_capability_t odp_capa;

    if (unlikely(timer_clksrc_em2odp(clk_src, &odp_clksrc) ||
                 odp_timer_capability(odp_clksrc, &odp_capa))) {
        EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src);

    capa->max_timers = odp_capa.max_pools < EM_ODP_MAX_TIMERS ?
                       odp_capa.max_pools : EM_ODP_MAX_TIMERS;
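/*
 * em_timer_res_capability(): res is used both as input (requested res_ns,
 * res_hz, max_tmo) and as output; the fields are overwritten with the values
 * the ODP implementation can actually provide for the clock source.
 */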
        EM_LOG(EM_LOG_DBG, "%s: NULL ptr res\n", __func__);

    odp_timer_clk_src_t odp_clksrc;
    odp_timer_res_capability_t odp_res_capa;

    err = timer_clksrc_em2odp(clk_src, &odp_clksrc);
        EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src);

    memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t));
    odp_res_capa.res_ns = res->res_ns;
    odp_res_capa.res_hz = res->res_hz;
    odp_res_capa.max_tmo = res->max_tmo;
    err = odp_timer_res_capability(odp_clksrc, &odp_res_capa);
        EM_LOG(EM_LOG_DBG, "%s: ODP res_capability failed (ret %d)!\n", __func__, err);

    res->min_tmo = odp_res_capa.min_tmo;
    res->max_tmo = odp_res_capa.max_tmo;
    res->res_ns = odp_res_capa.res_ns;
    res->res_hz = odp_res_capa.res_hz;
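/*
 * em_timer_ring_capability(): probe odp_timer_periodic_capability() with the
 * requested max_mul and res_ns; ring is updated in place with the supported
 * values and the ODP return value decides pass/fail.
 */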
    odp_timer_clk_src_t odp_clksrc;
    odp_timer_periodic_capability_t pcapa;

        EM_LOG(EM_LOG_DBG, "%s: NULL ptr ring\n", __func__);

    if (unlikely(timer_clksrc_em2odp(ring->clk_src, &odp_clksrc))) {
        EM_LOG(EM_LOG_DBG, "%s: Invalid clk_src %d\n", __func__, ring->clk_src);

    pcapa.max_multiplier = ring->max_mul;
    pcapa.res_ns = ring->res_ns;
    int rv = odp_timer_periodic_capability(odp_clksrc, &pcapa);

    ring->max_mul = pcapa.max_multiplier;
    ring->res_ns = pcapa.res_ns;

    if (unlikely(rv < 0)) {
        EM_LOG(EM_LOG_DBG, "%s: odp failed periodic capability for clk_src %d\n",
621 "Timer is not initialized!");
626 if (check_timer_attr(tmr_attr) ==
false)
630 odp_timer_pool_param_t odp_tpool_param;
631 odp_timer_clk_src_t odp_clksrc;
633 odp_timer_pool_param_init(&odp_tpool_param);
638 odp_tpool_param.num_timers = tmr_attr->
num_tmo;
640 if (unlikely(timer_clksrc_em2odp(tmr_attr->
resparam.
clk_src, &odp_clksrc))) {
642 "Unsupported EM-timer clock source:%d",
646 odp_tpool_param.clk_src = odp_clksrc;
649 odp_timer_capability_t capa;
651 if (unlikely(odp_timer_capability(odp_clksrc, &capa))) {
653 "ODP timer capa failed for clk:%d",
657 if (unlikely(!capa.queue_type_sched)) {
659 "ODP does not support scheduled q for clk:%d",
664 odp_ticketlock_lock(&
em_shm->timers.timer_lock);
666 int i = find_free_timer_index();
668 if (unlikely(i >= EM_ODP_MAX_TIMERS)) {
669 odp_ticketlock_unlock(&
em_shm->timers.timer_lock);
671 "No more timers available");
676 char timer_pool_name[ODP_TIMER_POOL_NAME_LEN];
677 const char *name = tmr_attr->
name;
678 const char *reason =
"";
680 if (tmr_attr->
name[0] ==
'\0') {
681 snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN,
682 "EM-timer-%d", timer->idx);
683 name = timer_pool_name;
686 TMR_DBG_PRINT(
"Creating ODP tmr pool: clk %d, res_ns %lu, res_hz %lu\n",
687 odp_tpool_param.clk_src, odp_tpool_param.res_ns,
688 odp_tpool_param.res_hz);
689 timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param);
690 if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) {
691 reason =
"odp_timer_pool_create error";
694 TMR_DBG_PRINT(
"Created timer: %s with idx: %d\n", name, timer->idx);
    if (!em_shm->opt.timer.shared_tmo_pool_enable) {
        odp_pool_t opool = create_tmo_handle_pool(tmr_attr->num_tmo,
                                                  em_shm->opt.timer.tmo_pool_cache, timer);

        if (unlikely(opool == ODP_POOL_INVALID)) {
            reason = "Tmo handle buffer pool create failed";

        timer->tmo_pool = opool;
        TMR_DBG_PRINT("Created per-timer tmo handle pool\n");
    } else {
        if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) {
            odp_pool_t opool =
                create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size,
                                       em_shm->opt.timer.tmo_pool_cache, timer);

            if (unlikely(opool == ODP_POOL_INVALID)) {
                reason = "Shared tmo handle buffer pool create failed";

            timer->tmo_pool = opool;
            em_shm->timers.shared_tmo_pool = opool;
            TMR_DBG_PRINT("Created shared tmo handle pool for total %u tmos\n",
                          em_shm->opt.timer.shared_tmo_pool_size);
        } else {
            timer->tmo_pool = em_shm->timers.shared_tmo_pool;
        }
    }

    timer->num_tmo_reserve = tmr_attr->num_tmo;
    if (em_shm->opt.timer.shared_tmo_pool_enable) {
        uint32_t left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved;

        if (timer->num_tmo_reserve > left) {
            TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left);
            reason = "Not enough tmos left in shared pool";

        em_shm->timers.reserved += timer->num_tmo_reserve;
        TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n",
                      timer->num_tmo_reserve, em_shm->timers.reserved);

    timer->flags = tmr_attr->flags;
    timer->plain_q_ok = capa.queue_type_plain;
    timer->is_ring = false;
#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
    if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) {
        reason = "odp_timer_pool_start_multi failed";
#else
    odp_timer_pool_start();
#endif

    em_shm->timers.num_timers++;
    odp_ticketlock_unlock(&em_shm->timers.timer_lock);

    TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i),
                  em_shm->timers.num_timers);

    cleanup_timer_create_fail(timer);
    odp_ticketlock_unlock(&em_shm->timers.timer_lock);

    TMR_DBG_PRINT("ERR odp tmr pool in: clk %u, res %lu, min %lu, max %lu, num %u\n",
                  odp_tpool_param.clk_src, odp_tpool_param.res_ns,
                  odp_tpool_param.min_tmo, odp_tpool_param.max_tmo, odp_tpool_param.num_timers);

                   "Timer pool create failed, reason: ", reason);
775 "Timer is disabled!");
779 if (
EM_CHECK_LEVEL > 0 && unlikely(check_timer_attr_ring(ring_attr) ==
false)) {
781 "NULL or incorrect attribute");
785 odp_timer_pool_param_t odp_tpool_param;
786 odp_timer_clk_src_t odp_clksrc;
788 odp_timer_pool_param_init(&odp_tpool_param);
789 odp_tpool_param.timer_type = ODP_TIMER_TYPE_PERIODIC;
790 odp_tpool_param.exp_mode = ODP_TIMER_EXP_AFTER;
791 odp_tpool_param.num_timers = ring_attr->
num_tmo;
793 if (unlikely(timer_clksrc_em2odp(ring_attr->
ringparam.
clk_src, &odp_clksrc))) {
795 "Unsupported EM-timer clock source:%d",
799 odp_tpool_param.clk_src = odp_clksrc;
804 odp_tpool_param.res_hz = 0;
808 odp_timer_capability_t capa;
810 if (unlikely(odp_timer_capability(odp_clksrc, &capa))) {
812 "ODP timer capa failed for clk:%d",
816 if (unlikely(!capa.queue_type_sched)) {
818 "ODP does not support scheduled q for clk:%d",
826 odp_ticketlock_lock(&tmrs->timer_lock);
829 uint32_t left =
em_shm->opt.timer.ring.timer_event_pool_size - tmrs->ring_reserved;
831 if (ring_attr->
num_tmo > left) {
832 odp_ticketlock_unlock(&tmrs->timer_lock);
834 "Too few ring timeout events left (req %u/%u)",
840 int i = find_free_timer_index();
842 if (unlikely(i >= EM_ODP_MAX_TIMERS)) {
843 odp_ticketlock_unlock(&tmrs->timer_lock);
845 "No more timers available");
852 char timer_pool_name[ODP_TIMER_POOL_NAME_LEN];
853 const char *name = ring_attr->
name;
854 const char *reason =
"";
856 if (ring_attr->
name[0] ==
'\0') {
857 snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN,
858 "EM-timer-%d", timer->idx);
859 name = timer_pool_name;
    TMR_DBG_PRINT("Creating ODP periodic tmr pool: clk %d, res_ns %lu, base_hz %lu\n",
                  odp_tpool_param.clk_src, odp_tpool_param.res_ns,
                  odp_tpool_param.periodic.base_freq_hz.integer);
    timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param);
    if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) {
        reason = "odp_timer_pool_create failed";

    TMR_DBG_PRINT("Created ring timer: %s with idx: %d\n", name, timer->idx);

    if (!em_shm->opt.timer.shared_tmo_pool_enable) {
        odp_pool_t opool = create_tmo_handle_pool(ring_attr->num_tmo,
                                                  em_shm->opt.timer.tmo_pool_cache, timer);

        if (unlikely(opool == ODP_POOL_INVALID)) {
            reason = "tmo handle pool creation failed";

        timer->tmo_pool = opool;
        TMR_DBG_PRINT("Created per-timer tmo handle pool %p\n", opool);
    } else {
        if (em_shm->timers.shared_tmo_pool == ODP_POOL_INVALID) {
            odp_pool_t opool =
                create_tmo_handle_pool(em_shm->opt.timer.shared_tmo_pool_size,
                                       em_shm->opt.timer.tmo_pool_cache, timer);

            if (unlikely(opool == ODP_POOL_INVALID)) {
                reason = "Shared tmo handle pool creation failed";

            timer->tmo_pool = opool;
            em_shm->timers.shared_tmo_pool = opool;
            TMR_DBG_PRINT("Created shared tmo handle pool %p\n", opool);
        } else {
            timer->tmo_pool = em_shm->timers.shared_tmo_pool;
        }
    }

    timer->num_tmo_reserve = ring_attr->num_tmo;
    if (em_shm->opt.timer.shared_tmo_pool_enable) {
        left = em_shm->opt.timer.shared_tmo_pool_size - em_shm->timers.reserved;

        if (timer->num_tmo_reserve > left) {
            TMR_DBG_PRINT("Not enough tmos left in shared pool (%u)\n", left);
            reason = "Not enough tmos left in shared pool";

        em_shm->timers.reserved += timer->num_tmo_reserve;
        TMR_DBG_PRINT("Updated shared tmo reserve by +%u to %u\n",
                      timer->num_tmo_reserve, em_shm->timers.reserved);
    if (tmrs->ring_tmo_pool == ODP_POOL_INVALID) {
        odp_pool_param_t odp_tmo_pool_param;
        char pool_name[ODP_POOL_NAME_LEN];

        odp_pool_param_init(&odp_tmo_pool_param);
        odp_tmo_pool_param.type = ODP_POOL_TIMEOUT;
        odp_tmo_pool_param.tmo.cache_size = em_shm->opt.timer.ring.timer_event_pool_cache;
        TMR_DBG_PRINT("ring tmo event pool cache %u\n", odp_tmo_pool_param.tmo.cache_size);
        odp_tmo_pool_param.tmo.num = em_shm->opt.timer.ring.timer_event_pool_size;
        TMR_DBG_PRINT("ring tmo event pool size %u\n", odp_tmo_pool_param.tmo.num);
        odp_tmo_pool_param.tmo.uarea_size = sizeof(event_hdr_t);
        odp_tmo_pool_param.stats.all = 0;
        snprintf(pool_name, ODP_POOL_NAME_LEN, "Ring-%d-tmo-pool", timer->idx);
        tmrs->ring_tmo_pool = odp_pool_create(pool_name, &odp_tmo_pool_param);
        if (unlikely(tmrs->ring_tmo_pool == ODP_POOL_INVALID)) {
            reason = "odp timeout event pool creation failed";

        TMR_DBG_PRINT("Created ODP-timeout event pool %p: '%s'\n",
                      tmrs->ring_tmo_pool, pool_name);

    tmrs->ring_reserved += ring_attr->num_tmo;
    TMR_DBG_PRINT("Updated ring reserve by +%u to %u\n", ring_attr->num_tmo,
                  tmrs->ring_reserved);
    timer->num_ring_reserve = ring_attr->num_tmo;
    timer->flags = ring_attr->flags;
    timer->plain_q_ok = capa.queue_type_plain;
    timer->is_ring = true;
    tmrs->num_ring_create_calls++;

#if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
    if (odp_timer_pool_start_multi(&timer->odp_tmr_pool, 1) != 1) {
        reason = "odp_timer_pool_start_multi failed";
#else
    odp_timer_pool_start();
#endif

    odp_ticketlock_unlock(&em_shm->timers.timer_lock);

    TMR_DBG_PRINT("ret %" PRI_TMR ", total timers %u\n", TMR_I2H(i), tmrs->num_timers);

    cleanup_timer_create_fail(timer);
    odp_ticketlock_unlock(&tmrs->timer_lock);

    TMR_DBG_PRINT("ERR odp tmr ring pool in: clk %u, res %lu, base_hz %lu, max_mul %lu, num tmo %u\n",

                   "Ring timer create failed, reason: ", reason);
    int i = TMR_H2I(tmr);
    odp_pool_t pool_fail = ODP_POOL_INVALID;

    odp_ticketlock_lock(&tmrs->timer_lock);
    if (unlikely(!is_timer_valid(tmr))) {
        odp_ticketlock_unlock(&tmrs->timer_lock);
                        "Invalid timer:%" PRI_TMR "", tmr);

    if (tmrs->timer[i].tmo_pool != tmrs->shared_tmo_pool) {
        if (unlikely(odp_pool_destroy(tmrs->timer[i].tmo_pool) != 0)) {
            pool_fail = tmrs->timer[i].tmo_pool;

        TMR_DBG_PRINT("Deleted odp pool %p\n", tmrs->timer[i].tmo_pool);

    tmrs->timer[i].tmo_pool = ODP_POOL_INVALID;
    odp_timer_pool_destroy(tmrs->timer[i].odp_tmr_pool);
    tmrs->timer[i].odp_tmr_pool = ODP_TIMER_POOL_INVALID;

    if (tmrs->timer[i].is_ring && tmrs->num_rings) {
        tmrs->num_rings--;
        if (tmrs->num_rings < 1)
            TMR_DBG_PRINT("Last ring deleted");
        tmrs->ring_reserved -= tmrs->timer[i].num_ring_reserve;
        TMR_DBG_PRINT("Updated ring reserve by -%u to %u\n",
                      tmrs->timer[i].num_ring_reserve, tmrs->ring_reserved);
        tmrs->timer[i].num_ring_reserve = 0;

    if (tmrs->shared_tmo_pool != ODP_POOL_INVALID) {
        tmrs->reserved -= tmrs->timer[i].num_tmo_reserve;
        TMR_DBG_PRINT("Updated tmo reserve by -%u to %u\n",
                      tmrs->timer[i].num_tmo_reserve, tmrs->reserved);
        tmrs->timer[i].num_tmo_reserve = 0;

    if (tmrs->num_timers == 0 && tmrs->shared_tmo_pool != ODP_POOL_INVALID) {
        if (unlikely(odp_pool_destroy(tmrs->shared_tmo_pool) != 0)) {
            pool_fail = tmrs->shared_tmo_pool;

        TMR_DBG_PRINT("Deleted shared tmo pool %p\n", tmrs->shared_tmo_pool);
        tmrs->shared_tmo_pool = ODP_POOL_INVALID;

    odp_ticketlock_unlock(&tmrs->timer_lock);
    if (unlikely(rv != EM_OK)) {
                "timer %p delete fail, odp pool %p fail\n", tmr, pool_fail);

    TMR_DBG_PRINT("ok, deleted timer %p, num_timers %u\n", tmr, tmrs->num_timers);
    int i = TMR_H2I(tmr);

    return odp_timer_current_tick(tmrs->timer[i].odp_tmr_pool);
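/*
 * em_tmo_create()/em_tmo_create_arg(): validate the timer, the target queue
 * and the tmo flags, allocate the tmo handle from the timer's tmo buffer pool
 * and an ODP timer expiring into the queue; a ring timer tmo additionally
 * pre-allocates its ODP timeout event here.
 */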
    const queue_elem_t *const q_elem = queue_elem_get(queue);

    if (unlikely(!is_timer_valid(tmr))) {
                        "Invalid timer:%" PRI_TMR "", tmr);

    if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) {

    if (unlikely(!is_queue_valid_type(tmr, q_elem))) {

    if (unlikely(!check_tmo_flags(flags))) {
                        "Tmr:%" PRI_TMR ": inv. tmo-flags:0x%x",

    int i = TMR_H2I(tmr);

        em_shm->timers.timer[i].is_ring &&
                        "Tmr:%" PRI_TMR ": asking oneshot with ring timer!",

    odp_buffer_t tmo_buf = odp_buffer_alloc(em_shm->timers.timer[i].tmo_pool);

    if (unlikely(tmo_buf == ODP_BUFFER_INVALID)) {
                        "Tmr:%" PRI_TMR ": tmo pool exhausted", tmr);

    odp_timer_pool_t odptmr = em_shm->timers.timer[i].odp_tmr_pool;

    const void *userptr = NULL;

    tmo->odp_timer = odp_timer_alloc(odptmr, q_elem->odp_queue, userptr);
    if (unlikely(tmo->odp_timer == ODP_TIMER_INVALID)) {
                        "Tmr:%" PRI_TMR ": odp_timer_alloc() failed", tmr);
        odp_buffer_free(tmo_buf);

    tmo->odp_timer_pool = odptmr;
    tmo->odp_buffer = tmo_buf;
    tmo->is_ring = em_shm->timers.timer[i].is_ring;
    tmo->odp_timeout = ODP_EVENT_INVALID;
    tmo->ring_tmo_pool = em_shm->timers.ring_tmo_pool;

    odp_event_t odp_tmo_event = alloc_odp_timeout(tmo);

    if (unlikely(odp_tmo_event == ODP_EVENT_INVALID)) {
                        "Ring: odp timeout event allocation failed");
        odp_timer_free(tmo->odp_timer);
        odp_buffer_free(tmo_buf);

    tmo->odp_timeout = odp_tmo_event;
    TMR_DBG_PRINT("Ring: allocated odp timeout ev %p\n", tmo->odp_timeout);

    if (EM_TIMER_TMO_STATS)

    TMR_DBG_PRINT("ODP timer %p allocated\n", tmo->odp_timer);
    TMR_DBG_PRINT("tmo %p created\n", tmo);
1166 "Invalid args: tmo:%" PRI_TMO, tmo);
1175 "Invalid tmo buffer");
1179 "Invalid tmo state:%d", tmo_state);
1183 "Invalid tmo odp_timer, deleted?");
1186 TMR_DBG_PRINT(
"ODP timer %p\n", tmo->odp_timer);
1188 odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_UNKNOWN);
1190 #if ODP_VERSION_API_NUM(1, 43, 0) <= ODP_VERSION_API
1192 int fret = odp_timer_free(tmo->odp_timer);
1195 "odp timer free failed!?, rv %d\n", fret);
1198 odp_event_t odp_evt;
1200 odp_evt = ODP_EVENT_INVALID;
1201 odp_evt = odp_timer_free(tmo->odp_timer);
1204 "odp timer free returned an event %p\n", odp_evt);
1207 odp_buffer_t tmp = tmo->odp_buffer;
1209 tmo->odp_timer = ODP_TIMER_INVALID;
1210 tmo->odp_buffer = ODP_BUFFER_INVALID;
1212 if (tmo->is_ring && tmo->odp_timeout != ODP_EVENT_INVALID) {
1213 TMR_DBG_PRINT(
"ring: free unused ODP timeout ev %p\n", tmo->odp_timeout);
1214 free_odp_timeout(tmo->odp_timeout);
1215 tmo->odp_timeout = ODP_EVENT_INVALID;
1218 odp_buffer_free(tmp);
1220 TMR_DBG_PRINT(
"tmo %p delete ok\n", tmo);
1236 "Invalid tmo buffer");
1240 "Cannot set periodic tmo, use _set_periodic()");
1242 !is_event_type_valid(tmo_ev),
1244 "invalid event type");
1250 "Invalid tmo state:%d", tmo_state);
1253 tmo->odp_timer == ODP_TIMER_INVALID,
1255 "Invalid tmo odp_timer");
1258 odp_event_t odp_ev = event_em2odp(tmo_ev);
1259 bool esv_ena = esv_enabled();
1260 odp_timer_start_t startp;
1265 "Invalid event type: timer-ring");
1271 startp.tick_type = ODP_TIMER_TICK_ABS;
1272 startp.tick = ticks_abs;
1273 startp.tmo_ev = odp_ev;
1277 int odpret = odp_timer_start(tmo->odp_timer, &startp);
1279 if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
1289 TMR_DBG_PRINT(
"TOONEAR, skip ErrH\n");
1294 "odp_timer_start():%d", odpret);
1296 TMR_DBG_PRINT(
"OK\n");
1311 "%s: Periodic no longer supported", __func__);
1317 "Invalid tmo buffer");
1323 "Invalid tmo state:%d", tmo_state);
1326 !is_event_type_valid(tmo_ev),
1328 "invalid event type");
1331 odp_event_t odp_ev = event_em2odp(tmo_ev);
1332 bool esv_ena = esv_enabled();
1333 odp_timer_start_t startp;
1338 "Invalid event type: timer-ring");
1344 startp.tick_type = ODP_TIMER_TICK_REL;
1345 startp.tick = ticks_rel;
1346 startp.tmo_ev = odp_ev;
1350 int odpret = odp_timer_start(tmo->odp_timer, &startp);
1352 if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
1362 TMR_DBG_PRINT(
"TOONEAR, skip ErrH\n");
1366 "odp_timer_start():%d", odpret);
1368 TMR_DBG_PRINT(
"OK\n");
1385 "Invalid tmo buffer");
1388 "Not periodic tmo");
1394 "Invalid tmo state:%d", tmo_state);
1397 !is_event_type_valid(tmo_ev),
1399 "invalid event type");
1402 odp_event_t odp_ev = event_em2odp(tmo_ev);
1403 bool esv_ena = esv_enabled();
1404 odp_timer_start_t startp;
1409 "Invalid event type: timer-ring");
1414 TMR_DBG_PRINT(
"start %lu, period %lu\n", start_abs, period);
1416 tmo->period = period;
1418 start_abs = odp_timer_current_tick(tmo->odp_timer_pool) + period;
1419 tmo->last_tick = start_abs;
1420 TMR_DBG_PRINT(
"last_tick %lu, now %lu\n", tmo->last_tick,
1421 odp_timer_current_tick(tmo->odp_timer_pool));
1424 startp.tick_type = ODP_TIMER_TICK_ABS;
1425 startp.tick = start_abs;
1426 startp.tmo_ev = odp_ev;
1430 int odpret = odp_timer_start(tmo->odp_timer, &startp);
1432 if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
1439 TMR_DBG_PRINT(
"diff to tmo %ld\n",
1440 (int64_t)tmo->last_tick -
1441 (int64_t)odp_timer_current_tick(tmo->odp_timer_pool));
1446 TMR_DBG_PRINT(
"TOONEAR, skip ErrH\n");
1450 EM_ESCOPE_TMO_SET_PERIODIC,
1451 "odp_timer_start():%d", odpret);
1453 TMR_DBG_PRINT(
"OK\n");
em_status_t em_tmo_set_periodic_ring(em_tmo_t tmo,
                                     em_timer_tick_t start_abs,
                                     uint64_t multiplier,
                                     em_event_t tmo_ev)
{
            "Inv.args: tmo UNDEF");
            "Invalid tmo buffer");
            "Not periodic tmo");
            "Invalid tmo state:%d", tmo_state);

    odp_timer_periodic_start_t startp;
    odp_event_t odp_ev = tmo->odp_timeout;

        odp_ev = event_em2odp(tmo_ev);
                    odp_event_type(odp_ev) != ODP_EVENT_TIMEOUT,
                    "Inv.args: not TIMER event given");
        odp_timeout_t odp_tmo = odp_timeout_from_event(odp_ev);
        event_hdr_t *const ev_hdr = odp_timeout_user_area(odp_tmo);

        TMR_DBG_PRINT("user event %p\n", tmo_ev);

        tmo->odp_timeout = ODP_EVENT_INVALID;

    if (odp_ev == ODP_EVENT_INVALID) {
        odp_event_t odp_tmo_event = alloc_odp_timeout(tmo);

        if (unlikely(odp_tmo_event == ODP_EVENT_INVALID))
                          "Ring: odp timeout event allocation failed");
        odp_ev = odp_tmo_event;

    TMR_DBG_PRINT("ring tmo start_abs %lu, M=%lu, odp ev=%p\n", start_abs, multiplier, odp_ev);
    startp.first_tick = start_abs;
    startp.freq_multiplier = multiplier;
    startp.tmo_ev = odp_ev;

    int odpret = odp_timer_periodic_start(tmo->odp_timer, &startp);

    if (unlikely(odpret != ODP_TIMER_SUCCESS)) {
            TMR_DBG_PRINT("TOONEAR, skip ErrH\n");
                              EM_ESCOPE_TMO_SET_PERIODIC_RING,
                              "odp_timer_periodic_start(): ret %d", odpret);

    TMR_DBG_PRINT("OK\n");
1538 "Invalid args: tmo:%" PRI_TMO " cur_event:%p",
1545 "Invalid tmo buffer");
1548 "Invalid tmo odp_timer");
1558 TMR_DBG_PRINT(
"ODP tmo %p\n", tmo->odp_timer);
1565 "odp periodic cancel fail");
1570 odp_event_t odp_ev = ODP_EVENT_INVALID;
1571 int ret = odp_timer_cancel(tmo->odp_timer, &odp_ev);
1573 if (ret == ODP_TIMER_TOO_NEAR) {
1577 "ODP timer cancel return TOONEAR but return event!");
1579 TMR_DBG_PRINT(
"ODP returned TOONEAR\n");
1585 "ODP timer cancel fail!");
1594 "Invalid tmo event from odp_timer_cancel");
1597 em_event_t tmo_ev = event_odp2em(odp_ev);
1607 *cur_event = tmo_ev;
1608 TMR_DBG_PRINT(
"OK\n");
1622 "Tmo ACK: invalid tmo buffer");
1625 "Tmo ACK: Not a periodic tmo");
1627 if (EM_TIMER_TMO_STATS)
1632 odp_event_t odp_ev = event_em2odp(next_tmo_ev);
1635 return ack_ring_timeout_event(tmo, next_tmo_ev, tmo_state, ev_hdr, odp_ev);
1646 "Tmo ACK: invalid tmo state:%d", tmo_state);
1649 bool esv_ena = esv_enabled();
1658 tmo->last_tick += tmo->period;
1660 int tries = EM_TIMER_ACK_TRIES;
1662 odp_timer_start_t startp;
1664 startp.tick_type = ODP_TIMER_TICK_ABS;
1665 startp.tmo_ev = odp_ev;
1672 startp.tick = tmo->last_tick;
1673 ret = odp_timer_start(tmo->odp_timer, &startp);
1679 if (likely(ret != ODP_TIMER_TOO_NEAR)) {
1680 if (ret != ODP_TIMER_SUCCESS) {
1681 TMR_DBG_PRINT(
"ODP return %d\n"
1682 "tmo tgt/tick now %lu/%lu\n",
1683 ret, tmo->last_tick,
1684 odp_timer_current_tick(tmo->odp_timer_pool));
1690 if (EM_TIMER_TMO_STATS)
1692 TMR_DBG_PRINT(
"late, tgt/now %lu/%lu\n", tmo->last_tick,
1693 odp_timer_current_tick(tmo->odp_timer_pool));
1696 return handle_ack_noskip(next_tmo_ev, ev_hdr, tmo->queue);
1699 handle_ack_skip(tmo);
1702 if (unlikely(tries < 1)) {
1705 "Tmo ACK: too many retries:%u",
1706 EM_TIMER_ACK_TRIES);
1709 }
while (ret != ODP_TIMER_SUCCESS);
1711 if (unlikely(ret != ODP_TIMER_SUCCESS)) {
1713 "Tmo ACK: failed to renew tmo (odp ret %d)",
    odp_ticketlock_lock(&em_shm->timers.timer_lock);

    const uint32_t num_timers = em_shm->timers.num_timers;

    if (tmr_list && max > 0 && num_timers > 0) {
        for (int i = 0; i < EM_ODP_MAX_TIMERS; i++) {
            if (em_shm->timers.timer[i].odp_tmr_pool != ODP_TIMER_POOL_INVALID) {
                tmr_list[num] = TMR_I2H(i);

    odp_ticketlock_unlock(&em_shm->timers.timer_lock);
    odp_timer_pool_info_t poolinfo;
    int i = TMR_H2I(tmr);

            "Inv.args: timer:%" PRI_TMR " tmr_attr:%p",

    ret = odp_timer_pool_info(em_shm->timers.timer[i].odp_tmr_pool, &poolinfo);
            "ODP timer pool info failed");

    timer_clksrc_odp2em(poolinfo.param.clk_src, &clk);

    if (poolinfo.param.timer_type == ODP_TIMER_TYPE_SINGLE) {

    tmr_attr->num_tmo = poolinfo.param.num_timers;

    strncpy(tmr_attr->name, poolinfo.name, EM_TIMER_NAME_LEN - 1);
    tmr_attr->name[EM_TIMER_NAME_LEN - 1] = '\0';
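/*
 * em_timer_get_freq() returns the tick frequency in Hz by converting one
 * second (10^9 ns) to ticks; em_timer_tick_to_ns() and em_timer_ns_to_tick()
 * map directly to the ODP conversions of the timer's pool.
 */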
1803 "Invalid timer:%" PRI_TMR "", tmr);
1807 return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool,
1808 1000ULL * 1000ULL * 1000ULL);
1817 "Invalid timer:%" PRI_TMR "", tmr);
1821 return odp_timer_tick_to_ns(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ticks);
1830 "Invalid timer:%" PRI_TMR "", tmr);
1834 return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ns);
        return EM_TMO_STATE_UNKNOWN;

    if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) {
        return EM_TMO_STATE_UNKNOWN;

    return odp_atomic_load_acq_u32(&tmo->state);
1859 "Invalid tmo buffer");
1864 if (EM_TIMER_TMO_STATS) {
1886 "Invalid event type");
1892 "Invalid tmo event type, header corrupted?");
    odp_event_t odp_event = event_em2odp(event);
    odp_event_type_t evtype = odp_event_type(odp_event);

    if (unlikely(evtype != ODP_EVENT_TIMEOUT))
        return NULL;

    return odp_timeout_user_ptr(odp_timeout_from_event(odp_event));
    if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) {

    return (uint64_t)timer;

    return (uint64_t)tmo;