/*
 * NOTE(review): fragmented chunk -- the enclosing init function's header
 * and many interior lines are missing; code below kept byte-identical.
 * Appears to validate the requested EM core setup and then build the
 * physical<->logical core id mapping tables.
 */
/* Reject core counts beyond EM's static limit or ODP's thread limit. */
41 if (core_count > EM_MAX_CORES || core_count > odp_thread_count_max())
/* Physical mask population must match the requested core count -- TODO confirm */
45 if (mask_count != core_count)
/* Highest physical core id in the mask must fit the static map arrays. */
49 if (last_phys_id >= EM_MAX_CORES)
/* Lock protecting the shared core-count bookkeeping. */
54 env_spinlock_init(&core_map->
lock);
58 core_map->
count = core_count;
/* Walk physical ids upward, assigning logical ids 0..core_count-1;
 * logic[] is indexed by physical id, phys[] by logical id. */
63 while (logic_id < core_count && phys_id < EM_MAX_CORES) {
65 core_map->phys_vs_logic.
logic[phys_id] = logic_id;
66 core_map->phys_vs_logic.
phys[logic_id] = phys_id;
/*
 * NOTE(review): fragment of a per-core (thread-local) init path -- the
 * function header and several lines are missing; code kept byte-identical.
 * Resolves this thread's EM logical core id and updates the shared
 * running core count under the core-map lock.
 */
78 const int phys_core = odp_cpu_id();
79 const int odp_thr = odp_thread_id();
80 int32_t current_core_count;
/* Physical core id must fit the statically sized mapping tables. */
82 if (unlikely(phys_core >= EM_MAX_CORES))
/* ODP thread id must be valid both against the compile-time max and the
 * runtime instance limit. */
84 if (odp_thr >= ODP_THREAD_COUNT_MAX ||
85 odp_thr >= odp_thread_count_max())
/* Look up this thread's EM (logical) core id from the phys->logic table. */
89 locm->
core_id = core_map->phys_vs_logic.
logic[phys_core];
/* Negative id: this physical core is not part of the EM core set --
 * presumably an error path follows (lines missing). */
91 if (unlikely(locm->
core_id < 0))
93 if (unlikely(locm->
core_id >= EM_MAX_CORES))
/* Core-count update is done under the spinlock initialized at setup. */
96 env_spinlock_lock(&core_map->
lock);
100 env_spinlock_unlock(&core_map->
lock);
102 EM_DBG(
"EM-core%02d: %s() - current core count:%d\n",
/*
 * NOTE(review): fragment of the matching per-core teardown path -- the
 * count is presumably decremented under the lock just above (lines
 * missing from this chunk). Sanity-check the resulting count.
 */
112 if (unlikely(current_core_count < 1)) {
113 EM_LOG(EM_LOG_ERR,
"Current core count invalid: %d", current_core_count);
117 EM_DBG(
"EM-core%02d: %s() - current core count:%d\n",
/*
 * Map an EM logical core id to its ODP thread id.
 * NOTE(review): interior lines (opening brace, the out-of-range return
 * value) are missing from this chunk; code kept byte-identical.
 */
123 int logic_to_thr_core_id(
const int logic_core)
/* Guard against reading past the statically sized lookup table. */
125 if (unlikely(logic_core >= EM_MAX_CORES))
128 return em_shm->core_map.thr_vs_logic.odp_thr[logic_core];
/*
 * Map an ODP thread id back to its EM logical core id.
 * NOTE(review): interior lines (opening brace, the out-of-range return
 * value) are missing from this chunk; code kept byte-identical.
 */
131 int thr_to_logic_core_id(
const int thr_id)
/* Guard against reading past the statically sized lookup table. */
133 if (unlikely(thr_id >= ODP_THREAD_COUNT_MAX))
136 return em_shm->core_map.thr_vs_logic.logic[thr_id];
/*
 * NOTE(review): fragment -- builds an ODP thread mask containing the ODP
 * thread id of every EM core; the function name, other parameters and
 * local declarations are missing from this chunk.
 */
140 odp_thrmask_t *
const odp_thrmask )
145 odp_thrmask_zero(odp_thrmask);
/* Set one bit per EM core: logical core i -> its ODP thread id. */
148 for (
int i = 0; i < core_count; i++) {
150 odp_thread_id = logic_to_thr_core_id(i);
151 odp_thrmask_set(odp_thrmask, odp_thread_id);
/*
 * NOTE(review): fragment -- builds an ODP cpumask containing the physical
 * cpu id of every EM core; the function header is incomplete and the body
 * runs past the end of this chunk.
 */
157 odp_cpumask_t *
const odp_cpumask )
162 odp_cpumask_zero(odp_cpumask);
/* Set one bit per EM core: logical core i -> its physical cpu id. */
165 for (
int i = 0; i < core_count; i++) {
167 cpu_id = logic_to_phys_core_id(i);
168 odp_cpumask_set(odp_cpumask, cpu_id);