Skip to content

Commit 4e3a028

Browse files
committed
dp: switch the lock to use sys_sem
sys_sem semaphores don't invoke a syscall in uncontended cases. Switch over to them to reduce the number of syscalls. Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
1 parent c29d2dd commit 4e3a028

File tree

4 files changed

+53
-7
lines changed

4 files changed

+53
-7
lines changed

src/schedule/zephyr_dp_schedule.c

Lines changed: 44 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,14 @@ SOF_DEFINE_REG_UUID(dp_sched);
3333

3434
DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
3535

36+
#if CONFIG_SOF_USERSPACE_APPLICATION
37+
struct dp_sem_buf {
38+
struct sys_sem sem[CONFIG_CORE_COUNT];
39+
uint8_t reserved[CONFIG_MM_DRV_PAGE_SIZE - sizeof(struct sys_sem) * CONFIG_CORE_COUNT];
40+
};
41+
42+
static struct dp_sem_buf __aligned(4096) dp_sched_sem;
43+
#else
3644
#define DP_LOCK_INIT(i, _) Z_SEM_INITIALIZER(dp_lock[i], 1, 1)
3745
#define DP_LOCK_INIT_LIST LISTIFY(CONFIG_MP_MAX_NUM_CPUS, DP_LOCK_INIT, (,))
3846

@@ -42,29 +50,56 @@ DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
4250
*/
4351
static
4452
STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_lock, CONFIG_MP_MAX_NUM_CPUS) = { DP_LOCK_INIT_LIST };
53+
#endif
4554

4655
/* Each per-core instance of DP scheduler has separate structures; hence, locks are per-core.
4756
*
4857
* TODO: consider using cpu_get_id() instead of supplying core as a parameter.
4958
*/
5059
unsigned int scheduler_dp_lock(uint16_t core)
5160
{
61+
#if CONFIG_SOF_USERSPACE_APPLICATION
62+
sys_sem_take(&dp_sched_sem.sem[core], K_FOREVER);
63+
#else
5264
k_sem_take(&dp_lock[core], K_FOREVER);
65+
#endif
66+
5367
return core;
5468
}
5569

5670
void scheduler_dp_unlock(unsigned int key)
5771
{
72+
#if CONFIG_SOF_USERSPACE_APPLICATION
73+
sys_sem_give(&dp_sched_sem.sem[key]);
74+
#else
5875
k_sem_give(&dp_lock[key]);
76+
#endif
5977
}
6078

61-
void scheduler_dp_grant(k_tid_t thread_id, uint16_t core)
79+
#if CONFIG_SOF_USERSPACE_APPLICATION
80+
int scheduler_dp_add_domain(struct k_mem_domain *domain)
6281
{
63-
#if CONFIG_USERSPACE
64-
k_thread_access_grant(thread_id, &dp_lock[core]);
65-
#endif
82+
struct k_mem_partition part = {
83+
.start = (uintptr_t)&dp_sched_sem,
84+
.size = sizeof(dp_sched_sem),
85+
.attr = K_MEM_PARTITION_P_RW_U_RW,
86+
};
87+
88+
return k_mem_domain_add_partition(domain, &part);
6689
}
6790

91+
int scheduler_dp_rm_domain(struct k_mem_domain *domain)
92+
{
93+
struct k_mem_partition part = {
94+
.start = (uintptr_t)&dp_sched_sem,
95+
.size = sizeof(dp_sched_sem),
96+
.attr = K_MEM_PARTITION_P_RW_U_RW,
97+
};
98+
99+
return k_mem_domain_remove_partition(domain, &part);
100+
}
101+
#endif
102+
68103
/* dummy LL task - to start LL on secondary cores */
69104
static enum task_state scheduler_dp_ll_tick_dummy(void *data)
70105
{
@@ -370,6 +405,11 @@ int scheduler_dp_init(void)
370405

371406
scheduler_init(SOF_SCHEDULE_DP, &schedule_dp_ops, dp_sch);
372407

408+
#if CONFIG_SOF_USERSPACE_APPLICATION
409+
for (unsigned int i = 0; i < ARRAY_SIZE(dp_sched_sem.sem); i++)
410+
sys_sem_init(dp_sched_sem.sem + i, 1, 1);
411+
#endif
412+
373413
/* init src of DP tick */
374414
ret = schedule_task_init_ll(&dp_sch->ll_tick_src,
375415
SOF_UUID(dp_sched_uuid),

src/schedule/zephyr_dp_schedule.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,11 +55,12 @@ void scheduler_dp_recalculate(struct scheduler_dp_data *dp_sch, bool is_ll_post_
5555
void dp_thread_fn(void *p1, void *p2, void *p3);
5656
unsigned int scheduler_dp_lock(uint16_t core);
5757
void scheduler_dp_unlock(unsigned int key);
58-
void scheduler_dp_grant(k_tid_t thread_id, uint16_t core);
5958
int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
6059
const struct task_ops *ops, struct processing_module *mod,
6160
uint16_t core, size_t stack_size, uint32_t options);
6261
#if CONFIG_SOF_USERSPACE_APPLICATION
62+
int scheduler_dp_add_domain(struct k_mem_domain *domain);
63+
int scheduler_dp_rm_domain(struct k_mem_domain *domain);
6364
void scheduler_dp_domain_free(struct processing_module *pmod);
6465
int scheduler_dp_domain_init(void);
6566
#else

src/schedule/zephyr_dp_schedule_application.c

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -396,6 +396,7 @@ void scheduler_dp_domain_free(struct processing_module *pmod)
396396

397397
k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_HEAP);
398398
k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_CFG);
399+
scheduler_dp_rm_domain(dp_mdom + core);
399400
#endif
400401
}
401402

@@ -505,7 +506,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
505506

506507
#if CONFIG_USERSPACE
507508
k_thread_access_grant(pdata->thread_id, pdata->sem, &dp_sync[core]);
508-
scheduler_dp_grant(pdata->thread_id, core);
509509

510510
unsigned int pidx;
511511
size_t size;
@@ -531,6 +531,12 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
531531
goto e_dom;
532532
}
533533

534+
ret = scheduler_dp_add_domain(dp_mdom + core);
535+
if (ret < 0) {
536+
tr_err(&dp_tr, "failed to add DP lock domain %d", ret);
537+
goto e_dom;
538+
}
539+
534540
ret = llext_manager_add_domain(mod->dev->ipc_config.id, dp_mdom + core);
535541
if (ret < 0) {
536542
tr_err(&dp_tr, "failed to add LLEXT to domain %d", ret);

src/schedule/zephyr_dp_schedule_thread.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,6 @@ int scheduler_dp_task_init(struct task **task,
270270
CONFIG_DP_THREAD_PRIORITY, (*task)->flags, K_FOREVER);
271271

272272
k_thread_access_grant(pdata->thread_id, pdata->event);
273-
scheduler_dp_grant(pdata->thread_id, cpu_get_id());
274273

275274
/* pin the thread to specific core */
276275
ret = k_thread_cpu_pin(pdata->thread_id, core);

0 commit comments

Comments
 (0)