@@ -33,6 +33,14 @@ SOF_DEFINE_REG_UUID(dp_sched);
3333
3434DECLARE_TR_CTX (dp_tr , SOF_UUID (dp_sched_uuid ), LOG_LEVEL_INFO );
3535
36+ #if CONFIG_SOF_USERSPACE_APPLICATION
37+ struct dp_sem_buf {
38+ struct sys_sem sem [CONFIG_CORE_COUNT ];
39+ uint8_t reserved [CONFIG_MM_DRV_PAGE_SIZE - sizeof (struct sys_sem ) * CONFIG_CORE_COUNT ];
40+ };
41+
42+ static struct dp_sem_buf __aligned (4096 ) dp_sched_sem ;
43+ #else
3644#define DP_LOCK_INIT (i , _ ) Z_SEM_INITIALIZER(dp_lock[i], 1, 1)
3745#define DP_LOCK_INIT_LIST LISTIFY(CONFIG_MP_MAX_NUM_CPUS, DP_LOCK_INIT, (,))
3846
@@ -42,29 +50,56 @@ DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
4250 */
4351static
4452STRUCT_SECTION_ITERABLE_ARRAY (k_sem , dp_lock , CONFIG_MP_MAX_NUM_CPUS ) = { DP_LOCK_INIT_LIST };
53+ #endif
4554
4655/* Each per-core instance of DP scheduler has separate structures; hence, locks are per-core.
4756 *
4857 * TODO: consider using cpu_get_id() instead of supplying core as a parameter.
4958 */
5059unsigned int scheduler_dp_lock (uint16_t core )
5160{
61+ #if CONFIG_SOF_USERSPACE_APPLICATION
62+ sys_sem_take (& dp_sched_sem .sem [core ], K_FOREVER );
63+ #else
5264 k_sem_take (& dp_lock [core ], K_FOREVER );
65+ #endif
66+
5367 return core ;
5468}
5569
5670void scheduler_dp_unlock (unsigned int key )
5771{
72+ #if CONFIG_SOF_USERSPACE_APPLICATION
73+ sys_sem_give (& dp_sched_sem .sem [key ]);
74+ #else
5875 k_sem_give (& dp_lock [key ]);
76+ #endif
5977}
6078
#if CONFIG_SOF_USERSPACE_APPLICATION
/* Fill in the memory partition describing the page-aligned DP scheduler
 * semaphore buffer. Kept in one place so the add and remove paths can
 * never disagree on the partition's base, size or access attributes.
 */
static void scheduler_dp_sem_partition(struct k_mem_partition *part)
{
	part->start = (uintptr_t)&dp_sched_sem;
	part->size = sizeof(dp_sched_sem);
	part->attr = K_MEM_PARTITION_P_RW_U_RW;
}

/* Grant a user-space memory domain R/W access to the DP scheduler
 * semaphores.
 *
 * @domain: memory domain of the user thread(s) that will take/give the
 *          DP scheduler locks
 * @return: 0 on success, negative error code from
 *          k_mem_domain_add_partition() on failure
 */
int scheduler_dp_add_domain(struct k_mem_domain *domain)
{
	struct k_mem_partition part;

	scheduler_dp_sem_partition(&part);
	return k_mem_domain_add_partition(domain, &part);
}

/* Revoke a memory domain's access to the DP scheduler semaphores.
 *
 * @domain: memory domain previously passed to scheduler_dp_add_domain()
 * @return: 0 on success, negative error code from
 *          k_mem_domain_remove_partition() on failure
 */
int scheduler_dp_rm_domain(struct k_mem_domain *domain)
{
	struct k_mem_partition part;

	scheduler_dp_sem_partition(&part);
	return k_mem_domain_remove_partition(domain, &part);
}
#endif
102+
68103/* dummy LL task - to start LL on secondary cores */
69104static enum task_state scheduler_dp_ll_tick_dummy (void * data )
70105{
@@ -370,6 +405,11 @@ int scheduler_dp_init(void)
370405
371406 scheduler_init (SOF_SCHEDULE_DP , & schedule_dp_ops , dp_sch );
372407
408+ #if CONFIG_SOF_USERSPACE_APPLICATION
409+ for (unsigned int i = 0 ; i < ARRAY_SIZE (dp_sched_sem .sem ); i ++ )
410+ sys_sem_init (dp_sched_sem .sem + i , 1 , 1 );
411+ #endif
412+
373413 /* init src of DP tick */
374414 ret = schedule_task_init_ll (& dp_sch -> ll_tick_src ,
375415 SOF_UUID (dp_sched_uuid ),
0 commit comments