Skip to content

Commit 9c87e2b

Browse files
lyakh and lgirdwood
authored and committed
dp:application: embed IPC flattening buffer in module data
On the one hand IPCs are serialized, so a single IPC buffer for all DP threads would be enough. But it has to be a page large to be added to every DP thread memory domain. On the other hand we can allocate such an IPC flattening buffer for each DP thread. Then it doesn't need to be mapped separately, doesn't need an own memory partition in thread's memory domain. A page is 4KiB, the buffer is probably less than 100 bytes large. So as long as we don't have more than 40 DP threads we're better off using per-thread buffers, and we aren't likely to ever get that many DP threads. Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
1 parent 537878a commit 9c87e2b

File tree

2 files changed

+12
-22
lines changed

2 files changed

+12
-22
lines changed

src/schedule/zephyr_dp_schedule.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,11 +25,11 @@ struct scheduler_dp_data {
2525

2626
enum sof_dp_part_type {
2727
SOF_DP_PART_HEAP,
28-
SOF_DP_PART_IPC,
2928
SOF_DP_PART_CFG,
3029
SOF_DP_PART_TYPE_COUNT,
3130
};
3231

32+
struct ipc4_flat;
3333
struct task_dp_pdata {
3434
k_tid_t thread_id; /* zephyr thread ID */
3535
struct k_thread *thread; /* pointer to the kernels' thread object */
@@ -41,6 +41,7 @@ struct task_dp_pdata {
4141
#if CONFIG_SOF_USERSPACE_APPLICATION
4242
struct k_sem *sem; /* pointer to semaphore for task scheduling */
4343
struct k_sem sem_struct; /* semaphore for task scheduling for kernel threads */
44+
struct ipc4_flat *flat;
4445
unsigned char pend_ipc;
4546
unsigned char pend_proc;
4647
struct k_mem_partition mpart[SOF_DP_PART_TYPE_COUNT];

src/schedule/zephyr_dp_schedule_application.c

Lines changed: 10 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,6 @@ static struct k_mem_domain dp_mdom[CONFIG_CORE_COUNT];
3636
#define DP_SYNC_INIT_LIST LISTIFY(CONFIG_CORE_COUNT, DP_SYNC_INIT, (,))
3737
static STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_sync, CONFIG_CORE_COUNT) = { DP_SYNC_INIT_LIST };
3838

39-
/* TODO: make this a shared kernel->module buffer for IPC parameters */
40-
static uint8_t ipc_buf[4096] __aligned(4096);
41-
4239
struct ipc4_flat {
4340
unsigned int cmd;
4441
int ret;
@@ -52,7 +49,7 @@ struct ipc4_flat {
5249
enum ipc4_pipeline_state state;
5350
int n_sources;
5451
int n_sinks;
55-
void *source_sink[];
52+
void *source_sink[2 * CONFIG_MODULE_MAX_CONNECTIONS];
5653
} pipeline_state;
5754
};
5855
};
@@ -79,15 +76,14 @@ static int ipc_thread_flatten(unsigned int cmd, const union scheduler_dp_thread_
7976
case COMP_TRIGGER_STOP:
8077
break;
8178
case COMP_TRIGGER_PREPARE:
82-
if (sizeof(flat->cmd) + sizeof(flat->ret) + sizeof(flat->pipeline_state) +
83-
sizeof(void *) * (param->pipeline_state.n_sources +
84-
param->pipeline_state.n_sinks) >
85-
sizeof(ipc_buf))
79+
if (param->pipeline_state.n_sources > CONFIG_MODULE_MAX_CONNECTIONS ||
80+
param->pipeline_state.n_sinks > CONFIG_MODULE_MAX_CONNECTIONS)
8681
return -ENOMEM;
8782

8883
flat->pipeline_state.state = param->pipeline_state.state;
8984
flat->pipeline_state.n_sources = param->pipeline_state.n_sources;
9085
flat->pipeline_state.n_sinks = param->pipeline_state.n_sinks;
86+
/* Up to 2 * CONFIG_MODULE_MAX_CONNECTIONS */
9187
memcpy(flat->pipeline_state.source_sink, param->pipeline_state.sources,
9288
flat->pipeline_state.n_sources *
9389
sizeof(flat->pipeline_state.source_sink[0]));
@@ -178,12 +174,10 @@ int scheduler_dp_thread_ipc(struct processing_module *pmod, unsigned int cmd,
178174

179175
unsigned int lock_key = scheduler_dp_lock(pmod->dev->task->core);
180176

181-
struct ipc4_flat *flat = (struct ipc4_flat *)ipc_buf;
182-
183177
/* IPCs are serialised */
184-
flat->ret = -ENOSYS;
178+
pdata->flat->ret = -ENOSYS;
185179

186-
ret = ipc_thread_flatten(cmd, param, flat);
180+
ret = ipc_thread_flatten(cmd, param, pdata->flat);
187181
if (!ret) {
188182
pdata->pend_ipc++;
189183
k_sem_give(pdata->sem);
@@ -197,7 +191,7 @@ int scheduler_dp_thread_ipc(struct processing_module *pmod, unsigned int cmd,
197191
if (ret < 0)
198192
tr_err(&dp_tr, "Failed waiting for DP thread: %d", ret);
199193
else
200-
ret = flat->ret;
194+
ret = pdata->flat->ret;
201195
}
202196

203197
return ret;
@@ -316,7 +310,7 @@ void dp_thread_fn(void *p1, void *p2, void *p3)
316310
if (pend_ipc) {
317311
/* handle IPC */
318312
tr_dbg(&dp_tr, "got IPC wake up for %p state %d", pmod, task->state);
319-
ipc_thread_unflatten_run(pmod, (struct ipc4_flat *)ipc_buf);
313+
ipc_thread_unflatten_run(pmod, task_pdata->flat);
320314
k_sem_give(&dp_sync[task->core]);
321315
}
322316

@@ -401,7 +395,6 @@ void scheduler_dp_domain_free(struct processing_module *pmod)
401395
struct task_dp_pdata *pdata = pmod->dev->task->priv_data;
402396

403397
k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_HEAP);
404-
k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_IPC);
405398
k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_CFG);
406399
#endif
407400
}
@@ -416,6 +409,7 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
416409
struct task task;
417410
struct task_dp_pdata pdata;
418411
struct comp_driver drv;
412+
struct ipc4_flat flat;
419413
} *task_memory;
420414

421415
int ret;
@@ -465,6 +459,7 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
465459
/* It will be overwritten for K_USER threads to dynamic ones. */
466460
pdata->sem = &pdata->sem_struct;
467461
pdata->thread = &pdata->thread_struct;
462+
pdata->flat = &task_memory->flat;
468463

469464
#ifdef CONFIG_USERSPACE
470465
if (options & K_USER) {
@@ -527,12 +522,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
527522
.size = size,
528523
.attr = K_MEM_PARTITION_P_RW_U_RW,
529524
};
530-
/* IPC flattening buffer partition */
531-
pdata->mpart[SOF_DP_PART_IPC] = (struct k_mem_partition){
532-
.start = (uintptr_t)&ipc_buf,
533-
.size = sizeof(ipc_buf),
534-
.attr = K_MEM_PARTITION_P_RW_U_RW,
535-
};
536525
/* Host mailbox partition for additional IPC parameters: read-only */
537526
pdata->mpart[SOF_DP_PART_CFG] = (struct k_mem_partition){
538527
.start = (uintptr_t)MAILBOX_HOSTBOX_BASE,

0 commit comments

Comments
 (0)