|
13 | 13 | #ifndef __POSIX_RTOS_SPINLOCK_H__ |
14 | 14 | #define __POSIX_RTOS_SPINLOCK_H__ |
15 | 15 |
|
16 | | -#include <arch/spinlock.h> |
17 | | -typedef uint32_t k_spinlock_key_t; |
18 | | -#include <sof/lib/memory.h> |
19 | | -#include <ipc/trace.h> |
20 | | - |
21 | 16 | #include <stdint.h> |
22 | 17 |
|
23 | | -/* |
24 | | - * Lock debugging provides a simple interface to debug deadlocks. The rmbox |
25 | | - * trace output will show an output :- |
26 | | - * |
27 | | - * 0xd70 [41.306406] delta [0.359638] lock eal |
28 | | - * 0xd80 [41.306409] delta [0.000002] value 0x00000000000001b7 |
29 | | - * 0xd90 [41.306411] delta [0.000002] value 0x0000000000000001 |
30 | | - * 0xda0 [41.306413] delta [0.000002] value 0x0000000001000348 |
31 | | - * |
32 | | - * "eal" indicates we are holding a lock with interrupts OFF. The next value |
33 | | - * is the line number of where the lock was acquired. The second number is the |
34 | | - * number of other locks held whilst this lock is held and the subsequent |
35 | | - * numbers list each lock and the line number of it's holder. e.g. to find |
36 | | - * the locks :- |
37 | | - * |
38 | | - * grep -rn lock --include *.c | grep 840 (search for lock at line 0x348) |
39 | | - * src/drivers/dw-dma.c:840: spinlock_init(&dma->lock); |
40 | | - * |
41 | | - * grep -rn lock --include *.c | grep 439 |
42 | | - * src/lib/alloc.c:439: k_spin_lock_irq(&memmap.lock, flags); |
43 | | - * |
44 | | - * Every lock entry and exit shows LcE and LcX in trace alongside the lock |
45 | | - * line numbers in hex. e.g. |
46 | | - * |
47 | | - * 0xfd60 [11032.730567] delta [0.000004] lock LcE |
48 | | - * 0xfd70 [11032.730569] delta [0.000002] value 0x00000000000000ae |
49 | | - * |
50 | | - * Deadlock can be confirmed in rmbox :- |
51 | | - * |
52 | | - * Debug log: |
53 | | - * debug: 0x0 (00) = 0xdead0007 (-559087609) |....| |
54 | | - * .... |
55 | | - * Error log: |
56 | | - * using 19.20MHz timestamp clock |
57 | | - * 0xc30 [26.247240] delta [26.245851] lock DED |
58 | | - * 0xc40 [26.247242] delta [0.000002] value 0x00000000000002b4 |
59 | | - * 0xc50 [26.247244] delta [0.000002] value 0x0000000000000109 |
60 | | - * |
61 | | - * DED means deadlock has been detected and the DSP is now halted. The first |
62 | | - * value after DEA is the line number where deadlock occurs and the second |
63 | | - * number is the line number where the lock is allocated. These can be grepped |
64 | | - * like above. |
65 | | - */ |
66 | | - |
67 | | -#if CONFIG_DEBUG_LOCKS |
68 | | - |
69 | | -#include <rtos/panic.h> |
70 | | -#include <sof/trace/trace.h> |
71 | | -#include <ipc/trace.h> |
72 | | -#include <user/trace.h> |
/*
 * Stub spinlock object for the single-threaded POSIX testbench build.
 * A dummy member is kept so the struct is valid ISO C: an empty struct
 * is only a GNU extension (and has sizeof 0 there, which can surprise
 * code embedding the lock in containers).
 */
struct k_spinlock {
	char unused;	/* no state needed: host build has no SMP contention */
};
73 | 20 |
|
74 | | -#define DBG_LOCK_USERS 8 |
75 | | -#define DBG_LOCK_TRIES 10000 |
76 | | - |
77 | | -extern uint32_t lock_dbg_atomic; |
78 | | -extern uint32_t lock_dbg_user[DBG_LOCK_USERS]; |
79 | | - |
80 | | -extern struct tr_ctx sl_tr; |
81 | | - |
82 | | -/* panic on deadlock */ |
83 | | -#define spin_try_lock_dbg(lock, line) \ |
84 | | - do { \ |
85 | | - int __tries; \ |
86 | | - for (__tries = DBG_LOCK_TRIES; __tries > 0; __tries--) { \ |
87 | | - if (arch_try_lock(lock)) \ |
88 | | - break; /* lock acquired */ \ |
89 | | - } \ |
90 | | - if (__tries == 0) { \ |
91 | | - tr_err_atomic(&sl_tr, "DED"); \ |
92 | | - tr_err_atomic(&sl_tr, "line: %d", line); \ |
93 | | - tr_err_atomic(&sl_tr, "user: %d", (lock)->user); \ |
94 | | - panic(SOF_IPC_PANIC_DEADLOCK); /* lock not acquired */ \ |
95 | | - } \ |
96 | | - } while (0) |
97 | | - |
98 | | -#if CONFIG_DEBUG_LOCKS_VERBOSE |
99 | | -#define spin_lock_log(lock, line) \ |
100 | | - do { \ |
101 | | - if (lock_dbg_atomic) { \ |
102 | | - int __i = 0; \ |
103 | | - int __count = lock_dbg_atomic >= DBG_LOCK_USERS \ |
104 | | - ? DBG_LOCK_USERS : lock_dbg_atomic; \ |
105 | | - tr_err_atomic(&sl_tr, "eal"); \ |
106 | | - tr_err_atomic(&sl_tr, "line: %d", line); \ |
107 | | - tr_err_atomic(&sl_tr, "dbg_atomic: %d", lock_dbg_atomic); \ |
108 | | - for (__i = 0; __i < __count; __i++) { \ |
109 | | - tr_err_atomic(&sl_tr, "value: %d", \ |
110 | | - (lock_dbg_atomic << 24) | \ |
111 | | - lock_dbg_user[__i]); \ |
112 | | - } \ |
113 | | - } \ |
114 | | - } while (0) |
115 | | - |
116 | | -#define spin_lock_dbg(line) \ |
117 | | - do { \ |
118 | | - tr_info(&sl_tr, "LcE"); \ |
119 | | - tr_info(&sl_tr, "line: %d", line); \ |
120 | | - } while (0) |
121 | | - |
122 | | -#define spin_unlock_dbg(line) \ |
123 | | - do { \ |
124 | | - tr_info(&sl_tr, "LcX"); \ |
125 | | - tr_info(&sl_tr, "line: %d", line); \ |
126 | | - } while (0) |
127 | | - |
128 | | -#else /* CONFIG_DEBUG_LOCKS_VERBOSE */ |
129 | | -#define spin_lock_log(lock, line) do {} while (0) |
130 | | -#define spin_lock_dbg(line) do {} while (0) |
131 | | -#define spin_unlock_dbg(line) do {} while (0) |
132 | | -#endif /* CONFIG_DEBUG_LOCKS_VERBOSE */ |
133 | | - |
134 | | -#else /* CONFIG_DEBUG_LOCKS */ |
/* Opaque IRQ-state token returned by k_spin_lock(); always 0 here. */
typedef uint32_t k_spinlock_key_t;

/* Lock tracing hooks are compiled out in the POSIX testbench build. */
#define trace_lock(__e) do { } while (0)
#define tracev_lock(__e) do { } while (0)

/* Lock debug hooks: no-ops on the host, kept for API compatibility. */
#define spin_lock_dbg(line) do { } while (0)
#define spin_unlock_dbg(line) do { } while (0)

/* The stub lock object carries no state, so init is a no-op too. */
#define k_spinlock_init(lock) do { } while (0)
143 | 30 |
|
144 | | -/* all SMP spinlocks need init, nothing todo on UP */ |
145 | | -static inline void _spinlock_init(struct k_spinlock *lock, int line) |
| 31 | +static inline k_spinlock_key_t k_spin_lock(struct k_spinlock *l) |
146 | 32 | { |
147 | | - arch_spinlock_init(lock); |
148 | | -#if CONFIG_DEBUG_LOCKS |
149 | | - lock->user = line; |
150 | | -#endif |
| 33 | + return 0; |
151 | 34 | } |
152 | 35 |
|
153 | | -#define k_spinlock_init(lock) _spinlock_init(lock, __LINE__) |
154 | | - |
155 | | -/* disables all IRQ sources and takes lock - enter atomic context */ |
156 | | -k_spinlock_key_t _k_spin_lock_irq(struct k_spinlock *lock); |
157 | | -#define k_spin_lock(lock) _k_spin_lock_irq(lock) |
158 | | - |
159 | | -/* re-enables current IRQ sources and releases lock - leave atomic context */ |
160 | | -void _k_spin_unlock_irq(struct k_spinlock *lock, k_spinlock_key_t key, int line); |
161 | | -#define k_spin_unlock(lock, key) _k_spin_unlock_irq(lock, key, __LINE__) |
| 36 | +static inline void k_spin_unlock(struct k_spinlock *l, |
| 37 | + k_spinlock_key_t key) |
| 38 | +{ |
| 39 | + (void)l; |
| 40 | + (void)key; |
| 41 | +} |
162 | 42 |
|
163 | 43 | #endif /* __POSIX_RTOS_SPINLOCK_H__ */ |
0 commit comments