-
Notifications
You must be signed in to change notification settings - Fork 11
Epoll optimizations #106
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Epoll optimizations #106
Changes from all commits
7442ff4
f76bb7f
e9fb520
d79cd71
520f684
19147ae
c4fff0c
50fd0ef
2a5a998
71376c8
8cd4391
e3e0ac5
4d4e964
8afa09a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||||||
|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -33,6 +33,7 @@ | |||||||||
| #include <atomic> | ||||||||||
| #include <cstddef> | ||||||||||
| #include <memory> | ||||||||||
| #include <mutex> | ||||||||||
| #include <optional> | ||||||||||
| #include <stop_token> | ||||||||||
|
|
||||||||||
|
|
@@ -51,8 +52,8 @@ | |||||||||
|
|
||||||||||
| Persistent Registration | ||||||||||
| ----------------------- | ||||||||||
| File descriptors are registered with epoll once (via descriptor_data) and | ||||||||||
| stay registered until closed. The descriptor_data tracks which operations | ||||||||||
| File descriptors are registered with epoll once (via descriptor_state) and | ||||||||||
| stay registered until closed. The descriptor_state tracks which operations | ||||||||||
| are pending (read_op, write_op, connect_op). When an event arrives, the | ||||||||||
| reactor dispatches to the appropriate pending operation. | ||||||||||
|
|
||||||||||
|
|
@@ -82,42 +83,68 @@ class epoll_socket_impl; | |||||||||
| class epoll_acceptor_impl; | ||||||||||
| struct epoll_op; | ||||||||||
|
|
||||||||||
| // Forward declaration | ||||||||||
| class epoll_scheduler; | ||||||||||
|
|
||||||||||
| /** Per-descriptor state for persistent epoll registration. | ||||||||||
|
|
||||||||||
| Tracks pending operations for a file descriptor. The fd is registered | ||||||||||
| once with epoll and stays registered until closed. Events are dispatched | ||||||||||
| to the appropriate pending operation (EPOLLIN -> read_op, etc.). | ||||||||||
| once with epoll and stays registered until closed. | ||||||||||
|
|
||||||||||
| This struct extends scheduler_op to support deferred I/O processing. | ||||||||||
| When epoll events arrive, the reactor sets ready_events and queues | ||||||||||
| this descriptor for processing. When popped from the scheduler queue, | ||||||||||
| operator() performs the actual I/O and queues completion handlers. | ||||||||||
|
|
||||||||||
| @par Deferred I/O Model | ||||||||||
| The reactor no longer performs I/O directly. Instead: | ||||||||||
| 1. Reactor sets ready_events and queues descriptor_state | ||||||||||
| 2. Scheduler pops descriptor_state and calls operator() | ||||||||||
| 3. operator() performs I/O under mutex and queues completions | ||||||||||
|
|
||||||||||
| With edge-triggered epoll (EPOLLET), atomic operations are required to | ||||||||||
| synchronize between operation registration and reactor event delivery. | ||||||||||
| The read_ready/write_ready flags cache edge events that arrived before | ||||||||||
| an operation was registered. | ||||||||||
| This eliminates per-descriptor mutex locking from the reactor hot path. | ||||||||||
|
|
||||||||||
| @par Thread Safety | ||||||||||
| The mutex protects operation pointers and ready flags during I/O. | ||||||||||
| ready_events_ and is_enqueued_ are atomic for lock-free reactor access. | ||||||||||
| */ | ||||||||||
| struct descriptor_data | ||||||||||
| struct descriptor_state : scheduler_op | ||||||||||
| { | ||||||||||
| /// Currently registered events (EPOLLIN, EPOLLOUT, etc.) | ||||||||||
| std::uint32_t registered_events = 0; | ||||||||||
| std::mutex mutex; | ||||||||||
|
|
||||||||||
| /// Pending read operation (nullptr if none) | ||||||||||
| std::atomic<epoll_op*> read_op{nullptr}; | ||||||||||
| // Protected by mutex | ||||||||||
| epoll_op* read_op = nullptr; | ||||||||||
| epoll_op* write_op = nullptr; | ||||||||||
| epoll_op* connect_op = nullptr; | ||||||||||
|
|
||||||||||
| /// Pending write operation (nullptr if none) | ||||||||||
| std::atomic<epoll_op*> write_op{nullptr}; | ||||||||||
| // Caches edge events that arrived before an op was registered | ||||||||||
| bool read_ready = false; | ||||||||||
| bool write_ready = false; | ||||||||||
|
|
||||||||||
| /// Pending connect operation (nullptr if none) | ||||||||||
| std::atomic<epoll_op*> connect_op{nullptr}; | ||||||||||
| // Set during registration only (no mutex needed) | ||||||||||
| std::uint32_t registered_events = 0; | ||||||||||
| int fd = -1; | ||||||||||
|
|
||||||||||
| /// Cached read readiness (edge event arrived before op registered) | ||||||||||
| std::atomic<bool> read_ready{false}; | ||||||||||
| // For deferred I/O - set by reactor, read by scheduler | ||||||||||
| std::atomic<std::uint32_t> ready_events_{0}; | ||||||||||
| std::atomic<bool> is_enqueued_{false}; | ||||||||||
| epoll_scheduler const* scheduler_ = nullptr; | ||||||||||
|
|
||||||||||
| /// Cached write readiness (edge event arrived before op registered) | ||||||||||
| std::atomic<bool> write_ready{false}; | ||||||||||
| // Prevents impl destruction while this descriptor_state is queued. | ||||||||||
| // Set by close_socket() when is_enqueued_ is true, cleared by operator(). | ||||||||||
| std::shared_ptr<void> impl_ref_; | ||||||||||
|
|
||||||||||
| /// The file descriptor | ||||||||||
| int fd = -1; | ||||||||||
| /// Add ready events atomically. | ||||||||||
| void add_ready_events(std::uint32_t ev) noexcept | ||||||||||
| { | ||||||||||
| ready_events_.fetch_or(ev, std::memory_order_relaxed); | ||||||||||
| } | ||||||||||
|
|
||||||||||
| /// Perform deferred I/O and queue completions. | ||||||||||
| void operator()() override; | ||||||||||
|
|
||||||||||
| /// Whether this descriptor is managed by persistent registration | ||||||||||
| bool is_registered = false; | ||||||||||
| /// Destroy without invoking. | ||||||||||
| void destroy() override {} | ||||||||||
|
Comment on lines
+146
to
+147
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
🐛 Proposed fix:

/// Destroy without invoking.
- void destroy() override {}
+ void destroy() override { impl_ref_.reset(); }

📝 Committable suggestion
Suggested change
🤖 Prompt for AI Agents |
||||||||||
| }; | ||||||||||
|
|
||||||||||
| struct epoll_op : scheduler_op | ||||||||||
|
|
@@ -206,17 +233,6 @@ struct epoll_op : scheduler_op | |||||||||
| cancelled.store(true, std::memory_order_release); | ||||||||||
| } | ||||||||||
|
|
||||||||||
| void start(std::stop_token token) | ||||||||||
| { | ||||||||||
| cancelled.store(false, std::memory_order_release); | ||||||||||
| stop_cb.reset(); | ||||||||||
| socket_impl_ = nullptr; | ||||||||||
| acceptor_impl_ = nullptr; | ||||||||||
|
|
||||||||||
| if (token.stop_possible()) | ||||||||||
| stop_cb.emplace(token, canceller{this}); | ||||||||||
| } | ||||||||||
|
|
||||||||||
| void start(std::stop_token token, epoll_socket_impl* impl) | ||||||||||
| { | ||||||||||
| cancelled.store(false, std::memory_order_release); | ||||||||||
|
|
||||||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
🧩 Analysis chain
🏁 Script executed:
rg -n -C5 'ready_events_' --type=cpp

Repository: cppalliance/corosio
Length of output: 3334
🏁 Script executed:
rg -n -B10 -A10 'ready_events_\.exchange' --type=cpp

Repository: cppalliance/corosio
Length of output: 1572
Change `fetch_or` to use `memory_order_release` to pair with the consumer's `memory_order_acquire`.

The producer-side `fetch_or` with `memory_order_relaxed` (line 140) does not provide the release semantics needed to synchronize with the consumer's `exchange(..., memory_order_acquire)` (scheduler.cpp:190). Acquire semantics require a corresponding release store to establish a happens-before relationship; relaxed operations provide no such guarantee. This can result in I/O side effects from the reactor thread not being visible to the scheduler thread when it reads `ready_events_`.

Change to `memory_order_release` on the producer side:

🤖 Prompt for AI Agents