// NOTE(review): this statement appears three times in a row — almost certainly
// an extraction/duplication artifact of the original article, which presumably
// showed a single awaited read. Left byte-identical pending confirmation.
let data = file.read().await;
let data = file.read().await;
let data = file.read().await;
// Manually invoke the io_uring_setup syscall
let ring_fd = unsafe { libc::syscall( libc::SYS_io_uring_setup, QUEUE_DEPTH as libc::c_long, ¶ms as *const _ as libc::c_long, )
} as i32; // Map the Submission Queue into our address space
let sq_ptr = unsafe { libc::mmap( std::ptr::null_mut(), sq_size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_SHARED | libc::MAP_POPULATE, ring_fd, libc::IORING_OFF_SQ_RING as libc::off_t, )
};
// Manually invoke the io_uring_setup syscall
let ring_fd = unsafe { libc::syscall( libc::SYS_io_uring_setup, QUEUE_DEPTH as libc::c_long, ¶ms as *const _ as libc::c_long, )
} as i32; // Map the Submission Queue into our address space
let sq_ptr = unsafe { libc::mmap( std::ptr::null_mut(), sq_size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_SHARED | libc::MAP_POPULATE, ring_fd, libc::IORING_OFF_SQ_RING as libc::off_t, )
};
// Manually invoke the io_uring_setup syscall
let ring_fd = unsafe { libc::syscall( libc::SYS_io_uring_setup, QUEUE_DEPTH as libc::c_long, ¶ms as *const _ as libc::c_long, )
} as i32; // Map the Submission Queue into our address space
let sq_ptr = unsafe { libc::mmap( std::ptr::null_mut(), sq_size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_SHARED | libc::MAP_POPULATE, ring_fd, libc::IORING_OFF_SQ_RING as libc::off_t, )
};
impl Future for Op { type Output = i32; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // If this is the first poll, submit the SQE to the ring if !self.submitted { RING.with(|ring| { let mut ring = ring.borrow_mut(); // Write the Submission Queue Entry to the shared kernel buffer ring.push_sqe(self.sqe); }); // Store the Waker in a global map, keyed by our unique operation ID // The executor will retrieve this when the kernel signals completion WAKER_MAP.with(|map| { map.borrow_mut().insert(self.user_data, cx.waker().clone()); }); self.submitted = true; return Poll::Pending; // Go away, we'll call you when the kernel is done } // Check if our Completion Queue Entry has arrived match self.result.take() { Some(res) => Poll::Ready(res), None => { // Update the waker and keep waiting WAKER_MAP.with(|map| { map.borrow_mut().insert(self.user_data, cx.waker().clone()); }); Poll::Pending } } }
}
impl Future for Op { type Output = i32; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // If this is the first poll, submit the SQE to the ring if !self.submitted { RING.with(|ring| { let mut ring = ring.borrow_mut(); // Write the Submission Queue Entry to the shared kernel buffer ring.push_sqe(self.sqe); }); // Store the Waker in a global map, keyed by our unique operation ID // The executor will retrieve this when the kernel signals completion WAKER_MAP.with(|map| { map.borrow_mut().insert(self.user_data, cx.waker().clone()); }); self.submitted = true; return Poll::Pending; // Go away, we'll call you when the kernel is done } // Check if our Completion Queue Entry has arrived match self.result.take() { Some(res) => Poll::Ready(res), None => { // Update the waker and keep waiting WAKER_MAP.with(|map| { map.borrow_mut().insert(self.user_data, cx.waker().clone()); }); Poll::Pending } } }
}
impl Future for Op { type Output = i32; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { // If this is the first poll, submit the SQE to the ring if !self.submitted { RING.with(|ring| { let mut ring = ring.borrow_mut(); // Write the Submission Queue Entry to the shared kernel buffer ring.push_sqe(self.sqe); }); // Store the Waker in a global map, keyed by our unique operation ID // The executor will retrieve this when the kernel signals completion WAKER_MAP.with(|map| { map.borrow_mut().insert(self.user_data, cx.waker().clone()); }); self.submitted = true; return Poll::Pending; // Go away, we'll call you when the kernel is done } // Check if our Completion Queue Entry has arrived match self.result.take() { Some(res) => Poll::Ready(res), None => { // Update the waker and keep waiting WAKER_MAP.with(|map| { map.borrow_mut().insert(self.user_data, cx.waker().clone()); }); Poll::Pending } } }
}
pub fn run(&mut self) { loop { // Step 1: Poll all tasks that have been woken up while let Some(task) = self.ready_queue.pop_front() { let waker = task.waker(); let mut cx = Context::from_waker(&waker); match task.future.borrow_mut().as_mut().poll(&mut cx) { Poll::Ready(_) => { /* Task complete, drop it */ } Poll::Pending => { /* Task is waiting on I/O, leave it */ } } } // Step 2: Submit pending SQEs and harvest completed CQEs // min_complete=1 means: block until at least one operation finishes // This puts the thread to sleep until the kernel has work for us let completed = self.ring.submit_and_wait(1); // Step 3: For each completed operation, wake the waiting task for cqe in completed { WAKER_MAP.with(|map| { if let Some(waker) = map.borrow_mut().remove(&cqe.user_data) { // Store the result, then wake the future store_result(cqe.user_data, cqe.res); waker.wake(); } }); } if self.all_tasks_complete() { break; } }
}
pub fn run(&mut self) { loop { // Step 1: Poll all tasks that have been woken up while let Some(task) = self.ready_queue.pop_front() { let waker = task.waker(); let mut cx = Context::from_waker(&waker); match task.future.borrow_mut().as_mut().poll(&mut cx) { Poll::Ready(_) => { /* Task complete, drop it */ } Poll::Pending => { /* Task is waiting on I/O, leave it */ } } } // Step 2: Submit pending SQEs and harvest completed CQEs // min_complete=1 means: block until at least one operation finishes // This puts the thread to sleep until the kernel has work for us let completed = self.ring.submit_and_wait(1); // Step 3: For each completed operation, wake the waiting task for cqe in completed { WAKER_MAP.with(|map| { if let Some(waker) = map.borrow_mut().remove(&cqe.user_data) { // Store the result, then wake the future store_result(cqe.user_data, cqe.res); waker.wake(); } }); } if self.all_tasks_complete() { break; } }
}
pub fn run(&mut self) { loop { // Step 1: Poll all tasks that have been woken up while let Some(task) = self.ready_queue.pop_front() { let waker = task.waker(); let mut cx = Context::from_waker(&waker); match task.future.borrow_mut().as_mut().poll(&mut cx) { Poll::Ready(_) => { /* Task complete, drop it */ } Poll::Pending => { /* Task is waiting on I/O, leave it */ } } } // Step 2: Submit pending SQEs and harvest completed CQEs // min_complete=1 means: block until at least one operation finishes // This puts the thread to sleep until the kernel has work for us let completed = self.ring.submit_and_wait(1); // Step 3: For each completed operation, wake the waiting task for cqe in completed { WAKER_MAP.with(|map| { if let Some(waker) = map.borrow_mut().remove(&cqe.user_data) { // Store the result, then wake the future store_result(cqe.user_data, cqe.res); waker.wake(); } }); } if self.all_tasks_complete() { break; } }
}
impl TcpStream { pub async fn read(&self, buf: &mut [u8]) -> io::Result<usize> { // This creates an Op future that submits IORING_OP_READ // and suspends until the kernel completes it let result = Op::read(self.fd, buf).await; if result < 0 { Err(io::Error::from_raw_os_error(-result)) } else { Ok(result as usize) } }
}
impl TcpStream { pub async fn read(&self, buf: &mut [u8]) -> io::Result<usize> { // This creates an Op future that submits IORING_OP_READ // and suspends until the kernel completes it let result = Op::read(self.fd, buf).await; if result < 0 { Err(io::Error::from_raw_os_error(-result)) } else { Ok(result as usize) } }
}
impl TcpStream { pub async fn read(&self, buf: &mut [u8]) -> io::Result<usize> { // This creates an Op future that submits IORING_OP_READ // and suspends until the kernel completes it let result = Op::read(self.fd, buf).await; if result < 0 { Err(io::Error::from_raw_os_error(-result)) } else { Ok(result as usize) } }
}
cargo run --example echo # Chained Accept → Read → Write
cargo run --example cat -- <file> # File I/O in isolation
cargo run --example timer # Task parking and waking without I/O
cargo run --example echo # Chained Accept → Read → Write
cargo run --example cat -- <file> # File I/O in isolation
cargo run --example timer # Task parking and waking without I/O
cargo run --example echo # Chained Accept → Read → Write
cargo run --example cat -- <file> # File I/O in isolation
cargo run --example timer # Task parking and waking without I/O
cargo run --example concurrent_downloads # 100 SQEs submitted simultaneously
cargo run --example timeout_race # Operation cancellation via IORING_OP_ASYNC_CANCEL
cargo run --example concurrent_downloads # 100 SQEs submitted simultaneously
cargo run --example timeout_race # Operation cancellation via IORING_OP_ASYNC_CANCEL
cargo run --example concurrent_downloads # 100 SQEs submitted simultaneously
cargo run --example timeout_race # Operation cancellation via IORING_OP_ASYNC_CANCEL
cargo run --example http_server # High-concurrency "Hello World"
cargo run --example file_server # Serving static files over TCP
cargo run --example http_server # High-concurrency "Hello World"
cargo run --example file_server # Serving static files over TCP
cargo run --example http_server # High-concurrency "Hello World"
cargo run --example file_server # Serving static files over TCP
sudo cargo run --example sqpoll # Kernel-side SQ polling (needs CAP_SYS_ADMIN)
cargo run --example linked_cat -- <file> # Chained Read + Write at kernel level
cargo run --example multishot_accept # One SQE → infinite connection CQEs
sudo cargo run --example sqpoll # Kernel-side SQ polling (needs CAP_SYS_ADMIN)
cargo run --example linked_cat -- <file> # Chained Read + Write at kernel level
cargo run --example multishot_accept # One SQE → infinite connection CQEs
sudo cargo run --example sqpoll # Kernel-side SQ polling (needs CAP_SYS_ADMIN)
cargo run --example linked_cat -- <file> # Chained Read + Write at kernel level
cargo run --example multishot_accept # One SQE → infinite connection CQEs
[dependencies]
ringcore = "0.1.0"
[dependencies]
ringcore = "0.1.0"
[dependencies]
ringcore = "0.1.0"

- Submission Queue (SQ): You write your I/O requests here.
- Completion Queue (CQ): The kernel writes results back here.
- Linux 5.10+ for stable IORING_OP_ACCEPT support
- x86_64 architecture
- Dependencies: libc and std only
- GitHub: github.com/sumant1122/ringcore
- Crates.io: crates.io/crates/ringcore
- Docs: docs.rs/ringcore
- Companion project (container engine): github.com/sumant1122/Nucleus