/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
 * Header file for the io_uring interface.
 *
 * Copyright (C) 2019 Jens Axboe
 * Copyright (C) 2019 Christoph Hellwig
 */
#ifndef LINUX_IO_URING_H
#define LINUX_IO_URING_H

#include <linux/fs.h>
#include <linux/types.h>

/*
 * IO submission data structure (Submission Queue Entry)
 */
struct io_uring_sqe {
	__u8	opcode;		/* type of operation for this sqe */
	__u8	flags;		/* IOSQE_ flags */
	__u16	ioprio;		/* ioprio for the request */
	__s32	fd;		/* file descriptor to do IO on */
	union {
		__u64	off;	/* offset into file */
		__u64	addr2;
	};
	union {
		__u64	addr;	/* pointer to buffer or iovecs */
		__u64	splice_off_in;
	};
	__u32	len;		/* buffer size or number of iovecs */
	union {
		__kernel_rwf_t	rw_flags;
		__u32		fsync_flags;
		__u16		poll_events;
		__u32		sync_range_flags;
		__u32		msg_flags;
		__u32		timeout_flags;
		__u32		accept_flags;
		__u32		cancel_flags;
		__u32		open_flags;
		__u32		statx_flags;
		__u32		fadvise_advice;
		__u32		splice_flags;
	};
	__u64	user_data;	/* data to be passed back at completion time */
	union {
		struct {
			/* pack this to avoid bogus arm OABI complaints */
			union {
				/* index into fixed buffers, if used */
				__u16	buf_index;
				/* for grouped buffer selection */
				__u16	buf_group;
			} __attribute__((packed));
			/* personality to use, if used */
			__u16	personality;
			__s32	splice_fd_in;
		};
		__u64	__pad2[3];
	};
};

enum {
	IOSQE_FIXED_FILE_BIT,
	IOSQE_IO_DRAIN_BIT,
	IOSQE_IO_LINK_BIT,
	IOSQE_IO_HARDLINK_BIT,
	IOSQE_ASYNC_BIT,
	IOSQE_BUFFER_SELECT_BIT,
};

/*
 * sqe->flags
 */
/* use fixed fileset */
#define IOSQE_FIXED_FILE	(1U << IOSQE_FIXED_FILE_BIT)
/* issue after inflight IO */
#define IOSQE_IO_DRAIN		(1U << IOSQE_IO_DRAIN_BIT)
/* links next sqe */
#define IOSQE_IO_LINK		(1U << IOSQE_IO_LINK_BIT)
/* like LINK, but stronger */
#define IOSQE_IO_HARDLINK	(1U << IOSQE_IO_HARDLINK_BIT)
/* always go async */
#define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT	(1U << IOSQE_BUFFER_SELECT_BIT)

/*
 * io_uring_setup() flags
 */
#define IORING_SETUP_IOPOLL	(1U << 0)	/* io_context is polled */
#define IORING_SETUP_SQPOLL	(1U << 1)	/* SQ poll thread */
#define IORING_SETUP_SQ_AFF	(1U << 2)	/* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
#define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */

enum {
	IORING_OP_NOP,
	IORING_OP_READV,
	IORING_OP_WRITEV,
	IORING_OP_FSYNC,
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED,
	IORING_OP_POLL_ADD,
	IORING_OP_POLL_REMOVE,
	IORING_OP_SYNC_FILE_RANGE,
	IORING_OP_SENDMSG,
	IORING_OP_RECVMSG,
	IORING_OP_TIMEOUT,
	IORING_OP_TIMEOUT_REMOVE,
	IORING_OP_ACCEPT,
	IORING_OP_ASYNC_CANCEL,
	IORING_OP_LINK_TIMEOUT,
	IORING_OP_CONNECT,
	IORING_OP_FALLOCATE,
	IORING_OP_OPENAT,
	IORING_OP_CLOSE,
	IORING_OP_FILES_UPDATE,
	IORING_OP_STATX,
	IORING_OP_READ,
	IORING_OP_WRITE,
	IORING_OP_FADVISE,
	IORING_OP_MADVISE,
	IORING_OP_SEND,
	IORING_OP_RECV,
	IORING_OP_OPENAT2,
	IORING_OP_EPOLL_CTL,
	IORING_OP_SPLICE,
	IORING_OP_PROVIDE_BUFFERS,
	IORING_OP_REMOVE_BUFFERS,

	/* this goes last, obviously */
	IORING_OP_LAST,
};

/*
 * sqe->fsync_flags
 */
#define IORING_FSYNC_DATASYNC	(1U << 0)

/*
 * sqe->timeout_flags
 */
#define IORING_TIMEOUT_ABS	(1U << 0)

/*
 * sqe->splice_flags
 * extends splice(2) flags
 */
#define SPLICE_F_FD_IN_FIXED	(1U << 31) /* the last bit of __u32 */

/*
 * IO completion data structure (Completion Queue Entry)
 */
struct io_uring_cqe {
	__u64	user_data;	/* sqe->data submission passed back */
	__s32	res;		/* result code for this event */
	__u32	flags;
};

/*
 * cqe->flags
 *
 * IORING_CQE_F_BUFFER	If set, the upper 16 bits are the buffer ID
 */
#define IORING_CQE_F_BUFFER		(1U << 0)

enum {
	IORING_CQE_BUFFER_SHIFT		= 16,
};

/*
 * Magic offsets for the application to mmap the data it needs
 */
#define IORING_OFF_SQ_RING		0ULL
#define IORING_OFF_CQ_RING		0x8000000ULL
#define IORING_OFF_SQES			0x10000000ULL

/*
 * Filled with the offset for mmap(2)
 */
struct io_sqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 flags;
	__u32 dropped;
	__u32 array;
	__u32 resv1;
	__u64 resv2;
};

/*
 * sq_ring->flags
 */
#define IORING_SQ_NEED_WAKEUP	(1U << 0) /* needs io_uring_enter wakeup */

struct io_cqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 overflow;
	__u32 cqes;
	__u64 resv[2];
};

/*
 * io_uring_enter(2) flags
 */
#define IORING_ENTER_GETEVENTS	(1U << 0)
#define IORING_ENTER_SQ_WAKEUP	(1U << 1)

/*
 * Passed in for io_uring_setup(2). Copied back with updated info on success
 */
struct io_uring_params {
	__u32 sq_entries;
	__u32 cq_entries;
	__u32 flags;
	__u32 sq_thread_cpu;
	__u32 sq_thread_idle;
	__u32 features;
	__u32 wq_fd;
	__u32 resv[3];
	struct io_sqring_offsets sq_off;
	struct io_cqring_offsets cq_off;
};

/*
 * io_uring_params->features flags
 */
#define IORING_FEAT_SINGLE_MMAP		(1U << 0)
#define IORING_FEAT_NODROP		(1U << 1)
#define IORING_FEAT_SUBMIT_STABLE	(1U << 2)
#define IORING_FEAT_RW_CUR_POS		(1U << 3)
#define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
#define IORING_FEAT_FAST_POLL		(1U << 5)

/*
 * io_uring_register(2) opcodes and arguments
 */
#define IORING_REGISTER_BUFFERS		0
#define IORING_UNREGISTER_BUFFERS	1
#define IORING_REGISTER_FILES		2
#define IORING_UNREGISTER_FILES		3
#define IORING_REGISTER_EVENTFD		4
#define IORING_UNREGISTER_EVENTFD	5
#define IORING_REGISTER_FILES_UPDATE	6
#define IORING_REGISTER_EVENTFD_ASYNC	7
#define IORING_REGISTER_PROBE		8
#define IORING_REGISTER_PERSONALITY	9
#define IORING_UNREGISTER_PERSONALITY	10

struct io_uring_files_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 /* __s32 * */ fds;
};

#define IO_URING_OP_SUPPORTED	(1U << 0)

struct io_uring_probe_op {
	__u8 op;
	__u8 resv;
	__u16 flags;	/* IO_URING_OP_* flags */
	__u32 resv2;
};

struct io_uring_probe {
	__u8 last_op;	/* last opcode supported */
	__u8 ops_len;	/* length of ops[] array below */
	__u16 resv;
	__u32 resv2[3];
	struct io_uring_probe_op ops[0];
};

#endif
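
The sketch below is not part of the header above; it is a minimal usage example showing how an application might consume these definitions: create a ring with io_uring_setup(2) and map the submission queue ring at IORING_OFF_SQ_RING using the offsets returned in struct io_uring_params. It assumes <sys/syscall.h> exposes __NR_io_uring_setup (425 on x86_64); on older toolchains the number may need to be defined manually.

#define _GNU_SOURCE
#include <linux/io_uring.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct io_uring_params p;
	memset(&p, 0, sizeof(p));

	/* io_uring_setup(2) has no libc wrapper; invoke it via syscall(2).
	 * The entry count is rounded up to a power of two by the kernel. */
	int ring_fd = syscall(__NR_io_uring_setup, 8, &p);
	if (ring_fd < 0) {
		perror("io_uring_setup");
		return 1;
	}

	/* SQ ring mapping size: ring header fields plus the sqe index array,
	 * whose offset is reported in p.sq_off.array. */
	size_t sq_ring_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);

	void *sq_ring = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
			     MAP_SHARED | MAP_POPULATE, ring_fd,
			     IORING_OFF_SQ_RING);
	if (sq_ring == MAP_FAILED) {
		perror("mmap SQ ring");
		return 1;
	}

	/* Head and tail indices live at the offsets given in p.sq_off. */
	unsigned *sq_head = (unsigned *)((char *)sq_ring + p.sq_off.head);
	unsigned *sq_tail = (unsigned *)((char *)sq_ring + p.sq_off.tail);
	printf("sq_entries=%u head=%u tail=%u\n",
	       p.sq_entries, *sq_head, *sq_tail);

	munmap(sq_ring, sq_ring_sz);
	close(ring_fd);
	return 0;
}

The completion queue ring (IORING_OFF_CQ_RING) and the sqe array (IORING_OFF_SQES) are mapped the same way; when IORING_FEAT_SINGLE_MMAP is set in p.features, the SQ and CQ rings may instead be covered by a single mapping, but the separate mappings shown here remain valid on all kernels that support io_uring.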