/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PIPE_FS_I_H
#define _LINUX_PIPE_FS_I_H

#define PIPE_DEF_BUFFERS	16

#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
#define PIPE_BUF_FLAG_PACKET	0x08	/* read() as a packet */
#define PIPE_BUF_FLAG_CAN_MERGE	0x10	/* can merge buffers */
#define PIPE_BUF_FLAG_WHOLE	0x20	/* read() must return entire buffer or error */
#ifdef CONFIG_WATCH_QUEUE
#define PIPE_BUF_FLAG_LOSS	0x40	/* Message loss happened after this buffer */
#endif

/**
 * struct pipe_buffer - a linux kernel pipe buffer
 * @page: the page containing the data for the pipe buffer
 * @offset: offset of data inside the @page
 * @len: length of data inside the @page
 * @ops: operations associated with this buffer. See @pipe_buf_operations.
 * @flags: pipe buffer flags. See above.
 * @private: private data owned by the ops.
 **/
struct pipe_buffer {
	struct page *page;
	unsigned int offset, len;
	const struct pipe_buf_operations *ops;
	unsigned int flags;
	unsigned long private;
};

/**
 * struct pipe_inode_info - a linux kernel pipe
 * @mutex: mutex protecting the whole thing
 * @rd_wait: reader wait point in case of empty pipe
 * @wr_wait: writer wait point in case of full pipe
 * @head: The point of buffer production
 * @tail: The point of buffer consumption
 * @note_loss: The next read() should insert a data-lost message
 * @max_usage: The maximum number of slots that may be used in the ring
 * @ring_size: total number of buffers (should be a power of 2)
 * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs
 * @tmp_page: cached released page
 * @readers: number of current readers of this pipe
 * @writers: number of current writers of this pipe
 * @files: number of struct files referring to this pipe (protected by ->i_lock)
 * @r_counter: reader counter
 * @w_counter: writer counter
 * @poll_usage: is this pipe used for epoll, which has crazy wakeups?
 * @fasync_readers: reader side fasync
 * @fasync_writers: writer side fasync
 * @bufs: the circular array of pipe buffers
 * @user: the user who created this pipe
 * @watch_queue: If this pipe is a watch_queue, this is the stuff for that
 **/
struct pipe_inode_info {
	struct mutex mutex;
	wait_queue_head_t rd_wait, wr_wait;
	unsigned int head;
	unsigned int tail;
	unsigned int max_usage;
	unsigned int ring_size;
#ifdef CONFIG_WATCH_QUEUE
	bool note_loss;
#endif
	unsigned int nr_accounted;
	unsigned int readers;
	unsigned int writers;
	unsigned int files;
	unsigned int r_counter;
	unsigned int w_counter;
	unsigned int poll_usage;
	struct page *tmp_page;
	struct fasync_struct *fasync_readers;
	struct fasync_struct *fasync_writers;
	struct pipe_buffer *bufs;
	struct user_struct *user;
#ifdef CONFIG_WATCH_QUEUE
	struct watch_queue *watch_queue;
#endif
};

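/*
 * The @head and @tail indices above are free-running counters; a slot in
 * @bufs is reached by masking with (@ring_size - 1), which is why the ring
 * size must be a power of two.  Illustrative sketch only (assumes the pipe
 * is already locked):
 *
 *	unsigned int mask = pipe->ring_size - 1;
 *	struct pipe_buffer *buf = &pipe->bufs[pipe->tail & mask];
 */
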
/*
 * Note on the nesting of these functions:
 *
 * ->confirm()
 *	->try_steal()
 *
 * That is, ->try_steal() must be called on a confirmed buffer. See below for
 * the meaning of each operation. Also see the kerneldoc in fs/pipe.c for the
 * pipe and generic variants of these hooks.
 */
struct pipe_buf_operations {
	/*
	 * ->confirm() verifies that the data in the pipe buffer is there
	 * and that the contents are good. If the pages in the pipe belong
	 * to a file system, we may need to wait for IO completion in this
	 * hook. Returns 0 for good, or a negative error value in case of
	 * error. If not present, all pages are considered good.
	 */
	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * When the contents of this pipe buffer have been completely
	 * consumed by a reader, ->release() is called.
	 */
	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Attempt to take ownership of the pipe buffer and its contents.
	 * ->try_steal() returns %true for success, in which case the
	 * buffer's page (buf->page) is locked and now completely owned by
	 * the caller. The page may then be transferred to a different
	 * mapping; the most common case is insertion into a different
	 * file's address space cache.
	 */
	bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Get a reference to the pipe buffer.
	 */
	bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};

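/*
 * A minimal buffer provider can lean entirely on the generic helpers
 * declared later in this header.  Hypothetical sketch (example_pipe_buf_ops
 * is not an existing kernel symbol); leaving ->confirm() NULL means
 * pipe_buf_confirm() treats the page as always up to date:
 *
 *	static const struct pipe_buf_operations example_pipe_buf_ops = {
 *		.release	= generic_pipe_buf_release,
 *		.try_steal	= generic_pipe_buf_try_steal,
 *		.get		= generic_pipe_buf_get,
 *	};
 */
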
/**
 * pipe_empty - Return true if the pipe is empty
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 */
static inline bool pipe_empty(unsigned int head, unsigned int tail)
{
	return head == tail;
}

/**
 * pipe_occupancy - Return number of slots used in the pipe
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 */
static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
{
	return head - tail;
}

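/*
 * Because @head and @tail are free-running unsigned counters, the difference
 * remains correct even after the counters wrap, since unsigned subtraction is
 * modulo 2^32.  Worked example (illustrative values):
 *
 *	head = 0x00000002, tail = 0xfffffffe
 *	pipe_occupancy(head, tail) == 4
 */
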
/**
 * pipe_full - Return true if the pipe is full
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 * @limit: The maximum amount of slots available.
 */
static inline bool pipe_full(unsigned int head, unsigned int tail,
			     unsigned int limit)
{
	return pipe_occupancy(head, tail) >= limit;
}

/**
 * pipe_space_for_user - Return number of slots available to userspace
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 * @pipe: The pipe info structure
 */
static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
					       struct pipe_inode_info *pipe)
{
	unsigned int p_occupancy, p_space;

	p_occupancy = pipe_occupancy(head, tail);
	if (p_occupancy >= pipe->max_usage)
		return 0;
	p_space = pipe->ring_size - p_occupancy;
	if (p_space > pipe->max_usage)
		p_space = pipe->max_usage;
	return p_space;
}

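/*
 * The free space is clamped so that userspace never sees more slots than
 * @max_usage allows, even when the allocated ring is larger.  Hypothetical
 * numbers: with ring_size = 16, max_usage = 8 and an occupancy of 4, the
 * raw space is 12 but pipe_space_for_user() returns 8.
 */
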
/**
 * pipe_buf_get - get a reference to a pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to get a reference to
 *
 * Return: %true if the reference was successfully obtained.
 */
static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	return buf->ops->get(pipe, buf);
}

/**
 * pipe_buf_release - put a reference to a pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 */
static inline void pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	const struct pipe_buf_operations *ops = buf->ops;

	buf->ops = NULL;
	ops->release(pipe, buf);
}

/**
 * pipe_buf_confirm - verify contents of the pipe buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to confirm
 */
static inline int pipe_buf_confirm(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	if (!buf->ops->confirm)
		return 0;
	return buf->ops->confirm(pipe, buf);
}

/**
 * pipe_buf_try_steal - attempt to take ownership of a pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to attempt to steal
 */
static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe,
		struct pipe_buffer *buf)
{
	if (!buf->ops->try_steal)
		return false;
	return buf->ops->try_steal(pipe, buf);
}

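/*
 * Typical consumer pattern tying the wrappers above together, draining whole
 * buffers.  Sketch only (error handling and the actual data copy are elided;
 * pipe->mutex is assumed to be held):
 *
 *	unsigned int head = pipe->head;
 *	unsigned int tail = pipe->tail;
 *	unsigned int mask = pipe->ring_size - 1;
 *
 *	while (!pipe_empty(head, tail)) {
 *		struct pipe_buffer *buf = &pipe->bufs[tail & mask];
 *
 *		if (pipe_buf_confirm(pipe, buf) < 0)
 *			break;
 *		... copy buf->len bytes from buf->page at buf->offset ...
 *		pipe_buf_release(pipe, buf);
 *		pipe->tail = ++tail;
 *	}
 */
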
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
   memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
#define PIPE_SIZE		PAGE_SIZE

/* Pipe lock and unlock operations */
void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);

/* Wait for a pipe to be readable/writable while dropping the pipe lock */
void pipe_wait_readable(struct pipe_inode_info *);
void pipe_wait_writable(struct pipe_inode_info *);

struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);

/* Generic pipe buffer ops functions */
bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);

extern const struct pipe_buf_operations nosteal_pipe_buf_ops;

#ifdef CONFIG_WATCH_QUEUE
unsigned long account_pipe_buffers(struct user_struct *user,
				   unsigned long old, unsigned long new);
bool too_many_pipe_buffers_soft(unsigned long user_bufs);
bool too_many_pipe_buffers_hard(unsigned long user_bufs);
bool pipe_is_unprivileged_user(void);
#endif

/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
#ifdef CONFIG_WATCH_QUEUE
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
#endif
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);

int create_pipe_files(struct file **, int);
unsigned int round_pipe_size(unsigned long size);

#endif