/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There are also flash only volumes - this is the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work, but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things
 * to it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is
 * the unit of allocation; buckets are typically around 1MB - anywhere from
 * 128k to 2M+ works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
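 *
 * Invalidation is thus lazy; a rough sketch of the validity check (the real
 * version is ptr_stale() later in this header):
 *
 *	if (PTR_GEN(k, i) != PTR_BUCKET(c, k, i)->gen)
 *		the pointer is stale - the bucket has since been reused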
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
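 * For example (a sketch using the key accessors from <linux/bcache.h>): an
 * extent covering sectors 24..31 of inode 5 would have
 *
 *	KEY_INODE(k) == 5
 *	KEY_OFFSET(k) == 32	(the end of the extent)
 *	KEY_SIZE(k) == 8	(in sectors)
 *
 * so the extent starts at KEY_OFFSET(k) - KEY_SIZE(k), i.e. KEY_START(k).
 *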
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree: insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, for background writeback, and for
 * the moving garbage collector.
 *
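 * Conceptually (a sketch of the semantics, not the real interface):
 *
 *	if (the index still holds old_key at this position)
 *		replace old_key with new_key
 *	else
 *		fail - we raced with a conflicting update
 *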
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1MB of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
 *
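 * A lookup within a single node is thus, conceptually (a sketch of the idea,
 * not the real iterator code):
 *
 *	for each sorted set s in the node, newest to oldest
 *		binary search s for the search key
 *	merge the results, preferring the newest matching key
 *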
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk, otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of the amount of metadata
 * written, and it puts more strain on the various btree resorting/compacting
 * code.
 *
 * The journal is just a log of keys we've inserted; on startup we just reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
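 * Startup recovery is then, roughly (a sketch of the idea - see journal
 * replay in journal.c):
 *
 *	for each open journal entry j, oldest to newest
 *		for each key k in j
 *			reinsert k into the btree
 *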
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
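
/*
 * With the pr_fmt() above, pr_err("read error %i", ret) inside a function
 * foo() prints "bcache: foo() read error -5" (a sketch; foo and ret are
 * hypothetical).
 */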

#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "bset.h"
#include "util.h"
#include "closure.h"

struct bucket {
	atomic_t	pin;
	uint16_t	prio;
	uint8_t		gen;
	uint8_t		last_gc; /* Most out of date gen in the btree */
	uint16_t	gc_mark; /* Bitfield used by GC. See below for field */
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	1
#define GC_MARK_DIRTY		2
#define GC_MARK_METADATA	3
#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
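
/*
 * BITMASK() (from <linux/bcache.h>) generates inline accessors for a bit
 * range within gc_mark - e.g. GC_MARK(b) and SET_GC_MARK(b, v). A sketch of
 * typical use:
 *
 *	SET_GC_MARK(b, GC_MARK_DIRTY);
 *	if (GC_MARK(b) == GC_MARK_DIRTY)
 *		the bucket holds dirty data and must not be invalidated
 */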

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node		node;
	BKEY_PADDED(key);
	void			*private;
};

struct keybuf {
	struct bkey		last_scanned;
	spinlock_t		lock;

	/*
	 * Beginning and end of range in rb tree - so that we can skip taking
	 * lock and checking the rb tree when we need to check for overlapping
	 * keys.
	 */
	struct bkey		start;
	struct bkey		end;

	struct rb_root		keys;

#define KEYBUF_NR		500
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct cache_set	*c;
	unsigned		id;
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	unsigned long		flags;
#define BCACHE_DEV_CLOSING	0
#define BCACHE_DEV_DETACHING	1
#define BCACHE_DEV_UNLINK_DONE	2

	unsigned		nr_stripes;
	unsigned		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	struct bio_set		*bio_split;

	unsigned		data_csum:1;

	int (*cache_miss)(struct btree *, struct search *,
			  struct bio *, unsigned);
	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
};

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned		sequential;
	sector_t		last;
};

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;

	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	/* Refcount on the cache set. Always nonzero when we're caching. */
	refcount_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * showed up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must have a shared
	 * lock to set and an exclusive lock to clear.
	 */
	atomic_t		has_dirty;

	/*
	 * Set to zero by things that touch the backing volume - except
	 * writeback. Incremented by writeback. Used to determine when to
	 * accelerate idle writeback.
	 */
	atomic_t		backing_idle;

	struct bch_ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	/* Limit number of writeback bios in flight */
	struct semaphore	in_flight;
	struct task_struct	*writeback_thread;
	struct workqueue_struct	*writeback_write_wq;

	struct keybuf		writeback_keys;

	/*
	 * Order the write-half of writeback operations strongly in dispatch
	 * order. (Maintain LBA order; don't allow reads completing out of
	 * order to re-order the writes...)
	 */
	struct closure_waitlist writeback_ordering_wait;
	atomic_t		writeback_sequence_next;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting accounting;

	/* The rest of this all shows up in sysfs */
	unsigned		sequential_cutoff;
	unsigned		readahead;

	unsigned		verify:1;
	unsigned		bypass_torture_test:1;

	unsigned		partial_stripes_expensive:1;
	unsigned		writeback_metadata:1;
	unsigned		writeback_running:1;
	unsigned char		writeback_percent;
	unsigned		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_integral;
	int64_t			writeback_rate_integral_scaled;
	int32_t			writeback_rate_change;

	unsigned		writeback_rate_update_seconds;
	unsigned		writeback_rate_i_term_inverse;
	unsigned		writeback_rate_p_term_inverse;
	unsigned		writeback_rate_minimum;
};
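
/*
 * The writeback_rate_* fields above drive a PI (proportional-integral)
 * controller; roughly (a sketch of the idea - see __update_writeback_rate()
 * in writeback.c for the real arithmetic):
 *
 *	error = dirty_sectors - target;
 *	proportional = error / writeback_rate_p_term_inverse;
 *	integral += error;	(accumulated across update periods)
 *	rate = proportional + integral / writeback_rate_i_term_inverse;
 */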

enum alloc_reserve {
	RESERVE_BTREE,
	RESERVE_PRIO,
	RESERVE_MOVINGGC,
	RESERVE_NONE,
	RESERVE_NR,
};

struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since we
	 * may not be able to allocate at all without writing priorities and
	 * gens. prio_buckets[] contains the last buckets we wrote priorities
	 * to (so gc can mark them as metadata), prio_next[] contains the
	 * buckets allocated for the next prio write.
	 */
	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	/*
	 * free: Buckets that are ready to be used
	 *
	 * free_inc: Incoming buckets - these are buckets that currently have
	 * cached data in them, and we can't reuse them until after we write
	 * their new gen to disk. After prio_write() finishes writing the new
	 * gens/prios, they'll be moved to the free list (and possibly discarded
	 * in the process)
	 */
	DECLARE_FIFO(long, free)[RESERVE_NR];
	DECLARE_FIFO(long, free_inc);

	size_t			fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * If nonzero, we know we aren't going to find any buckets to invalidate
	 * until a gc finishes - otherwise we could pointlessly burn a ton of
	 * cpu
	 */
	unsigned		invalidate_needs_gc;

	bool			discard; /* Get rid of? */

	struct journal_device	journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT		20
	atomic_t		io_errors;
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;
};

struct gc_stat {
	size_t			nodes;
	size_t			key_bytes;

	size_t			nkeys;
	uint64_t		data;	/* sectors */
	unsigned		in_use; /* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
 * flushing dirty data).
 *
 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 * replay is complete.
 */
#define CACHE_SET_UNREGISTERING	0
#define	CACHE_SET_STOPPING	1
#define	CACHE_SET_RUNNING	2

struct cache_set {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting accounting;

	unsigned long		flags;

	struct cache_sb		sb;

	struct cache		*cache[MAX_CACHES_PER_SET];
	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
	int			caches_loaded;

	struct bcache_device	**devices;
	unsigned		devices_max_used;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	struct closure		caching;

	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	mempool_t		*search;
	mempool_t		*bio_meta;
	struct bio_set		*bio_split;

	/* For the btree cache */
	struct shrinker		shrink;

	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short		bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short		block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned		btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
	 * allocated for actual btree node, freed is for structs that do not.
	 *
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned		btree_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	wait_queue_head_t	btree_cache_wait;
	struct task_struct	*btree_cache_alloc_lock;

	/*
	 * When we free a btree node, we increment the gen of the bucket the
	 * node is in - but we can't rewrite the prios and gens until we
	 * finished whatever it is we were doing, otherwise after a crash the
	 * btree node would be freed but for say a split, we might not have the
	 * pointers to the new nodes inserted into the btree yet.
	 *
	 * This is a refcount that blocks prio_write() until the new keys are
	 * written.
	 */
	atomic_t		prio_blocked;
	wait_queue_head_t	bucket_wait;

	/*
	 * For any bio we don't skip we subtract the number of sectors from
	 * rescale; when it hits 0 we rescale all the bucket priorities.
	 */
	atomic_t		rescale;
	/*
	 * When we invalidate buckets, we use both the priority and the amount
	 * of good data to determine which buckets to reuse first - to weight
	 * those together consistently we keep track of the smallest nonzero
	 * priority of any bucket.
	 */
	uint16_t		min_prio;

	/*
	 * max(gen - last_gc) for all buckets. When it gets too big we have to
	 * gc to keep gens from wrapping around.
	 */
	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;
	size_t			avail_nbuckets;

	struct task_struct	*gc_thread;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

	/*
	 * The allocation code needs gc_mark in struct bucket to be correct, but
	 * it's not while a gc is in progress. Protected by bucket_lock.
	 */
	int			gc_mark_valid;

	/* Counts how many sectors bio_insert has added to the cache */
	atomic_t		sectors_to_gc;
	wait_queue_head_t	gc_wait;

	struct keybuf		moving_gc_keys;
	/* Number of moving GC bios in flight */
	struct semaphore	moving_in_flight;

	struct workqueue_struct	*moving_gc_wq;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct bset		*verify_ondisk;
	struct mutex		verify_lock;
#endif

	unsigned		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
	struct semaphore	uuid_write_mutex;

	/*
	 * A btree node on disk could have too many bsets for an iterator to
	 * fit on the stack - have to dynamically allocate them
	 */
	mempool_t		*fill_iter;

	struct bset_sort_state	sort;

	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned		congested_read_threshold_us;
	unsigned		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	struct time_stats	btree_read_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;

	atomic_long_t		reclaim;
	atomic_long_t		flush_write;
	atomic_long_t		retry_flush_write;

	enum			{
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	}			on_error;
#define DEFAULT_IO_ERROR_LIMIT	8
	unsigned		error_limit;
	unsigned		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned		verify:1;
	unsigned		key_merging_disabled:1;
	unsigned		gc_always_rewrite:1;
	unsigned		shrinker_disabled:1;
	unsigned		copy_gc_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];

	DECLARE_HEAP(struct btree *, flush_btree);
};

struct bbio {
	unsigned		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
		/*
		 * We only need pad = 3 here because we only ever carry around a
		 * single pointer - i.e. the pointer we're doing io to/from.
		 */
	};
	struct bio		bio;
};

#define BTREE_PRIO		USHRT_MAX
#define INITIAL_PRIO		32768U

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)							\
	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)						\
	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
#define block_bytes(c)		((c)->sb.block_size << 9)

#define prios_per_bucket(c)				\
	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
	 sizeof(struct bucket_disk))
#define prio_buckets(c)					\
	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->sb.bucket_size - 1);
}
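
/*
 * For example, with 1MB buckets (bucket_size == 2048 sectors, so
 * bucket_bits == 11), sector 6000 is in bucket 2 at offset 1904; in general
 *
 *	s == bucket_to_sector(c, sector_to_bucket(c, s)) + bucket_remainder(c, s)
 */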

static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;
	return r > 128U ? 0 : r;
}
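
/*
 * The subtraction is modulo 256, so gens compare correctly across wraparound:
 * e.g. gen_after(2, 250) == 8 (a has advanced 8 gens past b), while
 * gen_after(250, 2) == 0 (a is not after b).
 */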

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: The checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))
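
/*
 * Typical use is a sketch like the following when writing one of those
 * structs out (and the inverse comparison when reading it back):
 *
 *	i->csum = csum_set(i);
 */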

/* Error handling macros */

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

/* Looping macros */

#define for_each_cache(ca, cs, iter)					\
	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (refcount_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!refcount_inc_not_zero(&dc->count))
		return false;

	/* Paired with the mb in cached_dev_attach */
	smp_mb__after_atomic();
	return true;
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

#define BUCKET_GC_GEN_MAX	96U

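/*
 * For example, if a bucket's gen is 130 and the oldest pointer to it in the
 * btree has gen 40, bucket_gc_gen() returns 90 - close to BUCKET_GC_GEN_MAX,
 * at which point (roughly speaking) the bucket won't be reused until gc has
 * run and updated last_gc. This is what keeps the 8 bit gens from wrapping.
 */
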
#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, S_IWUSR|S_IRUSR, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);
}

/* Forward declarations */

void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
			      blk_status_t, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
		    const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);

void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);

uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);

bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
void __bch_invalidate_one_bucket(struct cache *, struct bucket *);

void __bch_bucket_free(struct cache *, struct bucket *);
void bch_bucket_free(struct cache_set *, struct bkey *);

long bch_bucket_alloc(struct cache *, unsigned, bool);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
			   struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
			 struct bkey *, int, bool);
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
		       unsigned, unsigned, bool);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);

void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);

extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *);
void bch_flash_dev_release(struct kobject *);
void bch_cache_set_release(struct kobject *);
void bch_cache_release(struct kobject *);

int bch_uuid_write(struct cache_set *);
void bcache_write_super(struct cache_set *);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
void bch_cached_dev_detach(struct cached_dev *);
void bch_cached_dev_run(struct cached_dev *);
void bcache_device_stop(struct bcache_device *);

void bch_cache_set_unregister(struct cache_set *);
void bch_cache_set_stop(struct cache_set *);

struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);
void bch_open_buckets_free(struct cache_set *);

int bch_cache_allocator_start(struct cache *ca);

void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
void bch_request_exit(void);
int bch_request_init(void);

#endif /* _BCACHE_H */