Blame view
fs/ext4/ext4_jbd2.h
17.2 KB
f51667685
|
1 |
// SPDX-License-Identifier: GPL-2.0+ |
470decc61
|
2 |
/* |
3dcf54515
|
3 |
* ext4_jbd2.h |
470decc61
|
4 5 6 7 8 |
* * Written by Stephen C. Tweedie <sct@redhat.com>, 1999 * * Copyright 1998--1999 Red Hat corp --- All Rights Reserved * |
470decc61
|
9 10 |
* Ext4-specific journaling extensions. */ |
3dcf54515
|
11 12 |
#ifndef _EXT4_JBD2_H #define _EXT4_JBD2_H |
470decc61
|
13 14 |
#include <linux/fs.h> |
f7f4bccb7
|
15 |
#include <linux/jbd2.h> |
3dcf54515
|
16 |
#include "ext4.h" |
470decc61
|
17 18 19 20 21 22 23 24 |
#define EXT4_JOURNAL(inode) (EXT4_SB((inode)->i_sb)->s_journal) /* Define the number of blocks we need to account to a transaction to * modify one block of data. * * We may have to touch one inode, one bitmap buffer, up to three * indirection blocks, the group and superblock summaries, and the data |
a86c61812
|
25 26 |
* block to complete the transaction. * |
d0d856e8b
|
27 |
* For extents-enabled fs we may have to allocate and modify up to |
f45a5ef91
|
28 29 30 |
* 5 levels of tree, data block (for each of these we need bitmap + group * summaries), root which is stored in the inode, sb */ |
470decc61
|
31 |
|
a86c61812
|
32 |
#define EXT4_SINGLEDATA_TRANS_BLOCKS(sb) \ |
e2b911c53
|
33 |
(ext4_has_feature_extents(sb) ? 20U : 8U) |
470decc61
|
34 35 36 37 38 39 40 41 42 43 44 45 |
/* Extended attribute operations touch at most two data buffers, * two bitmap buffers, and two group summaries, in addition to the inode * and the superblock, which are already accounted for. */ #define EXT4_XATTR_TRANS_BLOCKS 6U /* Define the minimum size for a transaction which modifies data. This * needs to take into account the fact that we may end up modifying two * quota files too (one for the group, one for the user quota). The * superblock only gets updated once, of course, so don't bother * counting that again for the quota updates. */ |
a86c61812
|
46 |
#define EXT4_DATA_TRANS_BLOCKS(sb) (EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \ |
470decc61
|
47 |
EXT4_XATTR_TRANS_BLOCKS - 2 + \ |
5aca07eb7
|
48 |
EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) |
470decc61
|
49 |
|
a02908f19
|
50 51 52 53 54 55 |
/* * Define the number of metadata blocks we need to account to modify data. * * This include super block, inode block, quota blocks and xattr blocks */ #define EXT4_META_TRANS_BLOCKS(sb) (EXT4_XATTR_TRANS_BLOCKS + \ |
5aca07eb7
|
56 |
EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) |
a02908f19
|
57 |
|
470decc61
|
58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 |
/* Define an arbitrary limit for the amount of data we will anticipate * writing to any given transaction. For unbounded transactions such as * write(2) and truncate(2) we can write more than this, but we always * start off at the maximum transaction size and grow the transaction * optimistically as we go. */ #define EXT4_MAX_TRANS_DATA 64U /* We break up a large truncate or write transaction once the handle's * buffer credits gets this low, we need either to extend the * transaction or to start a new one. Reserve enough space here for * inode, bitmap, superblock, group and indirection updates for at least * one block, plus two quota updates. Quota allocations are not * needed. */ #define EXT4_RESERVE_TRANS_BLOCKS 12U |
e08ac99fa
|
74 75 76 77 78 79 80 81 |
/* * Number of credits needed if we need to insert an entry into a * directory. For each new index block, we need 4 blocks (old index * block, new index block, bitmap block, bg summary). For normal * htree directories there are 2 levels; if the largedir feature * enabled it's 3 levels. */ #define EXT4_INDEX_EXTRA_TRANS_BLOCKS 12U |
470decc61
|
82 83 84 |
#ifdef CONFIG_QUOTA /* Amount of blocks needed for quota update - we know that the structure was |
21f976975
|
85 |
* allocated so we need to update only data block */ |
7c319d328
|
86 |
#define EXT4_QUOTA_TRANS_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\ |
e2b911c53
|
87 |
ext4_has_feature_quota(sb)) ? 1 : 0) |
470decc61
|
88 89 |
/* Amount of blocks needed for quota insert/delete - we do some block writes * but inode, sb and group updates are done only once */ |
7c319d328
|
90 |
#define EXT4_QUOTA_INIT_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\ |
e2b911c53
|
91 |
ext4_has_feature_quota(sb)) ?\ |
7c319d328
|
92 93 94 95 |
(DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\ +3+DQUOT_INIT_REWRITE) : 0) #define EXT4_QUOTA_DEL_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\ |
e2b911c53
|
96 |
ext4_has_feature_quota(sb)) ?\ |
7c319d328
|
97 98 |
(DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\ +3+DQUOT_DEL_REWRITE) : 0) |
470decc61
|
99 100 101 102 103 |
#else #define EXT4_QUOTA_TRANS_BLOCKS(sb) 0 #define EXT4_QUOTA_INIT_BLOCKS(sb) 0 #define EXT4_QUOTA_DEL_BLOCKS(sb) 0 #endif |
a2d4a646e
|
104 105 106 |
#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb)) #define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb)) #define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb)) |
470decc61
|
107 |
|
9924a92a8
|
108 109 110 111 112 113 114 115 116 117 118 119 120 121 |
/* * Ext4 handle operation types -- for logging purposes */ #define EXT4_HT_MISC 0 #define EXT4_HT_INODE 1 #define EXT4_HT_WRITE_PAGE 2 #define EXT4_HT_MAP_BLOCKS 3 #define EXT4_HT_DIR 4 #define EXT4_HT_TRUNCATE 5 #define EXT4_HT_QUOTA 6 #define EXT4_HT_RESIZE 7 #define EXT4_HT_MIGRATE 8 #define EXT4_HT_MOVE_EXTENTS 9 #define EXT4_HT_XATTR 10 |
6b523df4f
|
122 123 |
#define EXT4_HT_EXT_CONVERT 11 #define EXT4_HT_MAX 12 |
9924a92a8
|
124 |
|
18aadd47f
|
125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 |
/** * struct ext4_journal_cb_entry - Base structure for callback information. * * This struct is a 'seed' structure for a using with your own callback * structs. If you are using callbacks you must allocate one of these * or another struct of your own definition which has this struct * as it's first element and pass it to ext4_journal_callback_add(). */ struct ext4_journal_cb_entry { /* list information for other callbacks attached to the same handle */ struct list_head jce_list; /* Function to call with this callback structure */ void (*jce_func)(struct super_block *sb, struct ext4_journal_cb_entry *jce, int error); /* user data goes here */ }; /** * ext4_journal_callback_add: add a function to call after transaction commit * @handle: active journal transaction handle to register callback on * @func: callback function to call after the transaction has committed: * @sb: superblock of current filesystem for transaction * @jce: returned journal callback data * @rc: journal state at commit (0 = transaction committed properly) * @jce: journal callback data (internal and function private data struct) * * The registered function will be called in the context of the journal thread * after the transaction for which the handle was created has completed. * * No locks are held when the callback function is called, so it is safe to * call blocking functions from within the callback, but the callback should * not block or run for too long, or the filesystem will be blocked waiting for * the next transaction to commit. No journaling functions can be used, or * there is a risk of deadlock. * * There is no guaranteed calling order of multiple registered callbacks on * the same transaction. */ |
d08854f5b
|
165 166 167 168 169 170 |
static inline void _ext4_journal_callback_add(handle_t *handle, struct ext4_journal_cb_entry *jce) { /* Add the jce to transaction's private list */ list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list); } |
18aadd47f
|
171 172 173 174 175 176 177 178 179 180 181 182 |
static inline void ext4_journal_callback_add(handle_t *handle, void (*func)(struct super_block *sb, struct ext4_journal_cb_entry *jce, int rc), struct ext4_journal_cb_entry *jce) { struct ext4_sb_info *sbi = EXT4_SB(handle->h_transaction->t_journal->j_private); /* Add the jce to transaction's private list */ jce->jce_func = func; spin_lock(&sbi->s_md_lock); |
d08854f5b
|
183 |
_ext4_journal_callback_add(handle, jce); |
18aadd47f
|
184 185 |
spin_unlock(&sbi->s_md_lock); } |
d08854f5b
|
186 |
|
18aadd47f
|
187 188 189 190 |
/** * ext4_journal_callback_del: delete a registered callback * @handle: active journal transaction handle on which callback was registered * @jce: registered journal callback entry to unregister |
70261f568
|
191 |
* Return true if object was successfully removed |
18aadd47f
|
192 |
*/ |
5d3ee2085
|
193 |
static inline bool ext4_journal_callback_try_del(handle_t *handle, |
18aadd47f
|
194 195 |
struct ext4_journal_cb_entry *jce) { |
5d3ee2085
|
196 |
bool deleted; |
18aadd47f
|
197 198 199 200 |
struct ext4_sb_info *sbi = EXT4_SB(handle->h_transaction->t_journal->j_private); spin_lock(&sbi->s_md_lock); |
5d3ee2085
|
201 |
deleted = !list_empty(&jce->jce_list); |
18aadd47f
|
202 203 |
list_del_init(&jce->jce_list); spin_unlock(&sbi->s_md_lock); |
5d3ee2085
|
204 |
return deleted; |
18aadd47f
|
205 |
} |
470decc61
|
206 207 208 209 210 211 212 213 214 215 216 217 |
int ext4_mark_iloc_dirty(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc); /* * On success, We end up with an outstanding reference count against * iloc->bh. This _must_ be cleaned up later. */ int ext4_reserve_inode_write(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc); |
4209ae12b
|
218 219 220 221 |
#define ext4_mark_inode_dirty(__h, __i) \ __ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__) int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode, const char *func, unsigned int line); |
470decc61
|
222 |
|
c03b45b85
|
223 224 225 |
int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize, struct ext4_iloc *iloc); |
470decc61
|
226 |
/* |
e4684b3fb
|
227 |
* Wrapper functions with which ext4 calls into JBD. |
470decc61
|
228 |
*/ |
90c7201b9
|
229 230 |
int __ext4_journal_get_write_access(const char *where, unsigned int line, handle_t *handle, struct buffer_head *bh); |
470decc61
|
231 |
|
90c7201b9
|
232 233 234 |
int __ext4_forget(const char *where, unsigned int line, handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t blocknr); |
d6797d14b
|
235 |
|
90c7201b9
|
236 |
int __ext4_journal_get_create_access(const char *where, unsigned int line, |
8984d137d
|
237 |
handle_t *handle, struct buffer_head *bh); |
470decc61
|
238 |
|
90c7201b9
|
239 240 241 |
int __ext4_handle_dirty_metadata(const char *where, unsigned int line, handle_t *handle, struct inode *inode, struct buffer_head *bh); |
470decc61
|
242 |
|
90c7201b9
|
243 |
int __ext4_handle_dirty_super(const char *where, unsigned int line, |
b50924c2c
|
244 |
handle_t *handle, struct super_block *sb); |
a0375156c
|
245 |
|
470decc61
|
246 |
#define ext4_journal_get_write_access(handle, bh) \ |
90c7201b9
|
247 |
__ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh)) |
d6797d14b
|
248 |
#define ext4_forget(handle, is_metadata, inode, bh, block_nr) \ |
90c7201b9
|
249 250 |
__ext4_forget(__func__, __LINE__, (handle), (is_metadata), (inode), \ (bh), (block_nr)) |
470decc61
|
251 |
#define ext4_journal_get_create_access(handle, bh) \ |
90c7201b9
|
252 |
__ext4_journal_get_create_access(__func__, __LINE__, (handle), (bh)) |
0390131ba
|
253 |
#define ext4_handle_dirty_metadata(handle, inode, bh) \ |
90c7201b9
|
254 255 |
__ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \ (bh)) |
a0375156c
|
256 |
#define ext4_handle_dirty_super(handle, sb) \ |
b50924c2c
|
257 |
__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb)) |
470decc61
|
258 |
|
9924a92a8
|
259 |
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line, |
83448bdfb
|
260 261 |
int type, int blocks, int rsv_blocks, int revoke_creds); |
c398eda0e
|
262 |
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle); |
470decc61
|
263 |
|
d3d1faf6a
|
264 |
#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096) |
0390131ba
|
265 |
|
d3d1faf6a
|
266 267 |
/* Note: Do not use this for NULL handles. This is only to determine if
 * a properly allocated handle is using a journal or not. */
static inline int ext4_handle_valid(handle_t *handle)
{
	/* Fake no-journal handles are encoded as small reference counts */
	return (unsigned long)handle >= EXT4_NOJOURNAL_MAX_REF_COUNT;
}

static inline void ext4_handle_sync(handle_t *handle)
{
	if (ext4_handle_valid(handle))
		handle->h_sync = 1;
}
0390131ba
|
280 281 282 283 284 285 |
static inline int ext4_handle_is_aborted(handle_t *handle)
{
	/* A fake no-journal handle can never be aborted */
	return ext4_handle_valid(handle) ? is_handle_aborted(handle) : 0;
}
83448bdfb
|
286 287 288 289 290 291 292 293 294 295 296 |
static inline int ext4_free_metadata_revoke_credits(struct super_block *sb,
						    int blocks)
{
	/* Freeing each metadata block can result in freeing one cluster */
	return EXT4_SB(sb)->s_cluster_ratio * blocks;
}

static inline int ext4_trans_default_revoke_credits(struct super_block *sb)
{
	/* Enough revoke records for freeing up to eight metadata blocks */
	return ext4_free_metadata_revoke_credits(sb, 8);
}
9924a92a8
|
297 |
#define ext4_journal_start_sb(sb, type, nblocks) \ |
83448bdfb
|
298 299 |
__ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0, \ ext4_trans_default_revoke_credits(sb)) |
9924a92a8
|
300 301 |
#define ext4_journal_start(inode, type, nblocks) \ |
83448bdfb
|
302 303 |
__ext4_journal_start((inode), __LINE__, (type), (nblocks), 0, \ ext4_trans_default_revoke_credits((inode)->i_sb)) |
5fe2fe895
|
304 |
|
83448bdfb
|
305 306 307 308 309 310 311 |
#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks)\ __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks),\ ext4_trans_default_revoke_credits((inode)->i_sb)) #define ext4_journal_start_with_revoke(inode, type, blocks, revoke_creds) \ __ext4_journal_start((inode), __LINE__, (type), (blocks), 0, \ (revoke_creds)) |
9924a92a8
|
312 313 314 |
static inline handle_t *__ext4_journal_start(struct inode *inode, unsigned int line, int type, |
83448bdfb
|
315 316 |
int blocks, int rsv_blocks, int revoke_creds) |
470decc61
|
317 |
{ |
5fe2fe895
|
318 |
return __ext4_journal_start_sb(inode->i_sb, line, type, blocks, |
83448bdfb
|
319 |
rsv_blocks, revoke_creds); |
470decc61
|
320 321 322 |
} #define ext4_journal_stop(handle) \ |
c398eda0e
|
323 |
__ext4_journal_stop(__func__, __LINE__, (handle)) |
470decc61
|
324 |
|
6b523df4f
|
325 |
#define ext4_journal_start_reserved(handle, type) \ |
5fe2fe895
|
326 327 328 329 |
__ext4_journal_start_reserved((handle), __LINE__, (type)) handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line, int type); |
470decc61
|
330 331 332 333 |
/* Return the journal handle attached to the current task, if any */
static inline handle_t *ext4_journal_current_handle(void)
{
	return journal_current_handle();
}
83448bdfb
|
334 |
static inline int ext4_journal_extend(handle_t *handle, int nblocks, int revoke)
{
	/* Nothing to extend for a fake no-journal handle */
	if (!ext4_handle_valid(handle))
		return 0;
	return jbd2_journal_extend(handle, nblocks, revoke);
}
83448bdfb
|
340 341 |
static inline int ext4_journal_restart(handle_t *handle, int nblocks, int revoke)
{
	/* A fake no-journal handle needs no restart */
	if (!ext4_handle_valid(handle))
		return 0;
	return jbd2__journal_restart(handle, nblocks, revoke, GFP_NOFS);
}
a41303679
|
347 |
int __ext4_journal_ensure_credits(handle_t *handle, int check_cred, |
83448bdfb
|
348 |
int extend_cred, int revoke_cred); |
a41303679
|
349 350 351 352 353 354 355 356 357 358 359 360 |
/* * Ensure @handle has at least @check_creds credits available. If not, * transaction will be extended or restarted to contain at least @extend_cred * credits. Before restarting transaction @fn is executed to allow for cleanup * before the transaction is restarted. * * The return value is < 0 in case of error, 0 in case the handle has enough * credits or transaction extension succeeded, 1 in case transaction had to be * restarted. */ |
83448bdfb
|
361 362 |
#define ext4_journal_ensure_credits_fn(handle, check_cred, extend_cred, \ revoke_cred, fn) \ |
a41303679
|
363 364 365 |
({ \ __label__ __ensure_end; \ int err = __ext4_journal_ensure_credits((handle), (check_cred), \ |
83448bdfb
|
366 |
(extend_cred), (revoke_cred)); \ |
a41303679
|
367 368 369 370 371 372 |
\ if (err <= 0) \ goto __ensure_end; \ err = (fn); \ if (err < 0) \ goto __ensure_end; \ |
83448bdfb
|
373 |
err = ext4_journal_restart((handle), (extend_cred), (revoke_cred)); \ |
a41303679
|
374 375 376 377 378 379 380 381 |
if (err == 0) \ err = 1; \ __ensure_end: \ err; \ }) /* * Ensure given handle has at least requested amount of credits available, |
83448bdfb
|
382 383 384 385 |
* possibly restarting transaction if needed. We also make sure the transaction * has space for at least ext4_trans_default_revoke_credits(sb) revoke records * as freeing one or two blocks is very common pattern and requesting this is * very cheap. |
a41303679
|
386 |
*/ |
83448bdfb
|
387 388 |
/*
 * Make sure @handle carries at least @credits buffer credits and space for
 * @revoke_creds revoke records, extending or restarting the transaction if
 * necessary.  Returns < 0 on error, 0 if the handle already had enough
 * credits or was extended, and 1 if the transaction had to be restarted.
 */
static inline int ext4_journal_ensure_credits(handle_t *handle, int credits,
					      int revoke_creds)
{
	return ext4_journal_ensure_credits_fn(handle, credits, credits,
					      revoke_creds, 0);
}
470decc61
|
393 394 |
static inline int ext4_journal_blocks_per_page(struct inode *inode)
{
	journal_t *journal = EXT4_JOURNAL(inode);

	/* Without a journal there are no journal blocks to count */
	return journal ? jbd2_journal_blocks_per_page(inode) : 0;
}

static inline int ext4_journal_force_commit(journal_t *journal)
{
	/* Nothing to commit in no-journal mode */
	return journal ? jbd2_journal_force_commit(journal) : 0;
}
ee0876bc6
|
406 |
static inline int ext4_jbd2_inode_add_write(handle_t *handle,
		struct inode *inode, loff_t start_byte, loff_t length)
{
	/* Fake no-journal handles track no ordered data */
	if (!ext4_handle_valid(handle))
		return 0;
	return jbd2_journal_inode_ranged_write(handle, EXT4_I(inode)->jinode,
					       start_byte, length);
}
ee0876bc6
|
414 |
static inline int ext4_jbd2_inode_add_wait(handle_t *handle,
		struct inode *inode, loff_t start_byte, loff_t length)
{
	/* Fake no-journal handles track no ordered data */
	if (!ext4_handle_valid(handle))
		return 0;
	return jbd2_journal_inode_ranged_wait(handle, EXT4_I(inode)->jinode,
					      start_byte, length);
}
b436b9bef
|
422 423 424 425 426 |
static inline void ext4_update_inode_fsync_trans(handle_t *handle,
						 struct inode *inode,
						 int datasync)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	tid_t tid;

	/* Skip fake handles and handles whose transaction was aborted */
	if (!ext4_handle_valid(handle) || is_handle_aborted(handle))
		return;
	tid = handle->h_transaction->t_tid;
	ei->i_sync_tid = tid;
	if (datasync)
		ei->i_datasync_tid = tid;
}
470decc61
|
433 434 |
/* super.c */ int ext4_force_commit(struct super_block *sb); |
3d2b15826
|
435 436 437 438 439 440 |
/* * Ext4 inode journal modes */ #define EXT4_INODE_JOURNAL_DATA_MODE 0x01 /* journal data mode */ #define EXT4_INODE_ORDERED_DATA_MODE 0x02 /* ordered data mode */ #define EXT4_INODE_WRITEBACK_DATA_MODE 0x04 /* writeback data mode */ |
46797ad75
|
441 |
int ext4_inode_journal_mode(struct inode *inode); |
3d2b15826
|
442 443 444 445 |
static inline int ext4_should_journal_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE; |
470decc61
|
446 447 448 449 |
} static inline int ext4_should_order_data(struct inode *inode) { |
3d2b15826
|
450 |
return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE; |
470decc61
|
451 452 453 454 |
} static inline int ext4_should_writeback_data(struct inode *inode) { |
3d2b15826
|
455 |
return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE; |
470decc61
|
456 |
} |
83448bdfb
|
457 458 459 460 461 462 463 464 465 466 467 468 |
static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks)
{
	/*
	 * Revoke records are only needed when this particular inode journals
	 * its data while the filesystem as a whole is not in journal-data
	 * mode.
	 */
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    !ext4_should_journal_data(inode))
		return 0;
	/*
	 * Data blocks in one extent are contiguous, just account for partial
	 * clusters at extent boundaries
	 */
	return blocks + 2 * (EXT4_SB(inode->i_sb)->s_cluster_ratio - 1);
}
744692dc0
|
469 470 471 472 |
/* * This function controls whether or not we should try to go down the * dioread_nolock code paths, which makes it safe to avoid taking * i_mutex for direct I/O reads. This only works for extent-based |
206f7ab4f
|
473 474 475 476 |
* files, and it doesn't work if data journaling is enabled, since the * dioread_nolock code uses b_private to pass information back to the * I/O completion handler, and this conflicts with the jbd's use of * b_private. |
744692dc0
|
477 478 479 480 481 |
*/ static inline int ext4_should_dioread_nolock(struct inode *inode) { if (!test_opt(inode->i_sb, DIOREAD_NOLOCK)) return 0; |
744692dc0
|
482 483 |
if (!S_ISREG(inode->i_mode)) return 0; |
12e9b8920
|
484 |
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
744692dc0
|
485 486 487 |
return 0; if (ext4_should_journal_data(inode)) return 0; |
c8980e198
|
488 489 490 |
/* temporary fix to prevent generic/422 test failures */ if (!test_opt(inode->i_sb, DELALLOC)) return 0; |
744692dc0
|
491 492 |
return 1; } |
3dcf54515
|
493 |
#endif /* _EXT4_JBD2_H */ |