// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/locks.c
 *
 * We implement four types of file locks: BSD locks, posix locks, open
 * file description locks, and leases. For details about BSD locks,
 * see the flock(2) man page; for details about the other three, see
 * fcntl(2).
 *
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and the others must wait their turn.
 * The first lock has been "applied" or "granted", the others are "waiting"
 * and are "blocked" by the "applied" lock.
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above). This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3). If it doesn't, it must
 * conflict with some applied lock. We remove (wake up) all of its children
 * (2), and add it as a new leaf to the tree rooted in the applied
 * lock (1). We then repeat the process recursively with those
 * children.
 *
 */
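
/*
 * Illustration only (hedged example, not part of the original file): the
 * two classic userspace entry points that end up in this code. flock(2)
 * takes a whole-file BSD lock; fcntl(2) with F_SETLKW takes a POSIX
 * byte-range lock and sleeps until it can be applied. A hypothetical
 * snippet:
 *
 *	flock(fd, LOCK_EX);			(BSD lock on the open file)
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,			(0 extends to EOF and beyond)
 *	};
 *	fcntl(fd, F_SETLKW, &fl);		(POSIX byte-range lock)
 */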

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/filelock.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/sysctl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

static int leases_enable = 1;
static int lease_break_time = 45;

#ifdef CONFIG_SYSCTL
static struct ctl_table locks_sysctls[] = {
	{
		.procname	= "leases-enable",
		.data		= &leases_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_MMU
	{
		.procname	= "lease-break-time",
		.data		= &lease_break_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif /* CONFIG_MMU */
	{}
};

static int __init init_fs_locks_sysctls(void)
{
	register_sysctl_init("fs", locks_sysctls);
	return 0;
}
early_initcall(init_fs_locks_sysctls);
#endif /* CONFIG_SYSCTL */

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);


/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = locks_inode_context(inode);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = locks_inode_context(inode);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}

static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
	}
}

static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
			  char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}

void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = locks_inode_context(inode);

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_blocked_requests);
	INIT_LIST_HEAD(&fl->fl_blocked_member);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_blocked_requests));
	BUG_ON(!list_empty(&fl->fl_blocked_member));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/**
 * locks_owner_has_blockers - Check for blocking lock requests
 * @flctx: file lock context
 * @owner: lock owner
 *
 * Return values:
 *   %true: @owner has at least one blocker
 *   %false: @owner has no blockers
 */
bool locks_owner_has_blockers(struct file_lock_context *flctx,
			      fl_owner_t owner)
{
	struct file_lock *fl;

	spin_lock(&flctx->flc_lock);
	list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
		if (fl->fl_owner != owner)
			continue;
		if (!list_empty(&fl->fl_blocked_requests)) {
			spin_unlock(&flctx->flc_lock);
			return true;
		}
	}
	spin_unlock(&flctx->flc_lock);
	return false;
}
EXPORT_SYMBOL_GPL(locks_owner_has_blockers);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);

static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->fl_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}

static inline int flock_translate_cmd(int cmd) {
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
{
	locks_init_lock(fl);

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;
}

static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + (l->l_len - 1);

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
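
/*
 * A worked example of the mapping above (illustration only): with
 * l_whence == SEEK_SET, l_start == 100 and l_len == -10, POSIX-2001
 * says the request covers the ten bytes before l_start, and the code
 * above yields fl_start == 90, fl_end == 99. With l_len == 10 it
 * yields fl_start == 100, fl_end == 109, and with l_len == 0 the lock
 * runs from fl_start to OFFSET_MAX, i.e. to EOF and beyond.
 */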

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}

/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	return fl1->fl_owner == fl2->fl_owner;
}

/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}

static unsigned long
posix_owner_key(struct file_lock *fl)
{
	return (unsigned long)fl->fl_owner;
}

static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_blocked_member);
}

static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);

		/*
		 * The setting of fl_blocker to NULL marks the "done"
		 * point in deleting a block. Paired with acquire at the top
		 * of locks_delete_block().
		 */
		smp_store_release(&waiter->fl_blocker, NULL);
	}
}

/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
	 * the lock and is the only one that might try to claim the lock.
	 *
	 * We use acquire/release to manage fl_blocker so that we can
	 * optimize away taking the blocked_lock_lock in many cases.
	 *
	 * The smp_load_acquire guarantees two things:
	 *
	 * 1/ that fl_blocked_requests can be tested locklessly. If something
	 * was recently added to that list it must have been in a locked region
	 * *before* the locked region when fl_blocker was set to NULL.
	 *
	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
	 * it. __locks_wake_up_blocks is careful not to touch waiter after
	 * fl_blocker is released.
	 *
	 * If a lockless check of fl_blocker shows it to be NULL, we know that
	 * no new locks can be inserted into its fl_blocked_requests list, and
	 * can avoid doing anything further if the list is empty.
	 */
	if (!smp_load_acquire(&waiter->fl_blocker) &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);

	/*
	 * The setting of fl_blocker to NULL marks the "done" point in deleting
	 * a block. Paired with acquire at the top of this function.
	 */
	smp_store_release(&waiter->fl_blocker, NULL);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
{
	struct file_lock *fl;
	BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker = fl;
			goto new_blocker;
		}
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);

	/* The requests in waiter->fl_blocked_requests are known to conflict
	 * with waiter, but might not conflict with blocker, or the requests
	 * and lock which block it. So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
}

/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter,
			       bool conflict(struct file_lock *,
					     struct file_lock *))
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter, conflict);
	spin_unlock(&blocked_lock_lock);
}

/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(blocker);
	spin_unlock(&blocked_lock_lock);
}

static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static bool locks_conflict(struct file_lock *caller_fl,
			   struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return true;
	if (caller_fl->fl_type == F_WRLCK)
		return true;
	return false;
}

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static bool posix_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return false;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static bool flock_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = file_inode(filp);
	void *owner;
	void (*func)(void);

	ctx = locks_inode_context(inode);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

retry:
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (!posix_locks_conflict(fl, cfl))
			continue;
		if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
			&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
			owner = cfl->fl_lmops->lm_mod_owner;
			func = cfl->fl_lmops->lm_expire_lock;
			__module_get(owner);
			spin_unlock(&ctx->flc_lock);
			(*func)();
			module_put(owner);
			goto retry;
		}
		locks_copy_conflock(fl, cfl);
		goto out;
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
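
/*
 * Illustration only (hypothetical userspace counterpart of the above):
 * F_GETLK reports the first conflicting lock, if any, without applying
 * anything:
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		.l_start = 0, .l_len = 0,
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *
 * If fl.l_type comes back as F_UNLCK, the request would have succeeded;
 * otherwise fl now describes the conflicting lock (via the conflock copy
 * made above).
 */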

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */

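/*
 * A minimal example of the cycle detected below (illustration only):
 * task A holds a lock on byte 0 and is waiting for byte 1; task B holds
 * byte 1 and now requests byte 0 with F_SETLKW. Walking the chain from
 * B's blocker (A's lock on byte 0) finds A's pending request, whose
 * fl_blocker chain ends at B's own lock on byte 1. posix_locks_deadlock()
 * then returns nonzero and B's fcntl(2) fails with EDEADLK rather than
 * sleeping forever.
 */
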
#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl)) {
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
	}
	return NULL;
}

/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}

static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);
	void *owner;
	void (*func)(void);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

1099 : * We may need two file_lock structures for this operation,
1100 : * so we get them in advance to avoid races.
1101 : *
1102 : * In some cases we can be sure, that no new locks will be needed
1103 : */
1104 0 : if (!(request->fl_flags & FL_ACCESS) &&
1105 0 : (request->fl_type != F_UNLCK ||
1106 0 : request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1107 0 : new_fl = locks_alloc_lock();
1108 0 : new_fl2 = locks_alloc_lock();
1109 : }
1110 :
1111 : retry:
1112 0 : percpu_down_read(&file_rwsem);
1113 0 : spin_lock(&ctx->flc_lock);
1114 : /*
1115 : * New lock request. Walk all POSIX locks and look for conflicts. If
1116 : * there are any, either return error or put the request on the
1117 : * blocker's list of waiters and the global blocked_hash.
1118 : */
1119 0 : if (request->fl_type != F_UNLCK) {
1120 0 : list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1121 0 : if (!posix_locks_conflict(request, fl))
1122 0 : continue;
1123 0 : if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
1124 0 : && (*fl->fl_lmops->lm_lock_expirable)(fl)) {
1125 0 : owner = fl->fl_lmops->lm_mod_owner;
1126 0 : func = fl->fl_lmops->lm_expire_lock;
1127 0 : __module_get(owner);
1128 0 : spin_unlock(&ctx->flc_lock);
1129 0 : percpu_up_read(&file_rwsem);
1130 0 : (*func)();
1131 0 : module_put(owner);
1132 : goto retry;
1133 : }
1134 0 : if (conflock)
1135 0 : locks_copy_conflock(conflock, fl);
1136 0 : error = -EAGAIN;
1137 0 : if (!(request->fl_flags & FL_SLEEP))
1138 : goto out;
1139 : /*
1140 : * Deadlock detection and insertion into the blocked
1141 : * locks list must be done while holding the same lock!
1142 : */
1143 0 : error = -EDEADLK;
1144 0 : spin_lock(&blocked_lock_lock);
1145 : /*
1146 : * Ensure that we don't find any locks blocked on this
1147 : * request during deadlock detection.
1148 : */
1149 0 : __locks_wake_up_blocks(request);
1150 0 : if (likely(!posix_locks_deadlock(request, fl))) {
1151 0 : error = FILE_LOCK_DEFERRED;
1152 0 : __locks_insert_block(fl, request,
1153 : posix_locks_conflict);
1154 : }
1155 : spin_unlock(&blocked_lock_lock);
1156 : goto out;
1157 : }
1158 : }
1159 :
1160 : /* If we're just looking for a conflict, we're done. */
1161 0 : error = 0;
1162 0 : if (request->fl_flags & FL_ACCESS)
1163 : goto out;
1164 :
1165 : /* Find the first old lock with the same owner as the new lock */
1166 0 : list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1167 0 : if (posix_same_owner(request, fl))
1168 : break;
1169 : }
1170 :
1171 : /* Process locks with this owner. */
1172 0 : list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1173 0 : if (!posix_same_owner(request, fl))
1174 : break;
1175 :
1176 : /* Detect adjacent or overlapping regions (if same lock type) */
1177 0 : if (request->fl_type == fl->fl_type) {
1178 : /* In all comparisons of start vs end, use
1179 : * "start - 1" rather than "end + 1". If end
1180 : * is OFFSET_MAX, end + 1 will become negative.
1181 : */
1182 0 : if (fl->fl_end < request->fl_start - 1)
1183 0 : continue;
1184 : /* If the next lock in the list has entirely bigger
1185 : * addresses than the new one, insert the lock here.
1186 : */
1187 0 : if (fl->fl_start - 1 > request->fl_end)
1188 : break;
1189 :
1190 : /* If we come here, the new and old lock are of the
1191 : * same type and adjacent or overlapping. Make one
1192 : * lock yielding from the lower start address of both
1193 : * locks to the higher end address.
1194 : */
1195 0 : if (fl->fl_start > request->fl_start)
1196 0 : fl->fl_start = request->fl_start;
1197 : else
1198 0 : request->fl_start = fl->fl_start;
1199 0 : if (fl->fl_end < request->fl_end)
1200 0 : fl->fl_end = request->fl_end;
1201 : else
1202 0 : request->fl_end = fl->fl_end;
1203 0 : if (added) {
1204 0 : locks_delete_lock_ctx(fl, &dispose);
1205 0 : continue;
1206 : }
1207 : request = fl;
1208 : added = true;
1209 : } else {
1210 : /* Processing for different lock types is a bit
1211 : * more complex.
1212 : */
1213 0 : if (fl->fl_end < request->fl_start)
1214 0 : continue;
1215 0 : if (fl->fl_start > request->fl_end)
1216 : break;
1217 0 : if (request->fl_type == F_UNLCK)
1218 0 : added = true;
1219 0 : if (fl->fl_start < request->fl_start)
1220 0 : left = fl;
1221 : /* If the next lock in the list has a higher end
1222 : * address than the new one, insert the new one here.
1223 : */
1224 0 : if (fl->fl_end > request->fl_end) {
1225 : right = fl;
1226 : break;
1227 : }
1228 0 : if (fl->fl_start >= request->fl_start) {
1229 : /* The new lock completely replaces an old
1230 : * one (This may happen several times).
1231 : */
1232 0 : if (added) {
1233 0 : locks_delete_lock_ctx(fl, &dispose);
1234 0 : continue;
1235 : }
1236 : /*
1237 : * Replace the old lock with new_fl, and
1238 : * remove the old one. It's safe to do the
1239 : * insert here since we know that we won't be
1240 : * using new_fl later, and that the lock is
1241 : * just replacing an existing lock.
1242 : */
1243 0 : error = -ENOLCK;
1244 0 : if (!new_fl)
1245 : goto out;
1246 0 : locks_copy_lock(new_fl, request);
1247 0 : locks_move_blocks(new_fl, request);
1248 0 : request = new_fl;
1249 0 : new_fl = NULL;
1250 0 : locks_insert_lock_ctx(request, &fl->fl_list);
1251 0 : locks_delete_lock_ctx(fl, &dispose);
1252 0 : added = true;
1253 : }
1254 : }
1255 : }
1256 :
1257 : /*
1258 : * The above code only modifies existing locks in case of merging or
1259 : * replacing. If new lock(s) need to be inserted all modifications are
1260 : * done below this, so it's safe yet to bail out.
1261 : */
1262 0 : error = -ENOLCK; /* "no luck" */
1263 0 : if (right && left == right && !new_fl2)
1264 : goto out;
1265 :
1266 0 : error = 0;
1267 0 : if (!added) {
1268 0 : if (request->fl_type == F_UNLCK) {
1269 0 : if (request->fl_flags & FL_EXISTS)
1270 0 : error = -ENOENT;
1271 : goto out;
1272 : }
1273 :
1274 0 : if (!new_fl) {
1275 : error = -ENOLCK;
1276 : goto out;
1277 : }
1278 0 : locks_copy_lock(new_fl, request);
1279 0 : locks_move_blocks(new_fl, request);
1280 0 : locks_insert_lock_ctx(new_fl, &fl->fl_list);
1281 0 : fl = new_fl;
1282 0 : new_fl = NULL;
1283 : }
1284 0 : if (right) {
1285 0 : if (left == right) {
1286 : /* The new lock breaks the old one in two pieces,
1287 : * so we have to use the second new lock.
1288 : */
1289 0 : left = new_fl2;
1290 0 : new_fl2 = NULL;
1291 0 : locks_copy_lock(left, right);
1292 0 : locks_insert_lock_ctx(left, &fl->fl_list);
1293 : }
1294 0 : right->fl_start = request->fl_end + 1;
1295 : locks_wake_up_blocks(right);
1296 : }
1297 0 : if (left) {
1298 0 : left->fl_end = request->fl_start - 1;
1299 : locks_wake_up_blocks(left);
1300 : }
1301 : out:
1302 0 : spin_unlock(&ctx->flc_lock);
1303 0 : percpu_up_read(&file_rwsem);
1304 : /*
1305 : * Free any unused locks.
1306 : */
1307 0 : if (new_fl)
1308 : locks_free_lock(new_fl);
1309 0 : if (new_fl2)
1310 : locks_free_lock(new_fl2);
1311 0 : locks_dispose_list(&dispose);
1312 0 : trace_posix_lock_inode(inode, request, error);
1313 :
1314 0 : return error;
1315 : }
1316 :
1317 : /**
1318 : * posix_lock_file - Apply a POSIX-style lock to a file
1319 : * @filp: The file to apply the lock to
1320 : * @fl: The lock to be applied
1321 : * @conflock: Place to return a copy of the conflicting lock, if found.
1322 : *
1323 : * Add a POSIX style lock to a file.
1324 : * We merge adjacent & overlapping locks whenever possible.
1325 : * POSIX locks are sorted by owner task, then by starting address
1326 : *
1327 : * Note that if called with an FL_EXISTS argument, the caller may determine
1328 : * whether or not a lock was successfully freed by testing the return
1329 : * value for -ENOENT.
1330 : */
1331 0 : int posix_lock_file(struct file *filp, struct file_lock *fl,
1332 : struct file_lock *conflock)
1333 : {
1334 0 : return posix_lock_inode(file_inode(filp), fl, conflock);
1335 : }
1336 : EXPORT_SYMBOL(posix_lock_file);
1337 :
1338 : /**
1339 : * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1340 : * @inode: inode of file to which lock request should be applied
1341 : * @fl: The lock to be applied
1342 : *
1343 : * Apply a POSIX style lock request to an inode.
1344 : */
1345 0 : static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1346 : {
1347 : int error;
1348 : might_sleep ();
1349 : for (;;) {
1350 0 : error = posix_lock_inode(inode, fl, NULL);
1351 0 : if (error != FILE_LOCK_DEFERRED)
1352 : break;
1353 0 : error = wait_event_interruptible(fl->fl_wait,
1354 : list_empty(&fl->fl_blocked_member));
1355 0 : if (error)
1356 : break;
1357 : }
1358 0 : locks_delete_block(fl);
1359 0 : return error;
1360 : }
1361 :
1362 : static void lease_clear_pending(struct file_lock *fl, int arg)
1363 : {
1364 0 : switch (arg) {
1365 : case F_UNLCK:
1366 0 : fl->fl_flags &= ~FL_UNLOCK_PENDING;
1367 : fallthrough;
1368 : case F_RDLCK:
1369 0 : fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1370 : }
1371 : }
1372 :
1373 : /* We already had a lease on this file; just change its type */
1374 0 : int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1375 : {
1376 0 : int error = assign_type(fl, arg);
1377 :
1378 0 : if (error)
1379 : return error;
1380 0 : lease_clear_pending(fl, arg);
1381 0 : locks_wake_up_blocks(fl);
1382 0 : if (arg == F_UNLCK) {
1383 0 : struct file *filp = fl->fl_file;
1384 :
1385 0 : f_delown(filp);
1386 0 : filp->f_owner.signum = 0;
1387 0 : fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1388 0 : if (fl->fl_fasync != NULL) {
1389 0 : printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1390 0 : fl->fl_fasync = NULL;
1391 : }
1392 0 : locks_delete_lock_ctx(fl, dispose);
1393 : }
1394 : return 0;
1395 : }
1396 : EXPORT_SYMBOL(lease_modify);
1397 :
1398 : static bool past_time(unsigned long then)
1399 : {
1400 0 : if (!then)
1401 : /* 0 is a special value meaning "this never expires": */
1402 : return false;
1403 0 : return time_after(jiffies, then);
1404 : }
1405 :
1406 0 : static void time_out_leases(struct inode *inode, struct list_head *dispose)
1407 : {
1408 0 : struct file_lock_context *ctx = inode->i_flctx;
1409 : struct file_lock *fl, *tmp;
1410 :
1411 : lockdep_assert_held(&ctx->flc_lock);
1412 :
1413 0 : list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1414 0 : trace_time_out_leases(inode, fl);
1415 0 : if (past_time(fl->fl_downgrade_time))
1416 0 : lease_modify(fl, F_RDLCK, dispose);
1417 0 : if (past_time(fl->fl_break_time))
1418 0 : lease_modify(fl, F_UNLCK, dispose);
1419 : }
1420 0 : }
1421 :
1422 0 : static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1423 : {
1424 : bool rc;
1425 :
1426 0 : if (lease->fl_lmops->lm_breaker_owns_lease
1427 0 : && lease->fl_lmops->lm_breaker_owns_lease(lease))
1428 : return false;
1429 0 : if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
1430 : rc = false;
1431 : goto trace;
1432 : }
1433 0 : if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
1434 : rc = false;
1435 : goto trace;
1436 : }
1437 :
1438 0 : rc = locks_conflict(breaker, lease);
1439 : trace:
1440 0 : trace_leases_conflict(rc, lease, breaker);
1441 0 : return rc;
1442 : }
1443 :
1444 : static bool
1445 0 : any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1446 : {
1447 0 : struct file_lock_context *ctx = inode->i_flctx;
1448 : struct file_lock *fl;
1449 :
1450 : lockdep_assert_held(&ctx->flc_lock);
1451 :
1452 0 : list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1453 0 : if (leases_conflict(fl, breaker))
1454 : return true;
1455 : }
1456 : return false;
1457 : }
1458 :
1459 : /**
1460 : * __break_lease - revoke all outstanding leases on file
1461 : * @inode: the inode of the file to return
1462 : * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1463 : * break all leases
1464 : * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
1465 : * only delegations
1466 : *
1467 : * break_lease (inlined for speed) has checked there already is at least
1468 : * some kind of lock (maybe a lease) on this file. Leases are broken on
1469 : * a call to open() or truncate(). This function can sleep unless you
1470 : * specified %O_NONBLOCK to your open().
1471 : */
1472 0 : int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1473 : {
1474 0 : int error = 0;
1475 : struct file_lock_context *ctx;
1476 : struct file_lock *new_fl, *fl, *tmp;
1477 : unsigned long break_time;
1478 0 : int want_write = (mode & O_ACCMODE) != O_RDONLY;
1479 0 : LIST_HEAD(dispose);
1480 :
1481 0 : new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1482 0 : if (IS_ERR(new_fl))
1483 0 : return PTR_ERR(new_fl);
1484 0 : new_fl->fl_flags = type;
1485 :
1486 : /* typically we will check that ctx is non-NULL before calling */
1487 0 : ctx = locks_inode_context(inode);
1488 0 : if (!ctx) {
1489 0 : WARN_ON_ONCE(1);
1490 : goto free_lock;
1491 : }
1492 :
1493 0 : percpu_down_read(&file_rwsem);
1494 0 : spin_lock(&ctx->flc_lock);
1495 :
1496 0 : time_out_leases(inode, &dispose);
1497 :
1498 0 : if (!any_leases_conflict(inode, new_fl))
1499 : goto out;
1500 :
1501 0 : break_time = 0;
1502 0 : if (lease_break_time > 0) {
1503 0 : break_time = jiffies + lease_break_time * HZ;
1504 0 : if (break_time == 0)
1505 0 : break_time++; /* so that 0 means no break time */
1506 : }
1507 :
1508 0 : list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1509 0 : if (!leases_conflict(fl, new_fl))
1510 0 : continue;
1511 0 : if (want_write) {
1512 0 : if (fl->fl_flags & FL_UNLOCK_PENDING)
1513 0 : continue;
1514 0 : fl->fl_flags |= FL_UNLOCK_PENDING;
1515 0 : fl->fl_break_time = break_time;
1516 : } else {
1517 0 : if (lease_breaking(fl))
1518 0 : continue;
1519 0 : fl->fl_flags |= FL_DOWNGRADE_PENDING;
1520 0 : fl->fl_downgrade_time = break_time;
1521 : }
1522 0 : if (fl->fl_lmops->lm_break(fl))
1523 : locks_delete_lock_ctx(fl, &dispose);
1524 : }
1525 :
1526 0 : if (list_empty(&ctx->flc_lease))
1527 : goto out;
1528 :
1529 0 : if (mode & O_NONBLOCK) {
1530 : trace_break_lease_noblock(inode, new_fl);
1531 : error = -EWOULDBLOCK;
1532 : goto out;
1533 : }
1534 :
1535 : restart:
1536 0 : fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1537 0 : break_time = fl->fl_break_time;
1538 0 : if (break_time != 0)
1539 0 : break_time -= jiffies;
1540 0 : if (break_time == 0)
1541 0 : break_time++;
1542 0 : locks_insert_block(fl, new_fl, leases_conflict);
1543 0 : trace_break_lease_block(inode, new_fl);
1544 0 : spin_unlock(&ctx->flc_lock);
1545 0 : percpu_up_read(&file_rwsem);
1546 :
1547 0 : locks_dispose_list(&dispose);
1548 0 : error = wait_event_interruptible_timeout(new_fl->fl_wait,
1549 : list_empty(&new_fl->fl_blocked_member),
1550 : break_time);
1551 :
1552 0 : percpu_down_read(&file_rwsem);
1553 0 : spin_lock(&ctx->flc_lock);
1554 0 : trace_break_lease_unblock(inode, new_fl);
1555 0 : locks_delete_block(new_fl);
1556 0 : if (error >= 0) {
1557 : /*
1558 : * Wait for the next conflicting lease that has not been
1559 : * broken yet
1560 : */
1561 0 : if (error == 0)
1562 0 : time_out_leases(inode, &dispose);
1563 0 : if (any_leases_conflict(inode, new_fl))
1564 : goto restart;
1565 : error = 0;
1566 : }
1567 : out:
1568 0 : spin_unlock(&ctx->flc_lock);
1569 0 : percpu_up_read(&file_rwsem);
1570 0 : locks_dispose_list(&dispose);
1571 : free_lock:
1572 0 : locks_free_lock(new_fl);
1573 0 : return error;
1574 : }
1575 : EXPORT_SYMBOL(__break_lease);
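
/*
 * Illustration only: a process opening a leased file for write, e.g.
 * open(path, O_RDWR | O_NONBLOCK), reaches this function via
 * break_lease(). With O_NONBLOCK it gets -EWOULDBLOCK back immediately,
 * while the lease holder is still notified (SIGIO via lm_break above)
 * and has up to lease_break_time seconds to unlock or downgrade before
 * the lease times out.
 */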

/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time:  pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = locks_inode_context(inode);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *	changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = locks_inode_context(inode);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
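
/*
 * Illustration only (hypothetical userspace counterpart of the above):
 *
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		take a read lease
 *	fcntl(fd, F_GETLEASE);			now returns F_RDLCK
 *	fcntl(fd, F_SETLEASE, F_UNLCK);		drop the lease again
 */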
1656 :
1657 : /**
1658 : * check_conflicting_open - see if the given file points to an inode that has
1659 : * an existing open that would conflict with the
1660 : * desired lease.
1661 : * @filp: file to check
1662 : * @arg: type of lease that we're trying to acquire
1663 : * @flags: current lock flags
1664 : *
1665 : * Check to see if there's an existing open fd on this file that would
1666 : * conflict with the lease we're trying to set.
1667 : */
1668 : static int
1669 0 : check_conflicting_open(struct file *filp, const long arg, int flags)
1670 : {
1671 0 : struct inode *inode = file_inode(filp);
1672 0 : int self_wcount = 0, self_rcount = 0;
1673 :
1674 0 : if (flags & FL_LAYOUT)
1675 : return 0;
1676 0 : if (flags & FL_DELEG)
1677 : /* We leave these checks to the caller */
1678 : return 0;
1679 :
1680 0 : if (arg == F_RDLCK)
1681 0 : return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1682 0 : else if (arg != F_WRLCK)
1683 : return 0;
1684 :
1685 : /*
1686 : * Make sure that only read/write count is from lease requestor.
1687 : * Note that this will result in denying write leases when i_writecount
1688 : * is negative, which is what we want. (We shouldn't grant write leases
1689 : * on files open for execution.)
1690 : */
1691 0 : if (filp->f_mode & FMODE_WRITE)
1692 : self_wcount = 1;
1693 0 : else if (filp->f_mode & FMODE_READ)
1694 0 : self_rcount = 1;
1695 :
1696 0 : if (atomic_read(&inode->i_writecount) != self_wcount ||
1697 0 : atomic_read(&inode->i_readcount) != self_rcount)
1698 : return -EAGAIN;
1699 :
1700 : return 0;
1701 : }
1702 :
1703 : static int
1704 0 : generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1705 : {
1706 0 : struct file_lock *fl, *my_fl = NULL, *lease;
1707 0 : struct inode *inode = file_inode(filp);
1708 : struct file_lock_context *ctx;
1709 0 : bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1710 : int error;
1711 0 : LIST_HEAD(dispose);
1712 :
1713 0 : lease = *flp;
1714 0 : trace_generic_add_lease(inode, lease);
1715 :
1716 : /* Note that arg is never F_UNLCK here */
1717 0 : ctx = locks_get_lock_context(inode, arg);
1718 0 : if (!ctx)
1719 : return -ENOMEM;
1720 :
1721 : /*
1722 : * In the delegation case we need mutual exclusion with
1723 : * a number of operations that take the i_mutex. We trylock
1724 : * because delegations are an optional optimization, and if
1725 : * there's some chance of a conflict--we'd rather not
1726 : * bother, maybe that's a sign this just isn't a good file to
1727 : * hand out a delegation on.
1728 : */
1729 0 : if (is_deleg && !inode_trylock(inode))
1730 : return -EAGAIN;
1731 :
1732 0 : if (is_deleg && arg == F_WRLCK) {
1733 : /* Write delegations are not currently supported: */
1734 0 : inode_unlock(inode);
1735 0 : WARN_ON_ONCE(1);
1736 : return -EINVAL;
1737 : }
1738 :
1739 0 : percpu_down_read(&file_rwsem);
1740 0 : spin_lock(&ctx->flc_lock);
1741 0 : time_out_leases(inode, &dispose);
1742 0 : error = check_conflicting_open(filp, arg, lease->fl_flags);
1743 0 : if (error)
1744 : goto out;
1745 :
1746 : /*
1747 : * At this point, we know that if there is an exclusive
1748 : * lease on this file, then we hold it on this filp
1749 : * (otherwise our open of this file would have blocked).
1750 : * And if we are trying to acquire an exclusive lease,
1751 : * then the file is not open by anyone (including us)
1752 : * except for this filp.
1753 : */
1754 0 : error = -EAGAIN;
1755 0 : list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1756 0 : if (fl->fl_file == filp &&
1757 0 : fl->fl_owner == lease->fl_owner) {
1758 0 : my_fl = fl;
1759 0 : continue;
1760 : }
1761 :
1762 : /*
1763 : * No exclusive leases if someone else has a lease on
1764 : * this file:
1765 : */
1766 0 : if (arg == F_WRLCK)
1767 : goto out;
1768 : /*
1769 : * Modifying our existing lease is OK, but we can't get a
1770 : * new lease if someone else is opening for write:
1771 : */
1772 0 : if (fl->fl_flags & FL_UNLOCK_PENDING)
1773 : goto out;
1774 : }
1775 :
1776 0 : if (my_fl != NULL) {
1777 0 : lease = my_fl;
1778 0 : error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1779 0 : if (error)
1780 : goto out;
1781 : goto out_setup;
1782 : }
1783 :
1784 0 : error = -EINVAL;
1785 0 : if (!leases_enable)
1786 : goto out;
1787 :
1788 0 : locks_insert_lock_ctx(lease, &ctx->flc_lease);
1789 : /*
1790 : * The check in break_lease() is lockless. It's possible for another
1791 : * open to race in after we did the earlier check for a conflicting
1792 : * open but before the lease was inserted. Check again for a
1793 : * conflicting open and cancel the lease if there is one.
1794 : *
1795 : * We also add a barrier here to ensure that the insertion of the lock
1796 : * precedes these checks.
1797 : */
1798 0 : smp_mb();
1799 0 : error = check_conflicting_open(filp, arg, lease->fl_flags);
1800 0 : if (error) {
1801 0 : locks_unlink_lock_ctx(lease);
1802 0 : goto out;
1803 : }
1804 :
1805 : out_setup:
1806 0 : if (lease->fl_lmops->lm_setup)
1807 0 : lease->fl_lmops->lm_setup(lease, priv);
1808 : out:
1809 0 : spin_unlock(&ctx->flc_lock);
1810 0 : percpu_up_read(&file_rwsem);
1811 0 : locks_dispose_list(&dispose);
1812 0 : if (is_deleg)
1813 : inode_unlock(inode);
1814 0 : if (!error && !my_fl)
1815 0 : *flp = NULL;
1816 : return error;
1817 : }
1818 :
1819 0 : static int generic_delete_lease(struct file *filp, void *owner)
1820 : {
1821 0 : int error = -EAGAIN;
1822 0 : struct file_lock *fl, *victim = NULL;
1823 0 : struct inode *inode = file_inode(filp);
1824 : struct file_lock_context *ctx;
1825 0 : LIST_HEAD(dispose);
1826 :
1827 0 : ctx = locks_inode_context(inode);
1828 0 : if (!ctx) {
1829 : trace_generic_delete_lease(inode, NULL);
1830 : return error;
1831 : }
1832 :
1833 0 : percpu_down_read(&file_rwsem);
1834 0 : spin_lock(&ctx->flc_lock);
1835 0 : list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1836 0 : if (fl->fl_file == filp &&
1837 0 : fl->fl_owner == owner) {
1838 : victim = fl;
1839 : break;
1840 : }
1841 : }
1842 0 : trace_generic_delete_lease(inode, victim);
1843 0 : if (victim)
1844 0 : error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1845 0 : spin_unlock(&ctx->flc_lock);
1846 0 : percpu_up_read(&file_rwsem);
1847 0 : locks_dispose_list(&dispose);
1848 0 : return error;
1849 : }
1850 :
1851 : /**
1852 : * generic_setlease - sets a lease on an open file
1853 : * @filp: file pointer
1854 : * @arg: type of lease to obtain
1855 : * @flp: input - file_lock to use, output - file_lock inserted
1856 : * @priv: private data for lm_setup (may be NULL if lm_setup
1857 : * doesn't require it)
1858 : *
1859 : * The (input) flp->fl_lmops->lm_break function is required
1860 : * by break_lease().
1861 : */
1862 0 : int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1863 : void **priv)
1864 : {
1865 0 : struct inode *inode = file_inode(filp);
1866 0 : vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
1867 : int error;
1868 :
1869 0 : if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
1870 : return -EACCES;
1871 0 : if (!S_ISREG(inode->i_mode))
1872 : return -EINVAL;
1873 0 : error = security_file_lock(filp, arg);
1874 : if (error)
1875 : return error;
1876 :
1877 0 : switch (arg) {
1878 : case F_UNLCK:
1879 0 : return generic_delete_lease(filp, *priv);
1880 : case F_RDLCK:
1881 : case F_WRLCK:
1882 0 : if (!(*flp)->fl_lmops->lm_break) {
1883 0 : WARN_ON_ONCE(1);
1884 : return -ENOLCK;
1885 : }
1886 :
1887 0 : return generic_add_lease(filp, arg, flp, priv);
1888 : default:
1889 : return -EINVAL;
1890 : }
1891 : }
1892 : EXPORT_SYMBOL(generic_setlease);
1893 :
1894 : /*
1895 : * Kernel subsystems can register to be notified on any attempt to set
1896 : * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
1897 : * to close files that it may have cached when there is an attempt to set a
1898 : * conflicting lease.
1899 : */
1900 : static struct srcu_notifier_head lease_notifier_chain;
1901 :
1902 : static inline void
1903 : lease_notifier_chain_init(void)
1904 : {
1905 1 : srcu_init_notifier_head(&lease_notifier_chain);
1906 : }
1907 :
1908 : static inline void
1909 : setlease_notifier(long arg, struct file_lock *lease)
1910 : {
1911 0 : if (arg != F_UNLCK)
1912 0 : srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1913 : }
1914 :
1915 0 : int lease_register_notifier(struct notifier_block *nb)
1916 : {
1917 0 : return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1918 : }
1919 : EXPORT_SYMBOL_GPL(lease_register_notifier);
1920 :
1921 0 : void lease_unregister_notifier(struct notifier_block *nb)
1922 : {
1923 0 : srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1924 0 : }
1925 : EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1926 :
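/*
 * Editor's sketch (hypothetical module code): how a subsystem such as nfsd
 * consumes the chain above. Only lease_register_notifier() and
 * lease_unregister_notifier() are real interfaces here; the callback and
 * its body are invented for illustration.
 */
#if 0
static int demo_setlease_cb(struct notifier_block *nb, unsigned long arg,
			    void *data)
{
	struct file_lock *lease = data;

	/* arg is the requested lease type, F_RDLCK or F_WRLCK. */
	pr_info("lease of type %lu requested on %pD\n", arg, lease->fl_file);
	return NOTIFY_OK;
}

static struct notifier_block demo_lease_nb = {
	.notifier_call = demo_setlease_cb,
};

/* module init:  lease_register_notifier(&demo_lease_nb);   */
/* module exit:  lease_unregister_notifier(&demo_lease_nb); */
#endif
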
1927 : /**
1928 : * vfs_setlease - sets a lease on an open file
1929 : * @filp: file pointer
1930 : * @arg: type of lease to obtain
1931 : * @lease: file_lock to use when adding a lease
1932 : * @priv: private info for lm_setup when adding a lease (may be
1933 : * NULL if lm_setup doesn't require it)
1934 : *
1935 : * Call this to establish a lease on the file. The "lease" argument is not
1936 : * used for F_UNLCK requests and may be NULL. For commands that set or alter
1937 : * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
1938 : * set; if not, this function will return -ENOLCK (and generate a scary-looking
1939 : * stack trace).
1940 : *
1941 : * The "priv" pointer is passed directly to the lm_setup function as-is. It
1942 : * may be NULL if the lm_setup operation doesn't require it.
1943 : */
1944 : int
1945 0 : vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1946 : {
1947 0 : if (lease)
1948 0 : setlease_notifier(arg, *lease);
1949 0 : if (filp->f_op->setlease)
1950 0 : return filp->f_op->setlease(filp, arg, lease, priv);
1951 : else
1952 0 : return generic_setlease(filp, arg, lease, priv);
1953 : }
1954 : EXPORT_SYMBOL_GPL(vfs_setlease);
1955 :
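/*
 * Editor's sketch (hypothetical filesystem code): the dispatch above means
 * a filesystem happy with default lease semantics simply leaves ->setlease
 * NULL, while one that needs to veto or wrap leases supplies a hook.
 * demo_setlease() and demo_fops are invented names.
 */
#if 0
static int demo_setlease(struct file *filp, long arg,
			 struct file_lock **flp, void **priv)
{
	/* e.g. refuse new leases on this filesystem, but allow unlock: */
	if (arg != F_UNLCK)
		return -EINVAL;
	return generic_setlease(filp, arg, flp, priv);
}

static const struct file_operations demo_fops = {
	/* ... open/read/write ... */
	.setlease	= demo_setlease,
};
#endif
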
1956 0 : static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1957 : {
1958 : struct file_lock *fl;
1959 : struct fasync_struct *new;
1960 : int error;
1961 :
1962 0 : fl = lease_alloc(filp, arg);
1963 0 : if (IS_ERR(fl))
1964 0 : return PTR_ERR(fl);
1965 :
1966 0 : new = fasync_alloc();
1967 0 : if (!new) {
1968 0 : locks_free_lock(fl);
1969 0 : return -ENOMEM;
1970 : }
1971 0 : new->fa_fd = fd;
1972 :
1973 0 : error = vfs_setlease(filp, arg, &fl, (void **)&new);
1974 0 : if (fl)
1975 0 : locks_free_lock(fl);
1976 0 : if (new)
1977 0 : fasync_free(new);
1978 : return error;
1979 : }
1980 :
1981 : /**
1982 : * fcntl_setlease - sets a lease on an open file
1983 : * @fd: open file descriptor
1984 : * @filp: file pointer
1985 : * @arg: type of lease to obtain
1986 : *
1987 : * Call this fcntl to establish a lease on the file.
1988 : * Note that you also need to call %F_SETSIG to
1989 : * receive a signal when the lease is broken.
1990 : */
1991 0 : int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1992 : {
1993 0 : if (arg == F_UNLCK)
1994 0 : return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1995 0 : return do_fcntl_add_lease(fd, filp, arg);
1996 : }
1997 :
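/*
 * Editor's sketch (illustrative userspace code): taking a read lease and
 * arranging a real-time signal on lease break, per the F_SETSIG note in
 * the kernel-doc above. With an RT signal, si_fd identifies the fd whose
 * lease is being broken. Error handling is elided.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>

static void on_lease_break(int sig, siginfo_t *si, void *uctx)
{
	/* Relinquish promptly; fcntl() is async-signal-safe. */
	fcntl(si->si_fd, F_SETLEASE, F_UNLCK);
}

static void take_read_lease(int fd)
{
	struct sigaction sa = {
		.sa_sigaction	= on_lease_break,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGRTMIN, &sa, NULL);
	fcntl(fd, F_SETSIG, SIGRTMIN);	/* deliver SIGRTMIN with si_fd */
	fcntl(fd, F_SETLEASE, F_RDLCK);
}
#endif
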
1998 : /**
1999 : * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
2000 : * @inode: inode of the file to apply to
2001 : * @fl: The lock to be applied
2002 : *
2003 : * Apply a FLOCK style lock request to an inode.
2004 : */
2005 0 : static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2006 : {
2007 : int error;
2008 : might_sleep();
2009 : for (;;) {
2010 0 : error = flock_lock_inode(inode, fl);
2011 0 : if (error != FILE_LOCK_DEFERRED)
2012 : break;
2013 0 : error = wait_event_interruptible(fl->fl_wait,
2014 : list_empty(&fl->fl_blocked_member));
2015 0 : if (error)
2016 : break;
2017 : }
2018 0 : locks_delete_block(fl);
2019 0 : return error;
2020 : }
2021 :
2022 : /**
2023 : * locks_lock_inode_wait - Apply a lock to an inode
2024 : * @inode: inode of the file to apply to
2025 : * @fl: The lock to be applied
2026 : *
2027 : * Apply a POSIX or FLOCK style lock request to an inode.
2028 : */
2029 0 : int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2030 : {
2031 0 : int res = 0;
2032 0 : switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2033 : case FL_POSIX:
2034 0 : res = posix_lock_inode_wait(inode, fl);
2035 0 : break;
2036 : case FL_FLOCK:
2037 0 : res = flock_lock_inode_wait(inode, fl);
2038 0 : break;
2039 : default:
2040 0 : BUG();
2041 : }
2042 0 : return res;
2043 : }
2044 : EXPORT_SYMBOL(locks_lock_inode_wait);
2045 :
2046 : /**
2047 : * sys_flock - flock() system call.
2048 : * @fd: the file descriptor to lock.
2049 : * @cmd: the type of lock to apply.
2050 : *
2051 : * Apply a %FL_FLOCK style lock to an open file descriptor.
2052 : * The @cmd can be one of:
2053 : *
2054 : * - %LOCK_SH -- a shared lock.
2055 : * - %LOCK_EX -- an exclusive lock.
2056 : * - %LOCK_UN -- remove an existing lock.
2057 : * - %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
2058 : *
2059 : * %LOCK_MAND support has been removed from the kernel.
2060 : */
2061 0 : SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2062 : {
2063 : int can_sleep, error, type;
2064 : struct file_lock fl;
2065 : struct fd f;
2066 :
2067 : /*
2068 : * LOCK_MAND locks were broken for a long time in that they never
2069 : * conflicted with one another and didn't prevent any sort of open,
2070 : * read or write activity.
2071 : *
2072 : * Just ignore these requests now, to preserve legacy behavior, but
2073 : * throw a warning to let people know that they don't actually work.
2074 : */
2075 0 : if (cmd & LOCK_MAND) {
2076 0 : pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
2077 : return 0;
2078 : }
2079 :
2080 0 : type = flock_translate_cmd(cmd & ~LOCK_NB);
2081 0 : if (type < 0)
2082 0 : return type;
2083 :
2084 0 : error = -EBADF;
2085 0 : f = fdget(fd);
2086 0 : if (!f.file)
2087 : return error;
2088 :
2089 0 : if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
2090 : goto out_putf;
2091 :
2092 0 : flock_make_lock(f.file, &fl, type);
2093 :
2094 0 : error = security_file_lock(f.file, fl.fl_type);
2095 : if (error)
2096 : goto out_putf;
2097 :
2098 0 : can_sleep = !(cmd & LOCK_NB);
2099 0 : if (can_sleep)
2100 0 : fl.fl_flags |= FL_SLEEP;
2101 :
2102 0 : if (f.file->f_op->flock)
2103 0 : error = f.file->f_op->flock(f.file,
2104 : (can_sleep) ? F_SETLKW : F_SETLK,
2105 : &fl);
2106 : else
2107 0 : error = locks_lock_file_wait(f.file, &fl);
2108 :
2109 0 : locks_release_private(&fl);
2110 : out_putf:
2111 0 : fdput(f);
2112 :
2113 0 : return error;
2114 : }
2115 :
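/*
 * Editor's sketch (illustrative userspace code): the advisory whole-file
 * lock this syscall provides. LOCK_NB turns the blocking wait performed by
 * locks_lock_file_wait() above into an immediate EWOULDBLOCK.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <sys/file.h>

static int with_file_locked(int fd)
{
	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
		if (errno == EWOULDBLOCK)
			fprintf(stderr, "lock held elsewhere, not waiting\n");
		return -1;
	}
	/* ... exclusive critical section ... */
	return flock(fd, LOCK_UN);
}
#endif
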
2116 : /**
2117 : * vfs_test_lock - test file byte range lock
2118 : * @filp: The file to test lock for
2119 : * @fl: The lock to test; also used to hold result
2120 : *
2121 : * Returns -ERRNO on failure. Indicates the presence of a conflicting lock by
2122 : * setting fl->fl_type to something other than F_UNLCK.
2123 : */
2124 0 : int vfs_test_lock(struct file *filp, struct file_lock *fl)
2125 : {
2126 0 : WARN_ON_ONCE(filp != fl->fl_file);
2127 0 : if (filp->f_op->lock)
2128 0 : return filp->f_op->lock(filp, F_GETLK, fl);
2129 0 : posix_test_lock(filp, fl);
2130 0 : return 0;
2131 : }
2132 : EXPORT_SYMBOL_GPL(vfs_test_lock);
2133 :
2134 : /**
2135 : * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2136 : * @fl: The file_lock whose fl_pid should be translated
2137 : * @ns: The namespace into which the pid should be translated
2138 : *
2139 : * Used to translate an fl_pid into a namespace-virtual pid number
2140 : */
2141 0 : static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2142 : {
2143 : pid_t vnr;
2144 : struct pid *pid;
2145 :
2146 0 : if (IS_OFDLCK(fl))
2147 : return -1;
2148 0 : if (IS_REMOTELCK(fl))
2149 0 : return fl->fl_pid;
2150 : /*
2151 : * If the flock owner process is dead and its pid has already been
2152 : * freed, the translation below won't work, but we still want to show
2153 : * the flock owner's pid number in the init pidns.
2154 : */
2155 0 : if (ns == &init_pid_ns)
2156 0 : return (pid_t)fl->fl_pid;
2157 :
2158 : rcu_read_lock();
2159 0 : pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2160 0 : vnr = pid_nr_ns(pid, ns);
2161 : rcu_read_unlock();
2162 : return vnr;
2163 : }
2164 :
2165 0 : static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2166 : {
2167 0 : flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2168 : #if BITS_PER_LONG == 32
2169 : /*
2170 : * Make sure we can represent the posix lock via
2171 : * legacy 32bit flock.
2172 : */
2173 : if (fl->fl_start > OFFT_OFFSET_MAX)
2174 : return -EOVERFLOW;
2175 : if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2176 : return -EOVERFLOW;
2177 : #endif
2178 0 : flock->l_start = fl->fl_start;
2179 0 : flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2180 0 : fl->fl_end - fl->fl_start + 1;
2181 0 : flock->l_whence = 0;
2182 0 : flock->l_type = fl->fl_type;
2183 0 : return 0;
2184 : }
2185 :
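/*
 * Editor's note, a worked example of the conversion above: a request with
 * l_start = 100, l_len = 0 ("to EOF") is held internally as fl_start = 100,
 * fl_end = OFFSET_MAX and converts back to l_len = 0; a lock on bytes
 * 100..199 has fl_end = 199 and converts to l_len = 199 - 100 + 1 = 100.
 */
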
2186 : #if BITS_PER_LONG == 32
2187 : static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2188 : {
2189 : flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2190 : flock->l_start = fl->fl_start;
2191 : flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2192 : fl->fl_end - fl->fl_start + 1;
2193 : flock->l_whence = 0;
2194 : flock->l_type = fl->fl_type;
2195 : }
2196 : #endif
2197 :
2198 : /* Report the first existing lock that would conflict with the lock described in flock.
2199 : * This implements the F_GETLK command of fcntl().
2200 : */
2201 0 : int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2202 : {
2203 : struct file_lock *fl;
2204 : int error;
2205 :
2206 0 : fl = locks_alloc_lock();
2207 0 : if (fl == NULL)
2208 : return -ENOMEM;
2209 0 : error = -EINVAL;
2210 0 : if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2211 : goto out;
2212 :
2213 0 : error = flock_to_posix_lock(filp, fl, flock);
2214 0 : if (error)
2215 : goto out;
2216 :
2217 0 : if (cmd == F_OFD_GETLK) {
2218 0 : error = -EINVAL;
2219 0 : if (flock->l_pid != 0)
2220 : goto out;
2221 :
2222 0 : fl->fl_flags |= FL_OFDLCK;
2223 0 : fl->fl_owner = filp;
2224 : }
2225 :
2226 0 : error = vfs_test_lock(filp, fl);
2227 0 : if (error)
2228 : goto out;
2229 :
2230 0 : flock->l_type = fl->fl_type;
2231 0 : if (fl->fl_type != F_UNLCK) {
2232 0 : error = posix_lock_to_flock(flock, fl);
2233 : if (error)
2234 : goto out;
2235 : }
2236 : out:
2237 0 : locks_free_lock(fl);
2238 0 : return error;
2239 : }
2240 :
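/*
 * Editor's sketch (illustrative userspace code): probing for conflicts with
 * F_OFD_GETLK. As enforced above, l_pid must be zero on input; on return,
 * l_type is either F_UNLCK or describes the first conflicting lock.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>

static int range_is_free(int fd, off_t start, off_t len)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));	/* leaves l_pid == 0, as required */
	fl.l_type = F_WRLCK;		/* test against both readers and writers */
	fl.l_whence = SEEK_SET;
	fl.l_start = start;
	fl.l_len = len;
	if (fcntl(fd, F_OFD_GETLK, &fl) == -1)
		return -1;
	return fl.l_type == F_UNLCK;
}
#endif
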
2241 : /**
2242 : * vfs_lock_file - file byte range lock
2243 : * @filp: The file to apply the lock to
2244 : * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2245 : * @fl: The lock to be applied
2246 : * @conf: Place to return a copy of the conflicting lock, if found.
2247 : *
2248 : * A caller that doesn't care about the conflicting lock may pass NULL
2249 : * as the final argument.
2250 : *
2251 : * If the filesystem defines a private ->lock() method, then @conf will
2252 : * be left unchanged; so a caller that cares should initialize it to
2253 : * some acceptable default.
2254 : *
2255 : * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2256 : * locks, the ->lock() interface may return asynchronously, before the lock has
2257 : * been granted or denied by the underlying filesystem, if (and only if)
2258 : * lm_grant is set. Callers expecting ->lock() to return asynchronously
2259 : * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
2260 : * the request is for a blocking lock. When ->lock() does return asynchronously,
2261 : * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
2262 : * request completes.
2263 : * If the request is for a non-blocking lock, the file system should return
2264 : * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2265 : * with the result. If the request timed out, the callback routine will return a
2266 : * nonzero return code and the file system should release the lock. The file
2267 : * system is also responsible for keeping a corresponding posix lock when it
2268 : * grants a lock, so the VFS can find out which locks are locally held and do
2269 : * the correct lock cleanup when required.
2270 : * The underlying filesystem must not drop the kernel lock or call
2271 : * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2272 : * return code.
2273 : */
2274 0 : int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2275 : {
2276 0 : WARN_ON_ONCE(filp != fl->fl_file);
2277 0 : if (filp->f_op->lock)
2278 0 : return filp->f_op->lock(filp, cmd, fl);
2279 : else
2280 0 : return posix_lock_file(filp, fl, conf);
2281 : }
2282 : EXPORT_SYMBOL_GPL(vfs_lock_file);
2283 :
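/*
 * Editor's sketch (hypothetical filesystem code): the asynchronous ->lock
 * contract documented above. demo_queue_remote_request() is an invented
 * helper standing in for whatever RPC the filesystem issues; the
 * FILE_LOCK_DEFERRED / ->lm_grant() handshake is the documented part.
 */
#if 0
static int demo_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	if (cmd == F_GETLK) {
		posix_test_lock(filp, fl);	/* conflict check is synchronous */
		return 0;
	}
	/* Queue the request and answer later, without blocking the caller. */
	demo_queue_remote_request(filp, fl);
	return FILE_LOCK_DEFERRED;
}

/* Completion path, invoked when the remote server answers: */
static void demo_request_done(struct file_lock *fl, int result)
{
	fl->fl_lmops->lm_grant(fl, result);	/* 0 on success, -errno on failure */
}
#endif
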
2284 0 : static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2285 : struct file_lock *fl)
2286 : {
2287 : int error;
2288 :
2289 0 : error = security_file_lock(filp, fl->fl_type);
2290 : if (error)
2291 : return error;
2292 :
2293 : for (;;) {
2294 0 : error = vfs_lock_file(filp, cmd, fl, NULL);
2295 0 : if (error != FILE_LOCK_DEFERRED)
2296 : break;
2297 0 : error = wait_event_interruptible(fl->fl_wait,
2298 : list_empty(&fl->fl_blocked_member));
2299 0 : if (error)
2300 : break;
2301 : }
2302 0 : locks_delete_block(fl);
2303 :
2304 : return error;
2305 : }
2306 :
2307 : /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2308 : static int
2309 : check_fmode_for_setlk(struct file_lock *fl)
2310 : {
2311 0 : switch (fl->fl_type) {
2312 : case F_RDLCK:
2313 0 : if (!(fl->fl_file->f_mode & FMODE_READ))
2314 : return -EBADF;
2315 : break;
2316 : case F_WRLCK:
2317 0 : if (!(fl->fl_file->f_mode & FMODE_WRITE))
2318 : return -EBADF;
2319 : }
2320 : return 0;
2321 : }
2322 :
2323 : /* Apply the lock described in flock to an open file descriptor.
2324 : * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2325 : */
2326 0 : int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2327 : struct flock *flock)
2328 : {
2329 0 : struct file_lock *file_lock = locks_alloc_lock();
2330 0 : struct inode *inode = file_inode(filp);
2331 : struct file *f;
2332 : int error;
2333 :
2334 0 : if (file_lock == NULL)
2335 : return -ENOLCK;
2336 :
2337 0 : error = flock_to_posix_lock(filp, file_lock, flock);
2338 0 : if (error)
2339 : goto out;
2340 :
2341 0 : error = check_fmode_for_setlk(file_lock);
2342 0 : if (error)
2343 : goto out;
2344 :
2345 : /*
2346 : * If the cmd is requesting open file description locks, then set the
2347 : * FL_OFDLCK flag and override the owner.
2348 : */
2349 0 : switch (cmd) {
2350 : case F_OFD_SETLK:
2351 0 : error = -EINVAL;
2352 0 : if (flock->l_pid != 0)
2353 : goto out;
2354 :
2355 0 : cmd = F_SETLK;
2356 0 : file_lock->fl_flags |= FL_OFDLCK;
2357 0 : file_lock->fl_owner = filp;
2358 0 : break;
2359 : case F_OFD_SETLKW:
2360 0 : error = -EINVAL;
2361 0 : if (flock->l_pid != 0)
2362 : goto out;
2363 :
2364 0 : cmd = F_SETLKW;
2365 0 : file_lock->fl_flags |= FL_OFDLCK;
2366 0 : file_lock->fl_owner = filp;
2367 : fallthrough;
2368 : case F_SETLKW:
2369 0 : file_lock->fl_flags |= FL_SLEEP;
2370 : }
2371 :
2372 0 : error = do_lock_file_wait(filp, cmd, file_lock);
2373 :
2374 : /*
2375 : * Attempt to detect a close/fcntl race and recover by releasing the
2376 : * lock that was just acquired. There is no need to do that when we're
2377 : * unlocking though, or for OFD locks.
2378 : */
2379 0 : if (!error && file_lock->fl_type != F_UNLCK &&
2380 0 : !(file_lock->fl_flags & FL_OFDLCK)) {
2381 0 : struct files_struct *files = current->files;
2382 : /*
2383 : * We need that spin_lock here - it prevents reordering between
2384 : * the update of i_flctx->flc_posix and the check for it done in
2385 : * close(). rcu_read_lock() wouldn't do.
2386 : */
2387 0 : spin_lock(&files->file_lock);
2388 0 : f = files_lookup_fd_locked(files, fd);
2389 0 : spin_unlock(&files->file_lock);
2390 0 : if (f != filp) {
2391 0 : file_lock->fl_type = F_UNLCK;
2392 0 : error = do_lock_file_wait(filp, cmd, file_lock);
2393 0 : WARN_ON_ONCE(error);
2394 : error = -EBADF;
2395 : }
2396 : }
2397 : out:
2398 0 : trace_fcntl_setlk(inode, file_lock, error);
2399 0 : locks_free_lock(file_lock);
2400 0 : return error;
2401 : }
2402 :
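/*
 * Editor's sketch (illustrative userspace code): an OFD lock. Ownership
 * follows the open file description rather than the process, so the
 * close/fcntl race handled above does not apply and FL_OFDLCK skips the
 * recovery path.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>

static int lock_range(int fd, off_t start, off_t len)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));	/* l_pid must be 0, as checked above */
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = start;
	fl.l_len = len;
	return fcntl(fd, F_OFD_SETLKW, &fl);	/* blocking variant */
}
#endif
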
2403 : #if BITS_PER_LONG == 32
2404 : /* Report the first existing lock that would conflict with the lock described in flock.
2405 : * This implements the F_GETLK command of fcntl().
2406 : */
2407 : int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2408 : {
2409 : struct file_lock *fl;
2410 : int error;
2411 :
2412 : fl = locks_alloc_lock();
2413 : if (fl == NULL)
2414 : return -ENOMEM;
2415 :
2416 : error = -EINVAL;
2417 : if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2418 : goto out;
2419 :
2420 : error = flock64_to_posix_lock(filp, fl, flock);
2421 : if (error)
2422 : goto out;
2423 :
2424 : if (cmd == F_OFD_GETLK) {
2425 : error = -EINVAL;
2426 : if (flock->l_pid != 0)
2427 : goto out;
2428 :
2429 : fl->fl_flags |= FL_OFDLCK;
2430 : fl->fl_owner = filp;
2431 : }
2432 :
2433 : error = vfs_test_lock(filp, fl);
2434 : if (error)
2435 : goto out;
2436 :
2437 : flock->l_type = fl->fl_type;
2438 : if (fl->fl_type != F_UNLCK)
2439 : posix_lock_to_flock64(flock, fl);
2440 :
2441 : out:
2442 : locks_free_lock(fl);
2443 : return error;
2444 : }
2445 :
2446 : /* Apply the lock described in flock to an open file descriptor.
2447 : * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2448 : */
2449 : int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2450 : struct flock64 *flock)
2451 : {
2452 : struct file_lock *file_lock = locks_alloc_lock();
2453 : struct file *f;
2454 : int error;
2455 :
2456 : if (file_lock == NULL)
2457 : return -ENOLCK;
2458 :
2459 : error = flock64_to_posix_lock(filp, file_lock, flock);
2460 : if (error)
2461 : goto out;
2462 :
2463 : error = check_fmode_for_setlk(file_lock);
2464 : if (error)
2465 : goto out;
2466 :
2467 : /*
2468 : * If the cmd is requesting open file description locks, then set the
2469 : * FL_OFDLCK flag and override the owner.
2470 : */
2471 : switch (cmd) {
2472 : case F_OFD_SETLK:
2473 : error = -EINVAL;
2474 : if (flock->l_pid != 0)
2475 : goto out;
2476 :
2477 : cmd = F_SETLK64;
2478 : file_lock->fl_flags |= FL_OFDLCK;
2479 : file_lock->fl_owner = filp;
2480 : break;
2481 : case F_OFD_SETLKW:
2482 : error = -EINVAL;
2483 : if (flock->l_pid != 0)
2484 : goto out;
2485 :
2486 : cmd = F_SETLKW64;
2487 : file_lock->fl_flags |= FL_OFDLCK;
2488 : file_lock->fl_owner = filp;
2489 : fallthrough;
2490 : case F_SETLKW64:
2491 : file_lock->fl_flags |= FL_SLEEP;
2492 : }
2493 :
2494 : error = do_lock_file_wait(filp, cmd, file_lock);
2495 :
2496 : /*
2497 : * Attempt to detect a close/fcntl race and recover by releasing the
2498 : * lock that was just acquired. There is no need to do that when we're
2499 : * unlocking though, or for OFD locks.
2500 : */
2501 : if (!error && file_lock->fl_type != F_UNLCK &&
2502 : !(file_lock->fl_flags & FL_OFDLCK)) {
2503 : struct files_struct *files = current->files;
2504 : /*
2505 : * We need that spin_lock here - it prevents reordering between
2506 : * the update of i_flctx->flc_posix and the check for it done in
2507 : * close(). rcu_read_lock() wouldn't do.
2508 : */
2509 : spin_lock(&files->file_lock);
2510 : f = files_lookup_fd_locked(files, fd);
2511 : spin_unlock(&files->file_lock);
2512 : if (f != filp) {
2513 : file_lock->fl_type = F_UNLCK;
2514 : error = do_lock_file_wait(filp, cmd, file_lock);
2515 : WARN_ON_ONCE(error);
2516 : error = -EBADF;
2517 : }
2518 : }
2519 : out:
2520 : locks_free_lock(file_lock);
2521 : return error;
2522 : }
2523 : #endif /* BITS_PER_LONG == 32 */
2524 :
2525 : /*
2526 : * This function is called when the file is being removed
2527 : * from the task's fd array. POSIX locks belonging to this task
2528 : * are deleted at this time.
2529 : */
2530 0 : void locks_remove_posix(struct file *filp, fl_owner_t owner)
2531 : {
2532 : int error;
2533 0 : struct inode *inode = file_inode(filp);
2534 : struct file_lock lock;
2535 : struct file_lock_context *ctx;
2536 :
2537 : /*
2538 : * If there are no locks held on this file, we don't need to call
2539 : * posix_lock_file(). Another process could be setting a lock on this
2540 : * file at the same time, but we wouldn't remove that lock anyway.
2541 : */
2542 0 : ctx = locks_inode_context(inode);
2543 0 : if (!ctx || list_empty(&ctx->flc_posix))
2544 0 : return;
2545 :
2546 0 : locks_init_lock(&lock);
2547 0 : lock.fl_type = F_UNLCK;
2548 0 : lock.fl_flags = FL_POSIX | FL_CLOSE;
2549 0 : lock.fl_start = 0;
2550 0 : lock.fl_end = OFFSET_MAX;
2551 0 : lock.fl_owner = owner;
2552 0 : lock.fl_pid = current->tgid;
2553 0 : lock.fl_file = filp;
2554 0 : lock.fl_ops = NULL;
2555 0 : lock.fl_lmops = NULL;
2556 :
2557 0 : error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2558 :
2559 0 : if (lock.fl_ops && lock.fl_ops->fl_release_private)
2560 0 : lock.fl_ops->fl_release_private(&lock);
2561 0 : trace_locks_remove_posix(inode, &lock, error);
2562 : }
2563 : EXPORT_SYMBOL(locks_remove_posix);
2564 :
2565 : /* The i_flctx must be valid when calling into here */
2566 : static void
2567 0 : locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2568 : {
2569 : struct file_lock fl;
2570 0 : struct inode *inode = file_inode(filp);
2571 :
2572 0 : if (list_empty(&flctx->flc_flock))
2573 0 : return;
2574 :
2575 0 : flock_make_lock(filp, &fl, F_UNLCK);
2576 0 : fl.fl_flags |= FL_CLOSE;
2577 :
2578 0 : if (filp->f_op->flock)
2579 0 : filp->f_op->flock(filp, F_SETLKW, &fl);
2580 : else
2581 0 : flock_lock_inode(inode, &fl);
2582 :
2583 0 : if (fl.fl_ops && fl.fl_ops->fl_release_private)
2584 0 : fl.fl_ops->fl_release_private(&fl);
2585 : }
2586 :
2587 : /* The i_flctx must be valid when calling into here */
2588 : static void
2589 0 : locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2590 : {
2591 : struct file_lock *fl, *tmp;
2592 0 : LIST_HEAD(dispose);
2593 :
2594 0 : if (list_empty(&ctx->flc_lease))
2595 0 : return;
2596 :
2597 0 : percpu_down_read(&file_rwsem);
2598 0 : spin_lock(&ctx->flc_lock);
2599 0 : list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2600 0 : if (filp == fl->fl_file)
2601 0 : lease_modify(fl, F_UNLCK, &dispose);
2602 0 : spin_unlock(&ctx->flc_lock);
2603 0 : percpu_up_read(&file_rwsem);
2604 :
2605 0 : locks_dispose_list(&dispose);
2606 : }
2607 :
2608 : /*
2609 : * This function is called on the last close of an open file.
2610 : */
2611 0 : void locks_remove_file(struct file *filp)
2612 : {
2613 : struct file_lock_context *ctx;
2614 :
2615 0 : ctx = locks_inode_context(file_inode(filp));
2616 0 : if (!ctx)
2617 : return;
2618 :
2619 : /* remove any OFD locks */
2620 0 : locks_remove_posix(filp, filp);
2621 :
2622 : /* remove flock locks */
2623 0 : locks_remove_flock(filp, ctx);
2624 :
2625 : /* remove any leases */
2626 0 : locks_remove_lease(filp, ctx);
2627 :
2628 0 : spin_lock(&ctx->flc_lock);
2629 0 : locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2630 0 : locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2631 0 : locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2632 0 : spin_unlock(&ctx->flc_lock);
2633 : }
2634 :
2635 : /**
2636 : * vfs_cancel_lock - file byte range unblock lock
2637 : * @filp: The file to apply the unblock to
2638 : * @fl: The lock to be unblocked
2639 : *
2640 : * Used by lock managers to cancel blocked requests
2641 : */
2642 0 : int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2643 : {
2644 0 : WARN_ON_ONCE(filp != fl->fl_file);
2645 0 : if (filp->f_op->lock)
2646 0 : return filp->f_op->lock(filp, F_CANCELLK, fl);
2647 : return 0;
2648 : }
2649 : EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2650 :
2651 : /**
2652 : * vfs_inode_has_locks - are any file locks held on @inode?
2653 : * @inode: inode to check for locks
2654 : *
2655 : * Return true if there are any FL_POSIX or FL_FLOCK locks currently
2656 : * set on @inode.
2657 : */
2658 0 : bool vfs_inode_has_locks(struct inode *inode)
2659 : {
2660 : struct file_lock_context *ctx;
2661 : bool ret;
2662 :
2663 0 : ctx = locks_inode_context(inode);
2664 0 : if (!ctx)
2665 : return false;
2666 :
2667 0 : spin_lock(&ctx->flc_lock);
2668 0 : ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
2669 0 : spin_unlock(&ctx->flc_lock);
2670 0 : return ret;
2671 : }
2672 : EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
2673 :
2674 : #ifdef CONFIG_PROC_FS
2675 : #include <linux/proc_fs.h>
2676 : #include <linux/seq_file.h>
2677 :
2678 : struct locks_iterator {
2679 : int li_cpu;
2680 : loff_t li_pos;
2681 : };
2682 :
2683 0 : static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2684 : loff_t id, char *pfx, int repeat)
2685 : {
2686 0 : struct inode *inode = NULL;
2687 : unsigned int fl_pid;
2688 0 : struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2689 : int type;
2690 :
2691 0 : fl_pid = locks_translate_pid(fl, proc_pidns);
2692 : /*
2693 : * If the lock owner is dead (and its pid has been freed) or not visible
2694 : * in the current pidns, zero is shown as the pid value. Check the lock
2695 : * info from init_pid_ns to get the saved lock pid value.
2696 : */
2697 :
2698 0 : if (fl->fl_file != NULL)
2699 0 : inode = file_inode(fl->fl_file);
2700 :
2701 0 : seq_printf(f, "%lld: ", id);
2702 :
2703 0 : if (repeat)
2704 0 : seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
2705 :
2706 0 : if (IS_POSIX(fl)) {
2707 0 : if (fl->fl_flags & FL_ACCESS)
2708 0 : seq_puts(f, "ACCESS");
2709 0 : else if (IS_OFDLCK(fl))
2710 0 : seq_puts(f, "OFDLCK");
2711 : else
2712 0 : seq_puts(f, "POSIX ");
2713 :
2714 0 : seq_printf(f, " %s ",
2715 : (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
2716 0 : } else if (IS_FLOCK(fl)) {
2717 0 : seq_puts(f, "FLOCK ADVISORY ");
2718 0 : } else if (IS_LEASE(fl)) {
2719 0 : if (fl->fl_flags & FL_DELEG)
2720 0 : seq_puts(f, "DELEG ");
2721 : else
2722 0 : seq_puts(f, "LEASE ");
2723 :
2724 0 : if (lease_breaking(fl))
2725 0 : seq_puts(f, "BREAKING ");
2726 0 : else if (fl->fl_file)
2727 0 : seq_puts(f, "ACTIVE ");
2728 : else
2729 0 : seq_puts(f, "BREAKER ");
2730 : } else {
2731 0 : seq_puts(f, "UNKNOWN UNKNOWN ");
2732 : }
2733 0 : type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
2734 :
2735 0 : seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
2736 0 : (type == F_RDLCK) ? "READ" : "UNLCK");
2737 0 : if (inode) {
2738 : /* userspace relies on this representation of dev_t */
2739 0 : seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
2740 0 : MAJOR(inode->i_sb->s_dev),
2741 0 : MINOR(inode->i_sb->s_dev), inode->i_ino);
2742 : } else {
2743 0 : seq_printf(f, "%d <none>:0 ", fl_pid);
2744 : }
2745 0 : if (IS_POSIX(fl)) {
2746 0 : if (fl->fl_end == OFFSET_MAX)
2747 0 : seq_printf(f, "%Ld EOF\n", fl->fl_start);
2748 : else
2749 0 : seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2750 : } else {
2751 0 : seq_puts(f, "0 EOF\n");
2752 : }
2753 0 : }
2754 :
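/*
 * Editor's note: an illustrative /proc/locks line as emitted above (all
 * values invented):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:131090 0 EOF
 *
 * i.e. id, lock class, ADVISORY, type, pid, major:minor:inode, then the
 * byte range, with "EOF" standing for a lock that runs to OFFSET_MAX.
 */
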
2755 : static struct file_lock *get_next_blocked_member(struct file_lock *node)
2756 : {
2757 : struct file_lock *tmp;
2758 :
2759 : /* NULL node or root node */
2760 0 : if (node == NULL || node->fl_blocker == NULL)
2761 : return NULL;
2762 :
2763 : /* The next member in the list could wrap back to the node itself */
2764 0 : tmp = list_next_entry(node, fl_blocked_member);
2765 0 : if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
2766 0 : || tmp == node) {
2767 : return NULL;
2768 : }
2769 :
2770 : return tmp;
2771 : }
2772 :
2773 0 : static int locks_show(struct seq_file *f, void *v)
2774 : {
2775 0 : struct locks_iterator *iter = f->private;
2776 : struct file_lock *cur, *tmp;
2777 0 : struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2778 0 : int level = 0;
2779 :
2780 0 : cur = hlist_entry(v, struct file_lock, fl_link);
2781 :
2782 0 : if (locks_translate_pid(cur, proc_pidns) == 0)
2783 : return 0;
2784 :
2785 : /* View this cross-linked list as a binary tree: the first member of fl_blocked_requests
2786 : * is the left child of the current node, the next sibling in fl_blocked_member is the
2787 : * right child, and we can get the parent of the current node from fl_blocker, so the
2788 : * problem becomes an ordinary binary-tree traversal.
2789 : */
2790 0 : while (cur != NULL) {
2791 0 : if (level)
2792 0 : lock_get_status(f, cur, iter->li_pos, "-> ", level);
2793 : else
2794 0 : lock_get_status(f, cur, iter->li_pos, "", level);
2795 :
2796 0 : if (!list_empty(&cur->fl_blocked_requests)) {
2797 : /* Turn left */
2798 0 : cur = list_first_entry_or_null(&cur->fl_blocked_requests,
2799 : struct file_lock, fl_blocked_member);
2800 0 : level++;
2801 : } else {
2802 : /* Turn right */
2803 : tmp = get_next_blocked_member(cur);
2804 : /* Fall back to parent node */
2805 0 : while (tmp == NULL && cur->fl_blocker != NULL) {
2806 0 : cur = cur->fl_blocker;
2807 0 : level--;
2808 : tmp = get_next_blocked_member(cur);
2809 : }
2810 : cur = tmp;
2811 : }
2812 : }
2813 :
2814 : return 0;
2815 : }
2816 :
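/*
 * Editor's note: with blocked waiters, the traversal above prints a single
 * "-> " per waiter line, right-aligned one column further per extra tree
 * level (sample output, values invented):
 *
 *	1: POSIX  ADVISORY  WRITE 1000 08:01:131090 0 EOF
 *	1: -> POSIX  ADVISORY  WRITE 1001 08:01:131090 0 EOF
 *	1:  -> POSIX  ADVISORY  WRITE 1002 08:01:131090 0 EOF
 */
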
2817 0 : static void __show_fd_locks(struct seq_file *f,
2818 : struct list_head *head, int *id,
2819 : struct file *filp, struct files_struct *files)
2820 : {
2821 : struct file_lock *fl;
2822 :
2823 0 : list_for_each_entry(fl, head, fl_list) {
2824 :
2825 0 : if (filp != fl->fl_file)
2826 0 : continue;
2827 0 : if (fl->fl_owner != files &&
2828 : fl->fl_owner != filp)
2829 0 : continue;
2830 :
2831 0 : (*id)++;
2832 0 : seq_puts(f, "lock:\t");
2833 0 : lock_get_status(f, fl, *id, "", 0);
2834 : }
2835 0 : }
2836 :
2837 0 : void show_fd_locks(struct seq_file *f,
2838 : struct file *filp, struct files_struct *files)
2839 : {
2840 0 : struct inode *inode = file_inode(filp);
2841 : struct file_lock_context *ctx;
2842 0 : int id = 0;
2843 :
2844 0 : ctx = locks_inode_context(inode);
2845 0 : if (!ctx)
2846 0 : return;
2847 :
2848 0 : spin_lock(&ctx->flc_lock);
2849 0 : __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2850 0 : __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2851 0 : __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2852 0 : spin_unlock(&ctx->flc_lock);
2853 : }
2854 :
2855 0 : static void *locks_start(struct seq_file *f, loff_t *pos)
2856 : __acquires(&blocked_lock_lock)
2857 : {
2858 0 : struct locks_iterator *iter = f->private;
2859 :
2860 0 : iter->li_pos = *pos + 1;
2861 0 : percpu_down_write(&file_rwsem);
2862 0 : spin_lock(&blocked_lock_lock);
2863 0 : return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2864 : }
2865 :
2866 0 : static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2867 : {
2868 0 : struct locks_iterator *iter = f->private;
2869 :
2870 0 : ++iter->li_pos;
2871 0 : return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2872 : }
2873 :
2874 0 : static void locks_stop(struct seq_file *f, void *v)
2875 : __releases(&blocked_lock_lock)
2876 : {
2877 0 : spin_unlock(&blocked_lock_lock);
2878 0 : percpu_up_write(&file_rwsem);
2879 0 : }
2880 :
2881 : static const struct seq_operations locks_seq_operations = {
2882 : .start = locks_start,
2883 : .next = locks_next,
2884 : .stop = locks_stop,
2885 : .show = locks_show,
2886 : };
2887 :
2888 1 : static int __init proc_locks_init(void)
2889 : {
2890 1 : proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2891 : sizeof(struct locks_iterator), NULL);
2892 1 : return 0;
2893 : }
2894 : fs_initcall(proc_locks_init);
2895 : #endif
2896 :
2897 1 : static int __init filelock_init(void)
2898 : {
2899 : int i;
2900 :
2901 1 : flctx_cache = kmem_cache_create("file_lock_ctx",
2902 : sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2903 :
2904 1 : filelock_cache = kmem_cache_create("file_lock_cache",
2905 : sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2906 :
2907 2 : for_each_possible_cpu(i) {
2908 1 : struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2909 :
2910 1 : spin_lock_init(&fll->lock);
2911 1 : INIT_HLIST_HEAD(&fll->hlist);
2912 : }
2913 :
2914 : lease_notifier_chain_init();
2915 1 : return 0;
2916 : }
2917 : core_initcall(filelock_init);