LCOV - code coverage report
Current view:  top level - kernel/rcu - tiny.c  (source / functions)
Test:          coverage.info
Date:          2023-08-24 13:40:31

                      Hit   Total   Coverage
Lines:                 53      75     70.7 %
Functions:              6      12     50.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0+
       2             : /*
       3             :  * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
       4             :  *
       5             :  * Copyright IBM Corporation, 2008
       6             :  *
       7             :  * Author: Paul E. McKenney <paulmck@linux.ibm.com>
       8             :  *
       9             :  * For detailed explanation of Read-Copy Update mechanism see -
      10             :  *              Documentation/RCU
      11             :  */
      12             : #include <linux/completion.h>
      13             : #include <linux/interrupt.h>
      14             : #include <linux/notifier.h>
      15             : #include <linux/rcupdate_wait.h>
      16             : #include <linux/kernel.h>
      17             : #include <linux/export.h>
      18             : #include <linux/mutex.h>
      19             : #include <linux/sched.h>
      20             : #include <linux/types.h>
      21             : #include <linux/init.h>
      22             : #include <linux/time.h>
      23             : #include <linux/cpu.h>
      24             : #include <linux/prefetch.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/mm.h>
      27             : 
      28             : #include "rcu.h"
      29             : 
      30             : /* Global control variables for rcupdate callback mechanism. */
      31             : struct rcu_ctrlblk {
      32             :         struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
      33             :         struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
      34             :         struct rcu_head **curtail;      /* ->next pointer of last CB. */
      35             :         unsigned long gp_seq;           /* Grace-period counter. */
      36             : };
      37             : 
      38             : /* Definition for rcupdate control block. */
      39             : static struct rcu_ctrlblk rcu_ctrlblk = {
      40             :         .donetail       = &rcu_ctrlblk.rcucblist,
      41             :         .curtail        = &rcu_ctrlblk.rcucblist,
      42             :         .gp_seq         = 0 - 300UL,
      43             : };
      44             : 
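The control block above threads all callbacks onto one singly linked list
through indirect tail pointers: rcucblist is the head, donetail points at the
->next field of the last callback whose grace period has elapsed, and curtail
at the ->next field of the last callback overall.  Starting both tails at
&rcu_ctrlblk.rcucblist removes any empty-list special case, and initializing
gp_seq to 0 - 300UL (which wraps to 300 steps before zero) plausibly serves to
exercise counter wraparound early.  A minimal user-space sketch of the
tail-pointer idiom; struct cb and enqueue() are illustrative names, not part
of the kernel source:

        /* Illustrative user-space sketch, not kernel code. */
        #include <assert.h>
        #include <stddef.h>

        struct cb {
                struct cb *next;
        };

        static struct cb *list;              /* plays the role of rcucblist */
        static struct cb **curtail = &list;  /* plays the role of curtail   */

        static void enqueue(struct cb *p)
        {
                p->next = NULL;
                *curtail = p;        /* appends even when the list is empty */
                curtail = &p->next;
        }

        int main(void)
        {
                struct cb a, b;

                enqueue(&a);
                enqueue(&b);
                assert(list == &a && a.next == &b && b.next == NULL);
                return 0;
        }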
      45           0 : void rcu_barrier(void)
      46             : {
      47           0 :         wait_rcu_gp(call_rcu_hurry);
      48           0 : }
      49             : EXPORT_SYMBOL(rcu_barrier);
      50             : 
       51             : /* Record an RCU quiescent state.  */
      52        1354 : void rcu_qs(void)
      53             : {
      54             :         unsigned long flags;
      55             : 
      56        1354 :         local_irq_save(flags);
      57        1354 :         if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
      58         480 :                 rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
      59         480 :                 raise_softirq_irqoff(RCU_SOFTIRQ);
      60             :         }
      61        1354 :         WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
      62        2708 :         local_irq_restore(flags);
      63        1354 : }
      64             : 
      65             : /*
      66             :  * Check to see if the scheduling-clock interrupt came from an extended
      67             :  * quiescent state, and, if so, tell RCU about it.  This function must
      68             :  * be called from hardirq context.  It is normally called from the
      69             :  * scheduling-clock interrupt.
      70             :  */
      71           5 : void rcu_sched_clock_irq(int user)
      72             : {
      73           5 :         if (user) {
      74           4 :                 rcu_qs();
      75           1 :         } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
      76           1 :                 set_tsk_need_resched(current);
      77             :                 set_preempt_need_resched();
      78             :         }
      79           5 : }
      80             : 
      81             : /*
      82             :  * Reclaim the specified callback, either by invoking it for non-kfree cases or
      83             :  * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
      84             :  */
      85         755 : static inline bool rcu_reclaim_tiny(struct rcu_head *head)
      86             : {
      87             :         rcu_callback_t f;
      88         755 :         unsigned long offset = (unsigned long)head->func;
      89             : 
      90             :         rcu_lock_acquire(&rcu_callback_map);
      91         755 :         if (__is_kvfree_rcu_offset(offset)) {
      92           0 :                 trace_rcu_invoke_kvfree_callback("", head, offset);
      93           0 :                 kvfree((void *)head - offset);
      94             :                 rcu_lock_release(&rcu_callback_map);
      95           0 :                 return true;
      96             :         }
      97             : 
      98         755 :         trace_rcu_invoke_callback("", head);
      99         755 :         f = head->func;
     100         755 :         WRITE_ONCE(head->func, (rcu_callback_t)0L);
     101         755 :         f(head);
     102             :         rcu_lock_release(&rcu_callback_map);
     103         755 :         return false;
     104             : }
     105             : 
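rcu_reclaim_tiny() relies on an encoding trick: kfree_rcu()/kvfree_rcu() do
not pass a real function, but smuggle the byte offset of the rcu_head within
the enclosing object through the callback pointer.  Because such offsets are
always smaller than 4096, __is_kvfree_rcu_offset() can tell them apart from
genuine kernel function addresses, and the enclosing object is recovered as
(void *)head - offset before being kvfree()d.  A hedged sketch of what the
encoding boils down to; struct foo and example_free() are illustrative, not
kernel code:

        struct foo {
                int data;
                struct rcu_head rh;
        };

        static void example_free(struct foo *p)
        {
                /*
                 * Roughly what kfree_rcu(p, rh) amounts to on Tiny RCU:
                 * the "callback" is really offsetof(struct foo, rh).
                 */
                call_rcu(&p->rh,
                         (rcu_callback_t)(unsigned long)offsetof(struct foo, rh));
        }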
     106             : /* Invoke the RCU callbacks whose grace period has elapsed.  */
     107         320 : static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
     108             : {
     109             :         struct rcu_head *next, *list;
     110             :         unsigned long flags;
     111             : 
     112             :         /* Move the ready-to-invoke callbacks to a local list. */
     113         320 :         local_irq_save(flags);
     114         320 :         if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
     115             :                 /* No callbacks ready, so just leave. */
     116           0 :                 local_irq_restore(flags);
     117             :                 return;
     118             :         }
     119         320 :         list = rcu_ctrlblk.rcucblist;
     120         320 :         rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
     121         320 :         *rcu_ctrlblk.donetail = NULL;
     122         320 :         if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
     123         320 :                 rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
     124         320 :         rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
     125         320 :         local_irq_restore(flags);
     126             : 
     127             :         /* Invoke the callbacks on the local list. */
     128        1075 :         while (list) {
     129         755 :                 next = list->next;
     130         755 :                 prefetch(next);
     131         755 :                 debug_rcu_head_unqueue(list);
     132         755 :                 local_bh_disable();
     133         755 :                 rcu_reclaim_tiny(list);
     134             :                 local_bh_enable();
     135         755 :                 list = next;
     136             :         }
     137             : }
     138             : 
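Continuing the user-space sketch from above, the irq-disabled prologue of
rcu_process_callbacks() amounts to splitting off the "done" prefix of the
list while keeping both tail pointers consistent.  With an added donetail
mirroring rcu_ctrlblk.donetail (illustrative, not kernel code):

        static struct cb **donetail = &list; /* plays rcu_ctrlblk.donetail */

        static struct cb *splice_done(void)
        {
                struct cb *done;

                if (donetail == &list)
                        return NULL;     /* no callbacks are ready */
                done = list;
                list = *donetail;        /* remainder keeps the newer CBs */
                *donetail = NULL;        /* terminate the done sublist */
                if (curtail == donetail)
                        curtail = &list;
                donetail = &list;
                return done;
        }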
     139             : /*
     140             :  * Wait for a grace period to elapse.  But it is illegal to invoke
     141             :  * synchronize_rcu() from within an RCU read-side critical section.
     142             :  * Therefore, any legal call to synchronize_rcu() is a quiescent state,
     143             :  * and so on a UP system, synchronize_rcu() need do nothing, other than
     144             :  * let the polled APIs know that another grace period elapsed.
     145             :  *
     146             :  * (But Lai Jiangshan points out the benefits of doing might_sleep()
     147             :  * to reduce latency.)
     148             :  *
     149             :  * Cool, huh?  (Due to Josh Triplett.)
     150             :  */
     151           0 : void synchronize_rcu(void)
     152             : {
     153             :         RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
     154             :                          lock_is_held(&rcu_lock_map) ||
     155             :                          lock_is_held(&rcu_sched_lock_map),
     156             :                          "Illegal synchronize_rcu() in RCU read-side critical section");
     157           0 :         WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
     158           0 : }
     159             : EXPORT_SYMBOL_GPL(synchronize_rcu);
     160             : 
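A hedged sketch of the classic updater pattern that synchronize_rcu()
enables; gptr, mylock, newp, oldp, and struct foo are illustrative names, not
part of this file:

        struct foo *oldp;

        oldp = rcu_dereference_protected(gptr, lockdep_is_held(&mylock));
        rcu_assign_pointer(gptr, newp);   /* publish the replacement */
        synchronize_rcu();                /* wait out pre-existing readers */
        kfree(oldp);                      /* no reader can still see oldp */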
     161             : static void tiny_rcu_leak_callback(struct rcu_head *rhp)
     162             : {
     163             : }
     164             : 
     165             : /*
     166             :  * Post an RCU callback to be invoked after the end of an RCU grace
      167             :  * period.  But since we have only one CPU, that would be after any
     168             :  * quiescent state.
     169             :  */
     170         759 : void call_rcu(struct rcu_head *head, rcu_callback_t func)
     171             : {
     172             :         static atomic_t doublefrees;
     173             :         unsigned long flags;
     174             : 
     175         759 :         if (debug_rcu_head_queue(head)) {
     176             :                 if (atomic_inc_return(&doublefrees) < 4) {
     177             :                         pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
     178             :                         mem_dump_obj(head);
     179             :                 }
     180             : 
     181             :                 if (!__is_kvfree_rcu_offset((unsigned long)head->func))
     182             :                         WRITE_ONCE(head->func, tiny_rcu_leak_callback);
     183             :                 return;
     184             :         }
     185             : 
     186         759 :         head->func = func;
     187         759 :         head->next = NULL;
     188             : 
     189         759 :         local_irq_save(flags);
     190         759 :         *rcu_ctrlblk.curtail = head;
     191         759 :         rcu_ctrlblk.curtail = &head->next;
     192        1518 :         local_irq_restore(flags);
     193             : 
     194        1518 :         if (unlikely(is_idle_task(current))) {
     195             :                 /* force scheduling for rcu_qs() */
     196          16 :                 resched_cpu(0);
     197             :         }
     198             : }
     199             : EXPORT_SYMBOL_GPL(call_rcu);
     200             : 
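A hedged usage sketch for call_rcu(): the callback receives a pointer to the
embedded rcu_head and recovers the enclosing object via container_of().
struct foo and foo_rcu_free() are illustrative names:

        struct foo {
                int data;
                struct rcu_head rh;
        };

        static void foo_rcu_free(struct rcu_head *head)
        {
                struct foo *p = container_of(head, struct foo, rh);

                kfree(p);
        }

        /* After unlinking p from every reader-visible structure: */
        call_rcu(&p->rh, foo_rcu_free);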
     201             : /*
     202             :  * Store a grace-period-counter "cookie".  For more information,
     203             :  * see the Tree RCU header comment.
     204             :  */
     205           0 : void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
     206             : {
     207           0 :         rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
     208           0 : }
     209             : EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
     210             : 
     211             : /*
     212             :  * Return a grace-period-counter "cookie".  For more information,
     213             :  * see the Tree RCU header comment.
     214             :  */
     215           0 : unsigned long get_state_synchronize_rcu(void)
     216             : {
     217           0 :         return READ_ONCE(rcu_ctrlblk.gp_seq);
     218             : }
     219             : EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
     220             : 
     221             : /*
     222             :  * Return a grace-period-counter "cookie" and ensure that a future grace
     223             :  * period completes.  For more information, see the Tree RCU header comment.
     224             :  */
     225           0 : unsigned long start_poll_synchronize_rcu(void)
     226             : {
     227           0 :         unsigned long gp_seq = get_state_synchronize_rcu();
     228             : 
     229           0 :         if (unlikely(is_idle_task(current))) {
     230             :                 /* force scheduling for rcu_qs() */
     231           0 :                 resched_cpu(0);
     232             :         }
     233           0 :         return gp_seq;
     234             : }
     235             : EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
     236             : 
     237             : /*
     238             :  * Return true if the grace period corresponding to oldstate has completed
     239             :  * and false otherwise.  For more information, see the Tree RCU header
     240             :  * comment.
     241             :  */
     242           0 : bool poll_state_synchronize_rcu(unsigned long oldstate)
     243             : {
     244           0 :         return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
     245             : }
     246             : EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
     247             : 
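The polled API above lets an updater avoid both blocking and callbacks when a
grace period has already elapsed.  A hedged sketch, reusing the illustrative
foo_rcu_free() from the call_rcu() example:

        unsigned long cookie;

        /* ... unlink oldp from every reader-visible structure ... */
        cookie = start_poll_synchronize_rcu();
        /* ... do other work while the grace period runs ... */
        if (poll_state_synchronize_rcu(cookie))
                kfree(oldp);                       /* grace period already over */
        else
                call_rcu(&oldp->rh, foo_rcu_free); /* fall back to a callback */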
     248             : #ifdef CONFIG_KASAN_GENERIC
     249             : void kvfree_call_rcu(struct rcu_head *head, void *ptr)
     250             : {
     251             :         if (head)
     252             :                 kasan_record_aux_stack_noalloc(ptr);
     253             : 
     254             :         __kvfree_call_rcu(head, ptr);
     255             : }
     256             : EXPORT_SYMBOL_GPL(kvfree_call_rcu);
     257             : #endif
     258             : 
     259           1 : void __init rcu_init(void)
     260             : {
     261           1 :         open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
     262           1 :         rcu_early_boot_tests();
     263           1 : }

Generated by: LCOV version 1.14