LCOV - code coverage report
Current view: top level - kernel/rcu - tiny.c (source / functions)
Test: coverage.info
Date: 2023-07-19 18:55:55

                 Hit   Total   Coverage
Lines:            54      75     72.0 %
Functions:         6      12     50.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0+
       2             : /*
       3             :  * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
       4             :  *
       5             :  * Copyright IBM Corporation, 2008
       6             :  *
       7             :  * Author: Paul E. McKenney <paulmck@linux.ibm.com>
       8             :  *
       9             :  * For detailed explanation of Read-Copy Update mechanism see -
      10             :  *              Documentation/RCU
      11             :  */
      12             : #include <linux/completion.h>
      13             : #include <linux/interrupt.h>
      14             : #include <linux/notifier.h>
      15             : #include <linux/rcupdate_wait.h>
      16             : #include <linux/kernel.h>
      17             : #include <linux/export.h>
      18             : #include <linux/mutex.h>
      19             : #include <linux/sched.h>
      20             : #include <linux/types.h>
      21             : #include <linux/init.h>
      22             : #include <linux/time.h>
      23             : #include <linux/cpu.h>
      24             : #include <linux/prefetch.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/mm.h>
      27             : 
      28             : #include "rcu.h"
      29             : 
      30             : /* Global control variables for rcupdate callback mechanism. */
      31             : struct rcu_ctrlblk {
      32             :         struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
      33             :         struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
      34             :         struct rcu_head **curtail;      /* ->next pointer of last CB. */
      35             :         unsigned long gp_seq;           /* Grace-period counter. */
      36             : };
      37             : 
      38             : /* Definition for rcupdate control block. */
      39             : static struct rcu_ctrlblk rcu_ctrlblk = {
      40             :         .donetail       = &rcu_ctrlblk.rcucblist,
      41             :         .curtail        = &rcu_ctrlblk.rcucblist,
      42             :         .gp_seq         = 0 - 300UL,
      43             : };
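The two tail pointers above carve one queue into two segments: callbacks before *donetail have had their grace period elapse, while those up to *curtail are still waiting. As a rough illustration, here is a minimal stand-alone sketch of the same two-tail-pointer scheme (hypothetical names, ordinary user-space C, not the kernel code itself):

#include <stddef.h>

struct cb { struct cb *next; };

static struct cb *cbs;               /* plays the role of ->rcucblist */
static struct cb **donetail = &cbs;  /* plays the role of ->donetail  */
static struct cb **curtail  = &cbs;  /* plays the role of ->curtail   */

/* Mirrors call_rcu(): append at *curtail and advance curtail. */
static void enqueue(struct cb *c)
{
        c->next = NULL;
        *curtail = c;
        curtail = &c->next;
}

/* Mirrors rcu_qs(): everything queued so far becomes "done". */
static void quiescent_state(void)
{
        donetail = curtail;
}

An empty queue is the degenerate case where both tails point back at the list head, exactly as in the initializer above.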
      44             : 
      45           0 : void rcu_barrier(void)
      46             : {
      47           0 :         wait_rcu_gp(call_rcu_hurry);
      48           0 : }
      49             : EXPORT_SYMBOL(rcu_barrier);
      50             : 
      51             : /* Record an rcu quiescent state.  */
      52        6205 : void rcu_qs(void)
      53             : {
      54             :         unsigned long flags;
      55             : 
      56        6205 :         local_irq_save(flags);
      57        6205 :         if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
      58        1123 :                 rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
      59        1123 :                 raise_softirq_irqoff(RCU_SOFTIRQ);
      60             :         }
      61        6205 :         WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
      62       12410 :         local_irq_restore(flags);
      63        6205 : }
      64             : 
      65             : /*
      66             :  * Check to see if the scheduling-clock interrupt came from an extended
      67             :  * quiescent state, and, if so, tell RCU about it.  This function must
      68             :  * be called from hardirq context.  It is normally called from the
      69             :  * scheduling-clock interrupt.
      70             :  */
      71        2943 : void rcu_sched_clock_irq(int user)
      72             : {
      73        2943 :         if (user) {
      74        2937 :                 rcu_qs();
      75           6 :         } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
      76           0 :                 set_tsk_need_resched(current);
      77             :                 set_preempt_need_resched();
      78             :         }
      79        2943 : }
      80             : 
      81             : /*
      82             :  * Reclaim the specified callback, either by invoking it for non-kfree cases or
      83             :  * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
      84             :  */
      85        1918 : static inline bool rcu_reclaim_tiny(struct rcu_head *head)
      86             : {
      87             :         rcu_callback_t f;
      88        1918 :         unsigned long offset = (unsigned long)head->func;
      89             : 
      90             :         rcu_lock_acquire(&rcu_callback_map);
      91        1918 :         if (__is_kvfree_rcu_offset(offset)) {
      92           1 :                 trace_rcu_invoke_kvfree_callback("", head, offset);
      93           1 :                 kvfree((void *)head - offset);
      94             :                 rcu_lock_release(&rcu_callback_map);
      95           1 :                 return true;
      96             :         }
      97             : 
      98        1917 :         trace_rcu_invoke_callback("", head);
      99        1917 :         f = head->func;
     100        1917 :         WRITE_ONCE(head->func, (rcu_callback_t)0L);
     101        1917 :         f(head);
     102             :         rcu_lock_release(&rcu_callback_map);
     103        1917 :         return false;
     104             : }
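The kvfree branch above works because kvfree_rcu() does not store a callback in ->func at all: it stores the byte offset of the rcu_head within the enclosing object, and __is_kvfree_rcu_offset() treats any small value as such an offset rather than a function pointer. A hedged stand-alone model of the encode/decode arithmetic (hypothetical types, user-space C):

#include <stddef.h>
#include <stdio.h>

struct rhead { struct rhead *next; void (*func)(struct rhead *); };
struct foo   { long data; struct rhead rh; };

int main(void)
{
        struct foo f;
        unsigned long offset;
        struct foo *fp;

        /* Encode: store the offset where a callback would normally go. */
        f.rh.func = (void (*)(struct rhead *))offsetof(struct foo, rh);

        /* Decode: recover the enclosing object, as in "(void *)head - offset". */
        offset = (unsigned long)f.rh.func;
        fp = (struct foo *)((char *)&f.rh - offset);

        printf("%d\n", fp == &f);       /* prints 1 */
        return 0;
}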
     105             : 
     106             : /* Invoke the RCU callbacks whose grace period has elapsed.  */
     107         756 : static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
     108             : {
     109             :         struct rcu_head *next, *list;
     110             :         unsigned long flags;
     111             : 
     112             :         /* Move the ready-to-invoke callbacks to a local list. */
     113         756 :         local_irq_save(flags);
     114         756 :         if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
     115             :                 /* No callbacks ready, so just leave. */
     116           0 :                 local_irq_restore(flags);
     117             :                 return;
     118             :         }
     119         756 :         list = rcu_ctrlblk.rcucblist;
     120         756 :         rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
     121         756 :         *rcu_ctrlblk.donetail = NULL;
     122         756 :         if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
     123         756 :                 rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
     124         756 :         rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
     125         756 :         local_irq_restore(flags);
     126             : 
     127             :         /* Invoke the callbacks on the local list. */
     128        2674 :         while (list) {
     129        1918 :                 next = list->next;
     130        1918 :                 prefetch(next);
     131        1918 :                 debug_rcu_head_unqueue(list);
     132        1918 :                 local_bh_disable();
     133        1918 :                 rcu_reclaim_tiny(list);
     134             :                 local_bh_enable();
     135        1918 :                 list = next;
     136             :         }
     137             : }
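Continuing the stand-alone sketch from the rcu_ctrlblk comment above (same hypothetical variables), the pointer surgery this function performs under local_irq_save() can be modeled as:

/* Detach the done segment, mirroring rcu_process_callbacks(). */
static struct cb *splice_done(void)
{
        struct cb *done;

        if (donetail == &cbs)
                return NULL;            /* no callbacks are done yet */
        done = cbs;                     /* done segment starts at the head */
        cbs = *donetail;                /* remaining CBs become the new list */
        *donetail = NULL;               /* NULL-terminate the done segment */
        if (curtail == donetail)
                curtail = &cbs;         /* list emptied; reset the tail */
        donetail = &cbs;
        return done;
}

The returned segment is what the while loop hands to rcu_reclaim_tiny(), one callback at a time.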
     138             : 
     139             : /*
     140             :  * Wait for a grace period to elapse.  But it is illegal to invoke
     141             :  * synchronize_rcu() from within an RCU read-side critical section.
     142             :  * Therefore, any legal call to synchronize_rcu() is a quiescent state,
     143             :  * and so on a UP system, synchronize_rcu() need do nothing, other than
     144             :  * let the polled APIs know that another grace period elapsed.
     145             :  *
     146             :  * (But Lai Jiangshan points out the benefits of doing might_sleep()
     147             :  * to reduce latency.)
     148             :  *
     149             :  * Cool, huh?  (Due to Josh Triplett.)
     150             :  */
     151           0 : void synchronize_rcu(void)
     152             : {
     153             :         RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
     154             :                          lock_is_held(&rcu_lock_map) ||
     155             :                          lock_is_held(&rcu_sched_lock_map),
     156             :                          "Illegal synchronize_rcu() in RCU read-side critical section");
     157           0 :         WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
     158           0 : }
     159             : EXPORT_SYMBOL_GPL(synchronize_rcu);
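For reference, the updater pattern this guarantee supports is the classic unlink/wait/free sequence. A hedged sketch with hypothetical names (struct foo, foo_lock, and remove_foo are illustrative, not from this file):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
        struct list_head list;
        int data;
};

static DEFINE_SPINLOCK(foo_lock);

static void remove_foo(struct foo *fp)
{
        spin_lock(&foo_lock);
        list_del_rcu(&fp->list);        /* unpublish the element */
        spin_unlock(&foo_lock);

        synchronize_rcu();              /* wait out pre-existing readers */
        kfree(fp);                      /* no reader can still hold a reference */
}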
     160             : 
     161             : static void tiny_rcu_leak_callback(struct rcu_head *rhp)
     162             : {
     163             : }
     164             : 
     165             : /*
     166             :  * Post an RCU callback to be invoked after the end of an RCU grace
     167             :  * period.  But since we have but one CPU, that would be after any
     168             :  * quiescent state.
     169             :  */
     170        1922 : void call_rcu(struct rcu_head *head, rcu_callback_t func)
     171             : {
     172             :         static atomic_t doublefrees;
     173             :         unsigned long flags;
     174             : 
     175        1922 :         if (debug_rcu_head_queue(head)) {
     176             :                 if (atomic_inc_return(&doublefrees) < 4) {
     177             :                         pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
     178             :                         mem_dump_obj(head);
     179             :                 }
     180             : 
     181             :                 if (!__is_kvfree_rcu_offset((unsigned long)head->func))
     182             :                         WRITE_ONCE(head->func, tiny_rcu_leak_callback);
     183             :                 return;
     184             :         }
     185             : 
     186        1922 :         head->func = func;
     187        1922 :         head->next = NULL;
     188             : 
     189        1922 :         local_irq_save(flags);
     190        1922 :         *rcu_ctrlblk.curtail = head;
     191        1922 :         rcu_ctrlblk.curtail = &head->next;
     192        3844 :         local_irq_restore(flags);
     193             : 
     194        3844 :         if (unlikely(is_idle_task(current))) {
     195             :                 /* force scheduling for rcu_qs() */
     196           0 :                 resched_cpu(0);
     197             :         }
     198             : }
     199             : EXPORT_SYMBOL_GPL(call_rcu);
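A hedged sketch of the usual caller, with hypothetical names: the rcu_head is embedded in the protected object, and the callback uses container_of() to recover and free it.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rp)
{
        struct foo *fp = container_of(rp, struct foo, rcu);

        kfree(fp);
}

/* Instead of blocking in synchronize_rcu(), queue the free: */
static void remove_foo(struct foo *fp)
{
        call_rcu(&fp->rcu, foo_reclaim);
}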
     200             : 
     201             : /*
     202             :  * Store a grace-period-counter "cookie".  For more information,
     203             :  * see the Tree RCU header comment.
     204             :  */
     205           0 : void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
     206             : {
     207           0 :         rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
     208           0 : }
     209             : EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
     210             : 
     211             : /*
     212             :  * Return a grace-period-counter "cookie".  For more information,
     213             :  * see the Tree RCU header comment.
     214             :  */
     215           0 : unsigned long get_state_synchronize_rcu(void)
     216             : {
     217           0 :         return READ_ONCE(rcu_ctrlblk.gp_seq);
     218             : }
     219             : EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
     220             : 
     221             : /*
     222             :  * Return a grace-period-counter "cookie" and ensure that a future grace
     223             :  * period completes.  For more information, see the Tree RCU header comment.
     224             :  */
     225           0 : unsigned long start_poll_synchronize_rcu(void)
     226             : {
     227           0 :         unsigned long gp_seq = get_state_synchronize_rcu();
     228             : 
     229           0 :         if (unlikely(is_idle_task(current))) {
     230             :                 /* force scheduling for rcu_qs() */
     231           0 :                 resched_cpu(0);
     232             :         }
     233           0 :         return gp_seq;
     234             : }
     235             : EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
     236             : 
     237             : /*
     238             :  * Return true if the grace period corresponding to oldstate has completed
     239             :  * and false otherwise.  For more information, see the Tree RCU header
     240             :  * comment.
     241             :  */
     242           0 : bool poll_state_synchronize_rcu(unsigned long oldstate)
     243             : {
     244           0 :         return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
     245             : }
     246             : EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
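Taken together, the three functions above form Tiny RCU's polled grace-period API. A hedged sketch of a hypothetical user (defer_cleanup, poll_cleanup, and do_cleanup are illustrative placeholders):

#include <linux/rcupdate.h>

static unsigned long cookie;

static void do_cleanup(void);   /* hypothetical reclamation step */

static void defer_cleanup(void)
{
        /* Snapshot gp_seq and make sure a grace period gets going. */
        cookie = start_poll_synchronize_rcu();
}

static void poll_cleanup(void)
{
        /* True once rcu_ctrlblk.gp_seq has moved past the snapshot. */
        if (poll_state_synchronize_rcu(cookie))
                do_cleanup();
}

Because rcu_qs() bumps gp_seq by 2 at every quiescent state, any change relative to the snapshot means at least one full grace period has elapsed on this single CPU, which is why poll_state_synchronize_rcu() needs only an inequality test.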
     247             : 
     248             : #ifdef CONFIG_KASAN_GENERIC
     249             : void kvfree_call_rcu(struct rcu_head *head, void *ptr)
     250             : {
     251             :         if (head)
     252             :                 kasan_record_aux_stack_noalloc(ptr);
     253             : 
     254             :         __kvfree_call_rcu(head, ptr);
     255             : }
     256             : EXPORT_SYMBOL_GPL(kvfree_call_rcu);
     257             : #endif
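For completeness, a hedged sketch of how this entry point is usually reached: callers use the kvfree_rcu() macro rather than invoking kvfree_call_rcu() directly (struct foo and remove_foo are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
        struct rcu_head rcu;
};

static void remove_foo(struct foo *fp)
{
        /* Expands to kvfree_call_rcu(); ->func carries offsetof(struct foo, rcu). */
        kvfree_rcu(fp, rcu);
}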
     258             : 
     259           1 : void __init rcu_init(void)
     260             : {
     261           1 :         open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
     262           1 :         rcu_early_boot_tests();
     263           1 : }

Generated by: LCOV version 1.14