/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

struct rcu_gp_oldstate {
	unsigned long rgos_norm;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2

/*
 * Are the two oldstate values the same?  See the Tree RCU version for
 * docbook header.
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm;
}

unsigned long get_state_synchronize_rcu(void);

static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = get_state_synchronize_rcu();
}

unsigned long start_poll_synchronize_rcu(void);

static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu();
}

bool poll_state_synchronize_rcu(unsigned long oldstate);

static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	return poll_state_synchronize_rcu(rgosp->rgos_norm);
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu(rgosp->rgos_norm);
}

static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}

static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
}

static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}

static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu_expedited(rgosp->rgos_norm);
}

extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}

/*
 * Add one more declaration of kvfree() here.  Including <linux/mm.h>,
 * where it is defined, is not straightforward because that include
 * causes many compile errors.
 */
extern void kvfree(const void *addr);
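/*
 * How the helper below encodes its callback: the double-argument
 * kvfree_rcu(ptr, field) form ultimately calls kvfree_call_rcu() with
 * both the address of the object's embedded rcu_head and the object
 * itself.  Because the rcu_head sits at a small offset inside the
 * enclosing object, that offset can be smuggled through the
 * rcu_callback_t slot: the "callback" passed to call_rcu() is really
 * (head - ptr), the byte offset of the rcu_head.  The reclaim path
 * recognizes such small non-function values and invokes kvfree() on the
 * recomputed object base rather than calling through the pointer.
 * Illustrative sketch only (struct foo and fp are hypothetical, not
 * part of this header):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	struct foo *fp = kmalloc(sizeof(*fp), GFP_KERNEL);
 *	...
 *	kvfree_rcu(fp, rh);	// reaches kvfree_call_rcu(&fp->rh, fp)
 *
 * The single-argument kvfree_rcu(ptr) form passes a NULL head instead,
 * which selects the synchronous fallback path below.
 */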
static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	if (head) {
		call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
		return;
	}

	// kvfree_rcu(one_arg) call.
	might_sleep();
	synchronize_rcu();
	kvfree(ptr);
}

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
#else
static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	__kvfree_call_rcu(head, ptr);
}
#endif

void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

static inline int rcu_needs_cpu(void)
{
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu NULL
#define rcutree_online_cpu NULL
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */
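
/*
 * Illustrative use of the polled grace-period API declared above.  This
 * is a sketch, not part of this header; struct defer, defer_free(), and
 * try_reclaim() are hypothetical names.  The pattern: snapshot a cookie
 * when an object is unpublished, then free it once a full grace period
 * has elapsed.  poll_state_synchronize_rcu() never sleeps, so the
 * reclaim check may run from contexts that cannot block.
 *
 *	struct defer {
 *		unsigned long cookie;
 *		void *obj;
 *	};
 *
 *	// Unpublish side: record the grace-period state at removal time.
 *	static void defer_free(struct defer *d, void *obj)
 *	{
 *		d->obj = obj;
 *		d->cookie = start_poll_synchronize_rcu();
 *	}
 *
 *	// Reclaim side: free only after a grace period has completed.
 *	static bool try_reclaim(struct defer *d)
 *	{
 *		if (!poll_state_synchronize_rcu(d->cookie))
 *			return false;	// readers may still hold references
 *		kvfree(d->obj);
 *		return true;
 *	}
 *
 * Where sleeping is permitted, cond_synchronize_rcu(d->cookie) instead
 * blocks until the grace period implied by the cookie has ended.
 */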