/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general. E.g. movable only node which cannot satisfy
 * any non movable allocations (see update_nodemask). Page allocator
 * needs to make additional checks for those configurations and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
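 *
 * A minimal sketch of the intended use (illustrative only, not the exact
 * page allocator code; the __GFP_HARDWALL test is just an example
 * condition and "reject" is a hypothetical label):
 *
 *	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL))
 *		goto reject;
 *
 * so that sane configurations only pay for a static branch test.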
 */
static inline bool cpusets_insane_config(void)
{
	return static_branch_unlikely(&cpusets_insane_config_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
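 *
 * A minimal sketch of the begin/retry pattern (illustrative; the actual
 * callers live in the page allocator and related code, and
 * attempt_allocation() is a hypothetical helper):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = attempt_allocation(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));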
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_read_lock(void) { }
static inline void cpuset_read_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */