LCOV - code coverage report
Current view: top level - lib - flex_proportions.c (source / functions)
Test: coverage.info    Date: 2023-03-27 20:00:47
Coverage: Lines 7 of 97 hit (7.2%), Functions 2 of 14 hit (14.3%)

// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: given different types of event, measure the
 * proportion of each type of event over time. The proportions are measured
 * with exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining the
 * denominator (let's call it 'd') and, for each event type, its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event type j lazily, when someone asks for the proportion of event j or
 * when event j occurs. This can be trivially implemented by remembering, for
 * each event type, the last period in which its proportion was touched.
 */
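
/*
 * Worked example (illustration only, not part of the original source):
 * suppose there are two event types A and B and, in period 0, each sees 4
 * events, so d = 8 and p_A = p_B = 4/8 = 1/2. Declaring a new period halves
 * the denominator eagerly (d = 4) but leaves n_A and n_B alone. If B then
 * records 4 more events, d = 8 again; B's numerator is lazily shifted for
 * the missed period on the first of those increments (4 >> 1 = 2) and ends
 * up at n_B = 6. Querying A lazily shifts n_A to 2, so p_A = 2/8 = 1/4 and
 * p_B = 6/8 = 3/4: recent activity dominates, and the proportions still
 * sum to 1.
 */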
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events = percpu_counter_sum(&p->events);

	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1)
		return false;
	preempt_disable_nested();
	write_seqcount_begin(&p->sequence);
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);
	preempt_enable_nested();

	return true;
}
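
/*
 * Usage sketch (illustrative only; the example_* name is hypothetical):
 * callers typically declare periods from a timer, passing the number of
 * periods that actually elapsed so that missed ticks still age the history
 * correctly. A false return means the history has fully decayed and the
 * timer need not be rearmed until new events arrive. The caller must
 * serialize these calls as noted above.
 */
static void example_age_history(struct fprop_global *p, int elapsed_periods)
{
	if (elapsed_periods && !fprop_new_period(p, elapsed_periods))
		pr_debug("flex proportions: history fully decayed\n");
}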

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
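
/*
 * Worked example (illustration only): if the global period advanced from 5
 * to 8 while pl->period was still 5, the catch-up above shifts pl->events
 * right by 3, i.e. divides it by 2^3 = 8, which is exactly what three eager
 * per-period halvings would have done.
 */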

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
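
/*
 * Usage sketch (illustrative only; the function name is hypothetical):
 * turning the returned pair into a fixed-point share in parts per 1024.
 * fprop_fraction_single() guarantees den >= 1 and num <= den, so the
 * division is safe and the result is at most 1024 (this sketch also assumes
 * num << 10 does not overflow unsigned long).
 */
static unsigned long example_share_per_1024(struct fprop_global *p,
					    struct fprop_local_single *pl)
{
	unsigned long num, den;

	fprop_fraction_single(p, pl, &num, &den);
	return (num << 10) / den;
}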

/*
 * ---- PERCPU ----
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
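
/*
 * percpu_counter_add_batch() only folds a CPU's local delta into the global
 * count once its magnitude reaches the batch size, so an approximate read of
 * pl->events can be off by up to roughly nr_cpu_ids * PROP_BATCH. That is
 * the same bound fprop_reflect_period_percpu() uses below when deciding
 * whether a precise percpu_counter_sum() is needed.
 */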

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

     204             :                 if (val < (nr_cpu_ids * PROP_BATCH))
     205             :                         val = percpu_counter_sum(&pl->events);
     206             : 
     207           0 :                 percpu_counter_add_batch(&pl->events,
     208           0 :                         -val + (val >> (period-pl->period)), PROP_BATCH);
     209             :         } else
     210           0 :                 percpu_counter_set(&pl->events, 0);
     211           0 :         pl->period = period;
     212           0 :         raw_spin_unlock_irqrestore(&pl->lock, flags);
     213             : }

/* Event of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
		long nr)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
	percpu_counter_add(&p->events, nr);
}
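
/*
 * Usage sketch (illustrative only; the names are hypothetical): charging a
 * completed request against one device's local proportion and the global
 * total in a single call:
 *
 *	__fprop_add_percpu(&stats->completions_prop, &dev->completions, 1);
 */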

void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}

/*
 * Like __fprop_add_percpu() except that the event is counted only as long as
 * the given type's fraction stays below @max_frac/FPROP_FRAC_BASE; @nr is
 * clamped so the fraction saturates at that limit rather than exceeding it.
 */
void __fprop_add_percpu_max(struct fprop_global *p,
		struct fprop_local_percpu *pl, int max_frac, long nr)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;
		s64 tmp;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
		tmp = (u64)denominator * max_frac -
					((u64)numerator << FPROP_FRAC_SHIFT);
		if (tmp < 0) {
			/* Maximum fraction already exceeded? */
			return;
		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
			/* Add just enough for the fraction to saturate */
			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
					FPROP_FRAC_BASE - max_frac);
		}
	}

	__fprop_add_percpu(p, pl, nr);
}
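
/*
 * Worked example (illustration only, assuming FPROP_FRAC_SHIFT == 10, i.e.
 * FPROP_FRAC_BASE == 1024, as defined in <linux/flex_proportions.h>): with
 * max_frac = 512 (a 50% cap), a current fraction of num/den = 300/1024 and
 * nr = 600, we get tmp = 1024*512 - 300*1024 = 217088, while
 * nr * (FPROP_FRAC_BASE - max_frac) = 600*512 = 307200 > tmp, so nr is
 * clamped to 217088/512 = 424 (rounded up). After the add, num/den =
 * 724/1448, which is exactly 1/2: the fraction saturates at the cap. The
 * writeback code in mm/page-writeback.c uses this entry point to cap any
 * one device's share of recently completed writeback.
 */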

Generated by: LCOV version 1.14