@@ -31,7 +31,7 @@ extern "C" {
3131#define CACHE_ENTRIES(cache) (sizeof(cache)/sizeof(_Py_CODEUNIT))
3232
3333typedef struct {
34-    uint16_t counter;
34+    _Py_BackoffCounter counter;
3535    uint16_t module_keys_version;
3636    uint16_t builtin_keys_version;
3737    uint16_t index;
@@ -40,44 +40,44 @@ typedef struct {
4040#define INLINE_CACHE_ENTRIES_LOAD_GLOBAL CACHE_ENTRIES(_PyLoadGlobalCache)
4141
4242typedef struct {
43-    uint16_t counter;
43+    _Py_BackoffCounter counter;
4444} _PyBinaryOpCache;
4545
4646#define INLINE_CACHE_ENTRIES_BINARY_OP CACHE_ENTRIES(_PyBinaryOpCache)
4747
4848typedef struct {
49-    uint16_t counter;
49+    _Py_BackoffCounter counter;
5050} _PyUnpackSequenceCache;
5151
5252#define INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE \
5353 CACHE_ENTRIES(_PyUnpackSequenceCache)
5454
5555typedef struct {
56-    uint16_t counter;
56+    _Py_BackoffCounter counter;
5757} _PyCompareOpCache;
5858
5959#define INLINE_CACHE_ENTRIES_COMPARE_OP CACHE_ENTRIES(_PyCompareOpCache)
6060
6161typedef struct {
62-    uint16_t counter;
62+    _Py_BackoffCounter counter;
6363} _PyBinarySubscrCache;
6464
6565#define INLINE_CACHE_ENTRIES_BINARY_SUBSCR CACHE_ENTRIES(_PyBinarySubscrCache)
6666
6767typedef struct {
68-    uint16_t counter;
68+    _Py_BackoffCounter counter;
6969} _PySuperAttrCache;
7070
7171#define INLINE_CACHE_ENTRIES_LOAD_SUPER_ATTR CACHE_ENTRIES(_PySuperAttrCache)
7272
7373typedef struct {
74-    uint16_t counter;
74+    _Py_BackoffCounter counter;
7575    uint16_t version[2];
7676    uint16_t index;
7777} _PyAttrCache;
7878
7979typedef struct {
80-    uint16_t counter;
80+    _Py_BackoffCounter counter;
8181    uint16_t type_version[2];
8282    union {
8383        uint16_t keys_version[2];
@@ -93,39 +93,39 @@ typedef struct {
9393#define INLINE_CACHE_ENTRIES_STORE_ATTR CACHE_ENTRIES(_PyAttrCache)
9494
9595typedef struct {
96-    uint16_t counter;
96+    _Py_BackoffCounter counter;
9797    uint16_t func_version[2];
9898} _PyCallCache;
9999
100100#define INLINE_CACHE_ENTRIES_CALL CACHE_ENTRIES(_PyCallCache)
101101
102102typedef struct {
103-    uint16_t counter;
103+    _Py_BackoffCounter counter;
104104} _PyStoreSubscrCache;
105105
106106#define INLINE_CACHE_ENTRIES_STORE_SUBSCR CACHE_ENTRIES(_PyStoreSubscrCache)
107107
108108typedef struct {
109-    uint16_t counter;
109+    _Py_BackoffCounter counter;
110110} _PyForIterCache;
111111
112112#define INLINE_CACHE_ENTRIES_FOR_ITER CACHE_ENTRIES(_PyForIterCache)
113113
114114typedef struct {
115-    uint16_t counter;
115+    _Py_BackoffCounter counter;
116116} _PySendCache;
117117
118118#define INLINE_CACHE_ENTRIES_SEND CACHE_ENTRIES(_PySendCache)
119119
120120typedef struct {
121-    uint16_t counter;
121+    _Py_BackoffCounter counter;
122122    uint16_t version[2];
123123} _PyToBoolCache;
124124
125125#define INLINE_CACHE_ENTRIES_TO_BOOL CACHE_ENTRIES(_PyToBoolCache)
126126
127127typedef struct {
128-    uint16_t counter;
128+    _Py_BackoffCounter counter;
129129} _PyContainsOpCache;
130130
131131#define INLINE_CACHE_ENTRIES_CONTAINS_OP CACHE_ENTRIES(_PyContainsOpCache)
@@ -451,18 +451,14 @@ write_location_entry_start(uint8_t *ptr, int code, int length)
451451
452452/** Counters
453453 * The first 16-bit value in each inline cache is a counter.
454- * When counting misses, the counter is treated as a simple unsigned value.
455454 *
456455 * When counting executions until the next specialization attempt,
457456 * exponential backoff is used to reduce the number of specialization failures.
458- * The high 12 bits store the counter, the low 4 bits store the backoff exponent.
459- * On a specialization failure, the backoff exponent is incremented and the
460- * counter set to (2**backoff - 1).
461- * Backoff == 6 -> starting counter == 63, backoff == 10 -> starting counter == 1023.
457+ * See pycore_backoff.h for more details.
458+ * On a specialization failure, the backoff counter is restarted.
462459 */
463460
464- /* With a 16-bit counter, we have 12 bits for the counter value, and 4 bits for the backoff */
465- #define ADAPTIVE_BACKOFF_BITS 4
461+ #include "pycore_backoff.h"
466462
467463// A value of 1 means that we attempt to specialize the *second* time each
468464// instruction is executed. Executing twice is a much better indicator of
@@ -480,36 +476,30 @@ write_location_entry_start(uint8_t *ptr, int code, int length)
480476#define ADAPTIVE_COOLDOWN_VALUE 52
481477#define ADAPTIVE_COOLDOWN_BACKOFF 0
482478
483- #define MAX_BACKOFF_VALUE (16 - ADAPTIVE_BACKOFF_BITS)
479+ // Can't assert this in pycore_backoff.h because of header order dependencies
480+ static_assert(COLD_EXIT_INITIAL_VALUE > ADAPTIVE_COOLDOWN_VALUE,
481+               "Cold exit value should be larger than adaptive cooldown value");
484482
485-
486- static inline uint16_t
483+ static inline _Py_BackoffCounter
487484adaptive_counter_bits(uint16_t value, uint16_t backoff) {
488-     return ((value << ADAPTIVE_BACKOFF_BITS)
489-             | (backoff & ((1 << ADAPTIVE_BACKOFF_BITS) - 1)));
485+     return make_backoff_counter(value, backoff);
490486}
491487
492- static inline uint16_t
488+ static inline _Py_BackoffCounter
493489adaptive_counter_warmup(void) {
494490    return adaptive_counter_bits(ADAPTIVE_WARMUP_VALUE,
495491                                 ADAPTIVE_WARMUP_BACKOFF);
496492}
497493
498- static inline uint16_t
494+ static inline _Py_BackoffCounter
499495adaptive_counter_cooldown(void) {
500496    return adaptive_counter_bits(ADAPTIVE_COOLDOWN_VALUE,
501497                                 ADAPTIVE_COOLDOWN_BACKOFF);
502498}
503499
504- static inline uint16_t
505- adaptive_counter_backoff(uint16_t counter) {
506-     uint16_t backoff = counter & ((1 << ADAPTIVE_BACKOFF_BITS) - 1);
507-     backoff++;
508-     if (backoff > MAX_BACKOFF_VALUE) {
509-         backoff = MAX_BACKOFF_VALUE;
510-     }
511-     uint16_t value = (uint16_t)(1 << backoff) - 1;
512-     return adaptive_counter_bits(value, backoff);
500+ static inline _Py_BackoffCounter
501+ adaptive_counter_backoff(_Py_BackoffCounter counter) {
502+     return restart_backoff_counter(counter);
513503}
514504
515505
0 commit comments