38 | 38 | <instr N-1> |
39 | 39 | */ |
40 | 40 |
| 41 | +/* Map from opcode to adaptive opcode. |
| 42 | + Values of zero are ignored. */ |
| 43 | +static uint8_t adaptive_opcodes[256] = { |
| 44 | + [LOAD_ATTR] = LOAD_ATTR_ADAPTIVE, |
| 45 | + [LOAD_GLOBAL] = LOAD_GLOBAL_ADAPTIVE, |
| 46 | + [LOAD_METHOD] = LOAD_METHOD_ADAPTIVE, |
| 47 | + [BINARY_SUBSCR] = BINARY_SUBSCR_ADAPTIVE, |
| 48 | + [STORE_SUBSCR] = STORE_SUBSCR_ADAPTIVE, |
| 49 | + [CALL] = CALL_ADAPTIVE, |
| 50 | + [STORE_ATTR] = STORE_ATTR_ADAPTIVE, |
| 51 | + [BINARY_OP] = BINARY_OP_ADAPTIVE, |
| 52 | + [COMPARE_OP] = COMPARE_OP_ADAPTIVE, |
| 53 | +}; |
| 54 | + |
| 55 | +/* The number of cache entries required for a "family" of instructions. */ |
| 56 | +static uint8_t cache_requirements[256] = { |
| 57 | + [LOAD_ATTR] = 2, /* _PyAdaptiveEntry and _PyAttrCache */ |
| 58 | + [LOAD_GLOBAL] = 2, /* _PyAdaptiveEntry and _PyLoadGlobalCache */ |
| 59 | + [LOAD_METHOD] = 3, /* _PyAdaptiveEntry, _PyAttrCache and _PyObjectCache */ |
| 60 | + [BINARY_SUBSCR] = 2, /* _PyAdaptiveEntry, _PyObjectCache */ |
| 61 | + [STORE_SUBSCR] = 0, |
| 62 | + [CALL] = 2, /* _PyAdaptiveEntry and _PyObjectCache/_PyCallCache */ |
| 63 | + [STORE_ATTR] = 2, /* _PyAdaptiveEntry and _PyAttrCache */ |
| 64 | + [BINARY_OP] = 1, // _PyAdaptiveEntry |
| 65 | + [COMPARE_OP] = 1, /* _PyAdaptiveEntry */ |
| 66 | +}; |
| 67 | + |
41 | 68 | Py_ssize_t _Py_QuickenedCount = 0; |
42 | 69 | #ifdef Py_STATS |
43 | 70 | PyStats _py_stats = { 0 }; |
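Note: the two tables added above are pure data; the code that consults them during quickening lives elsewhere in specialize.c and is not part of this diff. The stand-alone sketch below (simplified opcode numbers, a hypothetical MAX_CACHE budget, and demo_* names that do not exist in CPython) only illustrates the intended pattern: an entry of zero in adaptive_opcodes means "leave the instruction alone", and cache_requirements says how many cache entries must still be available before an instruction can be rewritten to its adaptive form.

/* Illustrative sketch only -- simplified opcodes and invented names,
 * not CPython's real quickening pass. */
#include <stdint.h>
#include <stdio.h>

#define MAX_CACHE 64   /* assumed budget of cache entries */

enum { NOP = 0, LOAD_FAST = 1, BINARY_OP_DEMO = 2, BINARY_OP_ADAPTIVE_DEMO = 3 };

static const uint8_t demo_adaptive[256]  = { [BINARY_OP_DEMO] = BINARY_OP_ADAPTIVE_DEMO };
static const uint8_t demo_cache_req[256] = { [BINARY_OP_DEMO] = 1 };

int main(void)
{
    uint8_t code[] = { LOAD_FAST, LOAD_FAST, BINARY_OP_DEMO, NOP };
    int cache_used = 0;

    for (size_t i = 0; i < sizeof code; i++) {
        uint8_t op = code[i];
        if (demo_adaptive[op] == 0) {
            continue;                       /* zero: not specializable, skip */
        }
        if (cache_used + demo_cache_req[op] > MAX_CACHE) {
            continue;                       /* family's cache entries don't fit */
        }
        cache_used += demo_cache_req[op];   /* reserve the entries ... */
        code[i] = demo_adaptive[op];        /* ... and rewrite to the adaptive form */
        printf("instruction %zu rewritten, %d cache entries used\n",
               i, cache_used);
    }
    return 0;
}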
@@ -144,7 +171,14 @@ _Py_GetSpecializationStats(void) { |
144 | 171 | static void |
145 | 172 | print_spec_stats(FILE *out, OpcodeStats *stats) |
146 | 173 | { |
| 174 | + /* Mark some opcodes as specializable for stats, |
| 175 | + * even though we don't specialize them yet. */ |
| 176 | + fprintf(out, " opcode[%d].specializable : 1\n", FOR_ITER); |
| 177 | + fprintf(out, " opcode[%d].specializable : 1\n", UNPACK_SEQUENCE); |
147 | 178 | for (int i = 0; i < 256; i++) { |
| 179 | + if (adaptive_opcodes[i]) { |
| 180 | + fprintf(out, " opcode[%d].specializable : 1\n", i); |
| 181 | + } |
148 | 182 | PRINT_STAT(i, specialization.success); |
149 | 183 | PRINT_STAT(i, specialization.failure); |
150 | 184 | PRINT_STAT(i, specialization.hit); |
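One likely reason the tables were moved up: the new loop above reads adaptive_opcodes, and a file-scope array in C must be declared before the first function that uses it, so the tables can no longer sit near get_cache_count further down the file. A minimal, self-contained illustration of that ordering constraint and of the "print a flag for every non-zero entry" pattern follows; the table contents and the opcode number 42 are arbitrary stand-ins.

#include <stdio.h>

/* Must be declared above print_flags(), just as adaptive_opcodes must now
 * sit above print_spec_stats().  The single non-zero entry is arbitrary. */
static const unsigned char demo_table[256] = { [42] = 1 };

static void
print_flags(FILE *out)
{
    for (int i = 0; i < 256; i++) {
        if (demo_table[i]) {   /* same pattern as the new loop above */
            fprintf(out, "    opcode[%d].specializable : 1\n", i);
        }
    }
}

int main(void)
{
    print_flags(stdout);
    return 0;
}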
@@ -266,33 +300,6 @@ get_cache_count(SpecializedCacheOrInstruction *quickened) { |
266 | 300 | return quickened[0].entry.zero.cache_count; |
267 | 301 | } |
268 | 302 |
269 | | -/* Map from opcode to adaptive opcode. |
270 | | - Values of zero are ignored. */ |
271 | | -static uint8_t adaptive_opcodes[256] = { |
272 | | - [LOAD_ATTR] = LOAD_ATTR_ADAPTIVE, |
273 | | - [LOAD_GLOBAL] = LOAD_GLOBAL_ADAPTIVE, |
274 | | - [LOAD_METHOD] = LOAD_METHOD_ADAPTIVE, |
275 | | - [BINARY_SUBSCR] = BINARY_SUBSCR_ADAPTIVE, |
276 | | - [STORE_SUBSCR] = STORE_SUBSCR_ADAPTIVE, |
277 | | - [CALL] = CALL_ADAPTIVE, |
278 | | - [STORE_ATTR] = STORE_ATTR_ADAPTIVE, |
279 | | - [BINARY_OP] = BINARY_OP_ADAPTIVE, |
280 | | - [COMPARE_OP] = COMPARE_OP_ADAPTIVE, |
281 | | -}; |
282 | | - |
283 | | -/* The number of cache entries required for a "family" of instructions. */ |
284 | | -static uint8_t cache_requirements[256] = { |
285 | | - [LOAD_ATTR] = 2, /* _PyAdaptiveEntry and _PyAttrCache */ |
286 | | - [LOAD_GLOBAL] = 2, /* _PyAdaptiveEntry and _PyLoadGlobalCache */ |
287 | | - [LOAD_METHOD] = 3, /* _PyAdaptiveEntry, _PyAttrCache and _PyObjectCache */ |
288 | | - [BINARY_SUBSCR] = 2, /* _PyAdaptiveEntry, _PyObjectCache */ |
289 | | - [STORE_SUBSCR] = 0, |
290 | | - [CALL] = 2, /* _PyAdaptiveEntry and _PyObjectCache/_PyCallCache */ |
291 | | - [STORE_ATTR] = 2, /* _PyAdaptiveEntry and _PyAttrCache */ |
292 | | - [BINARY_OP] = 1, // _PyAdaptiveEntry |
293 | | - [COMPARE_OP] = 1, /* _PyAdaptiveEntry */ |
294 | | -}; |
295 | | - |
296 | 303 | /* Return the oparg for the cache_offset and instruction index. |
297 | 304 | * |
298 | 305 | * If no cache is needed then return the original oparg. |
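For context on the region the arrays were removed from: get_cache_count() above reads the count out of the very first entry of the quickened array. CPython's real SpecializedCacheOrInstruction union is more involved than what the diff shows (note the extra .entry level), so the cut-down stand-in below only demonstrates the "first slot holds bookkeeping rather than an instruction" idea; all names here are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a cache-or-instruction union: the first array
 * element records how many cache entries follow. */
typedef union {
    struct { uint16_t cache_count; } zero;   /* entry[0]: bookkeeping */
    uint16_t instruction;                    /* later entries: code or caches */
} demo_cache_or_instr;

static int
demo_get_cache_count(const demo_cache_or_instr *quickened)
{
    return quickened[0].zero.cache_count;
}

int main(void)
{
    demo_cache_or_instr buf[4] = { { .zero = { .cache_count = 3 } } };
    printf("cache entries: %d\n", demo_get_cache_count(buf));
    return 0;
}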