Merged

Commits (46)
e7e819b
Baby steps: reimplement thresholds using adaptive counter abstractions
gvanrossum Mar 22, 2024
8c74dfa
Make temperature an adaptive counter like the rest
gvanrossum Mar 22, 2024
79036ff
Fix tests
gvanrossum Mar 22, 2024
54c1f8e
Remove dead adaptive_counter_jump_init()
gvanrossum Mar 22, 2024
015bb00
Fix no-GIL build failure in _COLD_EXIT
gvanrossum Mar 22, 2024
1730295
Use the right named constant in initial temperature
gvanrossum Mar 23, 2024
95f93b7
Add pycore_backoff.h, and include it, but don't use it yet
gvanrossum Mar 26, 2024
7df0f10
Reimplement adaptive counters in terms of backoff_counter
gvanrossum Mar 26, 2024
925cae7
Redefine T2 temperature as a backoff counter
gvanrossum Mar 27, 2024
f0c7fb0
Don't increment branch cache (bitmask) in INSTRUMENTED_INSTRUCTION
gvanrossum Mar 27, 2024
8f79a60
Don't increment branch cache (bitmask) in INSTRUMENTED_LINE
gvanrossum Mar 27, 2024
149e9c4
Don't update unreachable counters
gvanrossum Mar 27, 2024
1d76112
Simplify dynamic counter initialization for JUMP_BACKWARD
gvanrossum Mar 27, 2024
a5ffe02
Revert "Don't increment branch cache (bitmask) in INSTRUMENTED_LINE"
gvanrossum Mar 27, 2024
ce7726c
Different approach to avoid incrementing bitmask in INSTRUMENTED_LINE
gvanrossum Mar 27, 2024
e2c39f2
Different approach to avoid incrementing bitmask in INSTRUMENTED_INST…
gvanrossum Mar 27, 2024
8d22790
Fix dynamic counter initialization for JUMP_BACKWARD
gvanrossum Mar 27, 2024
cd8264e
Get rid of (unused) resume_threshold
gvanrossum Mar 27, 2024
d72c2ef
Hopeful fix for non-clang builds
gvanrossum Mar 27, 2024
f6bf194
In no-GIL mode, make INCREMENT_ADAPTIVE_COUNTER() a no-op
gvanrossum Mar 27, 2024
b991843
Revert "In no-GIL mode, make INCREMENT_ADAPTIVE_COUNTER() a no-op"
gvanrossum Mar 27, 2024
9b9f26a
Fix comment and fix size of optimizer_backedge_threshold
gvanrossum Mar 27, 2024
354cd81
_COLD_EXIT is not conditional on ENABLE_SPECIALIZATION
gvanrossum Mar 27, 2024
c1de44f
Rewrite _COLD_EXIT using backoff_counter_t directly
gvanrossum Mar 27, 2024
1fe27c9
Rename increment,decrement to advance,pause
gvanrossum Mar 27, 2024
225ea17
Give up on restricting backoff to <= 12
gvanrossum Mar 27, 2024
63d8bc7
Rip out backoff thresholds
gvanrossum Mar 29, 2024
8aa2c75
Fix tests
gvanrossum Mar 29, 2024
7bd4daa
Fix initial temperature, clean up
gvanrossum Mar 29, 2024
3f1c58a
Merge branch 'main' into exp-backoff
gvanrossum Mar 29, 2024
8ce8068
Remove unused variable
gvanrossum Mar 29, 2024
7bb5618
Admit defeat: advance_backoff_counter() may be entered when value == 0
gvanrossum Mar 29, 2024
63e286c
Merge branch 'main' into exp-backoff
gvanrossum Apr 1, 2024
7f64392
Merge remote-tracking branch 'origin/main' into exp-backoff
gvanrossum Apr 2, 2024
8eee1b4
Put backoff field before value
gvanrossum Apr 2, 2024
42c1f26
Small cleanup in .h files
gvanrossum Apr 3, 2024
a80cd0a
Rename DECREMENT_ADAPTIVE_COUNTER to ADVANCE_...
gvanrossum Apr 3, 2024
545c60e
Rename ADAPTIVE_COUNTER_IS_ZERO to ..._TRIGGERS
gvanrossum Apr 3, 2024
6c0bb30
Rename backoff_counter_is_zero to ..._triggers
gvanrossum Apr 3, 2024
a7c9b6d
Rename reset_background_counter to restart_...
gvanrossum Apr 3, 2024
3fee35f
Make _Py_BackoffCounter a member of _Py_CODEUNIT
gvanrossum Apr 3, 2024
df6f34c
Refactor initial counter values.
gvanrossum Apr 3, 2024
72f6b0d
Export tier 2 threshold from _testinternalcapi
gvanrossum Apr 3, 2024
dcee362
Merge remote-tracking branch 'origin/main' into exp-backoff
gvanrossum Apr 3, 2024
f38d922
Add news
gvanrossum Apr 3, 2024
ef6366b
Fix blurb formatting (I hope)
gvanrossum Apr 4, 2024
16 changes: 3 additions & 13 deletions Include/cpython/optimizer.h
@@ -89,7 +89,7 @@ static inline uint16_t uop_get_error_target(const _PyUOpInstruction *inst)

typedef struct _exit_data {
uint32_t target;
int16_t temperature;
uint16_t temperature;
const struct _PyExecutorObject *executor;
} _PyExitData;

@@ -115,11 +115,9 @@ typedef int (*optimize_func)(
struct _PyOptimizerObject {
PyObject_HEAD
optimize_func optimize;
/* These thresholds are treated as signed so do not exceed INT16_MAX
* Use INT16_MAX to indicate that the optimizer should never be called */
uint16_t resume_threshold;
uint16_t side_threshold;
/* Initial values for adaptive-style counters */
uint16_t backedge_threshold;
uint16_t side_threshold;
/* Data needed by the optimizer goes here, but is opaque to the VM */
};

@@ -151,14 +149,6 @@ extern void _Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_inval
PyAPI_FUNC(PyObject *)PyUnstable_Optimizer_NewCounter(void);
PyAPI_FUNC(PyObject *)PyUnstable_Optimizer_NewUOpOptimizer(void);

#define OPTIMIZER_BITS_IN_COUNTER 4
/* Minimum of 16 additional executions before retry */
#define MIN_TIER2_BACKOFF 4
#define MAX_TIER2_BACKOFF (15 - OPTIMIZER_BITS_IN_COUNTER)
#define OPTIMIZER_BITS_MASK ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1)
/* A value <= UINT16_MAX but large enough that when shifted is > UINT16_MAX */
#define OPTIMIZER_UNREACHABLE_THRESHOLD UINT16_MAX

#define _Py_MAX_ALLOWED_BUILTINS_MODIFICATIONS 3
#define _Py_MAX_ALLOWED_GLOBALS_MODIFICATIONS 6

117 changes: 117 additions & 0 deletions Include/internal/pycore_backoff.h
@@ -0,0 +1,117 @@

#ifndef Py_INTERNAL_BACKOFF_H
#define Py_INTERNAL_BACKOFF_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* 16-bit countdown counters using exponential backoff.
These are used by the adaptive specializer to count down until
it is time to specialize an instruction. If specialization fails
the counter is reset using exponential backoff.
Another use is for the Tier 2 optimizer to decide when to create
a new Tier 2 trace (executor). Again, exponential backoff is used.
The 16-bit counter is structured as a 12-bit unsigned 'value'
and a 4-bit 'backoff' field. When resetting the counter, the
backoff field is incremented (until it reaches a limit) and the
value is set to a bit mask representing the value 2**backoff - 1.
The maximum backoff is 12 (the number of value bits).
There is an exceptional value which must not be updated, 0xFFFF.
*/

typedef struct {
union {
uint16_t counter;
Member: It looks like this is just used for is_unreachable_backoff_counter. Maybe have backoff == 15 && value == MAX indicate that it is unreachable?

Member Author: It's also used to get the full 16-bit value as an int. And I like that the unreachable value cannot be constructed from a (value, backoff) pair because the backoff needs to be <= 12.

Member Author: I had to break down and remove the assert(backoff <= 12) because there were corner cases where the "unreachable" value would be put back in like this. But I still feel that .counter is useful -- it is how a backoff_counter_t value is converted to a plain uint16_t to store in certain (more) public spots.

Member: I still think that dropping the counter here and allowing backoff_counter_t in more "public spots" would make the code considerably simpler.

struct {
uint16_t value : 12;
uint16_t backoff : 4;
};
};
} backoff_counter_t;

static_assert(sizeof(backoff_counter_t) == 2, "backoff counter size should be 2 bytes");
Member (suggested change):
- static_assert(sizeof(backoff_counter_t) == 2, "backoff counter size should be 2 bytes");
+ static_assert(sizeof(backoff_counter_t) == sizeof(_Py_CODEUNIT), "backoff counter size should be the same size as a code unit");


#define UNREACHABLE_BACKOFF 0xFFFF
#define UNREACHABLE_BACKOFF_COUNTER ((backoff_counter_t){.counter = UNREACHABLE_BACKOFF})

/* Alias used by optimizer */
#define OPTIMIZER_UNREACHABLE_THRESHOLD UNREACHABLE_BACKOFF

static inline bool
is_unreachable_backoff_counter(backoff_counter_t counter)
{
return counter.counter == 0xFFFF;
}

static inline backoff_counter_t
make_backoff_counter(uint16_t value, uint16_t backoff)
{
assert(backoff <= 12);
assert(value <= 0xFFF);
return (backoff_counter_t){.value = value, .backoff = backoff};
}

static inline backoff_counter_t
forge_backoff_counter(uint16_t counter)
{
return (backoff_counter_t){.counter = counter};
}

static inline backoff_counter_t
reset_backoff_counter(backoff_counter_t counter)
Member: This used to restart the counter, increasing the backoff, rather than resetting it? Maybe restart_counter_and_backoff?

Member Author: Yes, it increments backoff and restarts the counter at 2**(backoff). I renamed it to restart_backoff_counter. (All the APIs here have backoff_counter in their name, to distinguish them from other forms of counters.)

{
assert(!is_unreachable_backoff_counter(counter));
if (counter.backoff < 12) {
return make_backoff_counter((1 << (counter.backoff + 1)) - 1, counter.backoff + 1);
}
else {
return make_backoff_counter((1 << 12) - 1, 12);
}
}

static inline backoff_counter_t
increment_backoff_counter(backoff_counter_t counter)
{
if (!is_unreachable_backoff_counter(counter)) {
assert(counter.value != 0xFFF);
return make_backoff_counter(counter.value + 1, counter.backoff);
}
else {
return counter;
}
}

static inline backoff_counter_t
decrement_backoff_counter(backoff_counter_t counter)
{
if (!is_unreachable_backoff_counter(counter)) {
assert(counter.value != 0);
return make_backoff_counter(counter.value - 1, counter.backoff);
}
else {
return counter;
}
}

static inline bool
backoff_counter_is_zero(backoff_counter_t counter)
Member: To avoid implementation details, maybe just counter_triggers? Or merge into the "advance", which could return a bool indicating whether it has reached the threshold:

bool backoff_counter_advance(backoff_counter_t *counter)
{
return counter.value == 0;
}

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BACKOFF_H */
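
To make the counter mechanics above concrete, here is a minimal standalone sketch (not part of the PR): a counter decrements once per event, and each time it reaches zero and the specialization or trace-creation attempt fails, it is reset with a larger backoff, so retry intervals grow as 2**backoff - 1 (1, 3, 7, ..., capped at 4095). The struct and helpers are re-declared so the sketch compiles outside CPython, and the starting point (value=1, backoff=1) is an assumption modeled on the warmup comment in pycore_code.h below.

/* Standalone illustration of the backoff-counter semantics from
 * pycore_backoff.h; declarations are repeated so this compiles
 * outside CPython (C11, for the anonymous union/struct). */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    union {
        uint16_t counter;
        struct {
            uint16_t value : 12;
            uint16_t backoff : 4;
        };
    };
} backoff_counter_t;

static backoff_counter_t
make_backoff_counter(uint16_t value, uint16_t backoff)
{
    return (backoff_counter_t){.value = value, .backoff = backoff};
}

/* Mirrors reset_backoff_counter(): bump backoff (capped at 12) and
 * refill value with 2**backoff - 1. */
static backoff_counter_t
reset_backoff_counter(backoff_counter_t c)
{
    uint16_t b = (uint16_t)(c.backoff < 12 ? c.backoff + 1 : 12);
    return make_backoff_counter((uint16_t)((1 << b) - 1), b);
}

int main(void)
{
    /* Assumed warmup: trigger on the second execution. */
    backoff_counter_t c = make_backoff_counter(1, 1);
    for (int attempt = 0; attempt < 6; attempt++) {
        int ticks = 0;
        while (c.value != 0) {            /* decrement_backoff_counter() */
            c = make_backoff_counter((uint16_t)(c.value - 1), c.backoff);
            ticks++;
        }
        printf("attempt %d after %d ticks (backoff=%d)\n",
               attempt, ticks, (int)c.backoff);
        c = reset_backoff_counter(c);     /* failure: back off exponentially */
    }
    return 0;
}

Run standalone, this prints gaps of 1, 3, 7, 15, 31, 63 ticks between successive attempts, which is the exponential spacing the commit messages describe.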
24 changes: 5 additions & 19 deletions Include/internal/pycore_code.h
@@ -448,18 +448,14 @@ write_location_entry_start(uint8_t *ptr, int code, int length)

/** Counters
* The first 16-bit value in each inline cache is a counter.
* When counting misses, the counter is treated as a simple unsigned value.
*
* When counting executions until the next specialization attempt,
* exponential backoff is used to reduce the number of specialization failures.
* The high 12 bits store the counter, the low 4 bits store the backoff exponent.
* On a specialization failure, the backoff exponent is incremented and the
* counter set to (2**backoff - 1).
* Backoff == 6 -> starting counter == 63, backoff == 10 -> starting counter == 1023.
* See pycore_backoff.h for more details.
* On a specialization failure, the backoff counter is reset.
 */

Member (suggested change):
- * On a specialization failure, the backoff counter is reset.
+ * On a specialization failure, the backoff counter is restarted.

/* With a 16-bit counter, we have 12 bits for the counter value, and 4 bits for the backoff */
#define ADAPTIVE_BACKOFF_BITS 4
#include "pycore_backoff.h"

// A value of 1 means that we attempt to specialize the *second* time each
// instruction is executed. Executing twice is a much better indicator of
@@ -477,13 +473,9 @@ write_location_entry_start(uint8_t *ptr, int code, int length)
#define ADAPTIVE_COOLDOWN_VALUE 52
#define ADAPTIVE_COOLDOWN_BACKOFF 0

#define MAX_BACKOFF_VALUE (16 - ADAPTIVE_BACKOFF_BITS)


static inline uint16_t
adaptive_counter_bits(uint16_t value, uint16_t backoff) {
Member: Maybe drop the adaptive_counter functions and replace them with the backoff_counter equivalents? To do that cleanly we will probably need to change _Py_CODEUNIT to:

typedef union {
    uint16_t cache;
    struct {
        uint8_t code;
        uint8_t arg;
    } op;
    backoff_counter_t counter;
} _Py_CODEUNIT;

Member Author: I decided to keep them and not expose the backoff counter structure to other places.

Member Author: I could dispense with adaptive_counter_bits(), using make_backoff_counter() directly below, but I would oppose getting rid of the cooldown() and warmup() helpers, because they are used in several/many places.

return ((value << ADAPTIVE_BACKOFF_BITS)
| (backoff & ((1 << ADAPTIVE_BACKOFF_BITS) - 1)));
return make_backoff_counter(value, backoff).counter;
}

static inline uint16_t
@@ -500,13 +492,7 @@ adaptive_counter_cooldown(void) {

static inline uint16_t
adaptive_counter_backoff(uint16_t counter) {
uint16_t backoff = counter & ((1 << ADAPTIVE_BACKOFF_BITS) - 1);
backoff++;
if (backoff > MAX_BACKOFF_VALUE) {
backoff = MAX_BACKOFF_VALUE;
}
uint16_t value = (uint16_t)(1 << backoff) - 1;
return adaptive_counter_bits(value, backoff);
return reset_backoff_counter(forge_backoff_counter(counter)).counter;
Member: I find this confusing. Is there a way to do this with a single call, rather than two calls and a field read?

}


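The "two calls and a field read" the last comment refers to is just a uint16_t round-trip: the raw cache word is reinterpreted as a backoff_counter_t, restarted, and converted back through the .counter view. A hedged restatement, reusing the declarations from the standalone sketch above (the sketch_ prefix marks it as illustrative, not the PR's code):

/* Sketch of the uint16_t round-trip in adaptive_counter_backoff(),
 * reusing backoff_counter_t and reset_backoff_counter() from the
 * previous sketch. */
static uint16_t
sketch_adaptive_counter_backoff(uint16_t counter)
{
    backoff_counter_t c = {.counter = counter};  /* forge_backoff_counter() */
    c = reset_backoff_counter(c);                /* bump backoff, refill value */
    return c.counter;                            /* back to a raw cache word */
}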
4 changes: 1 addition & 3 deletions Include/internal/pycore_interp.h
@@ -239,10 +239,8 @@ struct _is {
_PyOptimizerObject *optimizer;
_PyExecutorObject *executor_list_head;

/* These two values are shifted and offset to speed up check in JUMP_BACKWARD */
uint32_t optimizer_resume_threshold;
/* These three values are shifted and offset to speed up check in JUMP_BACKWARD */
uint32_t optimizer_backedge_threshold;

uint16_t optimizer_side_threshold;

_rare_events rare_events;
1 change: 1 addition & 0 deletions Makefile.pre.in
@@ -1117,6 +1117,7 @@ PYTHON_HEADERS= \
$(srcdir)/Include/internal/pycore_ast.h \
$(srcdir)/Include/internal/pycore_ast_state.h \
$(srcdir)/Include/internal/pycore_atexit.h \
$(srcdir)/Include/internal/pycore_backoff.h \
$(srcdir)/Include/internal/pycore_bitutils.h \
$(srcdir)/Include/internal/pycore_blocks_output_buffer.h \
$(srcdir)/Include/internal/pycore_brc.h \
1 change: 1 addition & 0 deletions PCbuild/pythoncore.vcxproj
@@ -204,6 +204,7 @@
<ClInclude Include="..\Include\internal\pycore_ast.h" />
<ClInclude Include="..\Include\internal\pycore_ast_state.h" />
<ClInclude Include="..\Include\internal\pycore_atexit.h" />
<ClInclude Include="..\Include\internal\pycore_backoff.h" />
<ClInclude Include="..\Include\internal\pycore_bitutils.h" />
<ClInclude Include="..\Include\internal\pycore_brc.h" />
<ClInclude Include="..\Include\internal\pycore_bytes_methods.h" />
110 changes: 56 additions & 54 deletions Python/bytecodes.c
@@ -8,6 +8,7 @@

#include "Python.h"
#include "pycore_abstract.h" // _PyIndex_Check()
#include "pycore_backoff.h"
#include "pycore_code.h"
#include "pycore_emscripten_signal.h" // _Py_CHECK_EMSCRIPTEN_SIGNALS
#include "pycore_function.h"
@@ -2348,41 +2349,35 @@ dummy_func(
JUMPBY(-oparg);
#if ENABLE_SPECIALIZATION
uint16_t counter = this_instr[1].cache;
this_instr[1].cache = counter + (1 << OPTIMIZER_BITS_IN_COUNTER);
/* We are using unsigned values, but we really want signed values, so
* do the 2s complement adjustment manually */
uint32_t offset_counter = counter ^ (1 << 15);
uint32_t threshold = tstate->interp->optimizer_backedge_threshold;
assert((threshold & OPTIMIZER_BITS_MASK) == 0);
// Use '>=' not '>' so that the optimizer/backoff bits do not effect the result.
// Double-check that the opcode isn't instrumented or something:
if (offset_counter >= threshold && this_instr->op.code == JUMP_BACKWARD) {
_Py_CODEUNIT *start = this_instr;
/* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */
while (oparg > 255) {
oparg >>= 8;
start--;
}
_PyExecutorObject *executor;
int optimized = _PyOptimizer_Optimize(frame, start, stack_pointer, &executor);
ERROR_IF(optimized < 0, error);
if (optimized) {
assert(tstate->previous_executor == NULL);
tstate->previous_executor = Py_None;
GOTO_TIER_TWO(executor);
if (ADAPTIVE_COUNTER_IS_ZERO(counter)) {
Member: We could initialize this in _PyCode_Quicken, which would save the extra branch here.

Member Author: That's tricky, because we can turn different optimizers off dynamically. The default (dummy) optimizer wants to initialize this to UNREACHABLE_BACKOFF, the uops optimizer uses value=16, and the counter optimizer (used in tests) uses value=0. The old strategy was more flexible, at least until the backoff kicked in. My current strategy is medium in flexibility -- the backoff sequence is initialized when the function first reaches the JUMP_BACKWARD. I've tried to minimize the cost of the extra branch by putting the ADAPTIVE_COUNTER_IS_ZERO() check before it, so it only hits when we're about to create a trace, which surely overwhelms the cost of an extra branch.

if (counter == 0) {
// Dynamically initialize the counter
counter = tstate->interp->optimizer_backedge_threshold;
this_instr[1].cache = counter;
}
else {
int backoff = this_instr[1].cache & OPTIMIZER_BITS_MASK;
backoff++;
if (backoff < MIN_TIER2_BACKOFF) {
backoff = MIN_TIER2_BACKOFF;
if (ADAPTIVE_COUNTER_IS_ZERO(counter) && this_instr->op.code == JUMP_BACKWARD) {
_Py_CODEUNIT *start = this_instr;
/* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */
while (oparg > 255) {
oparg >>= 8;
start--;
}
_PyExecutorObject *executor;
int optimized = _PyOptimizer_Optimize(frame, start, stack_pointer, &executor);
ERROR_IF(optimized < 0, error);
if (optimized) {
assert(tstate->previous_executor == NULL);
tstate->previous_executor = Py_None;
GOTO_TIER_TWO(executor);
}
else if (backoff > MAX_TIER2_BACKOFF) {
backoff = MAX_TIER2_BACKOFF;
else {
this_instr[1].cache = adaptive_counter_backoff(counter);
}
this_instr[1].cache = ((UINT16_MAX << OPTIMIZER_BITS_IN_COUNTER) << backoff) | backoff;
}
}
else {
DECREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
}
#endif /* ENABLE_SPECIALIZATION */
}

@@ -3973,7 +3968,12 @@ dummy_func(
ERROR_IF(next_opcode < 0, error);
next_instr = this_instr;
if (_PyOpcode_Caches[next_opcode]) {
INCREMENT_ADAPTIVE_COUNTER(this_instr[1].cache);
if (next_opcode != POP_JUMP_IF_FALSE &&
next_opcode != POP_JUMP_IF_TRUE &&
next_opcode != POP_JUMP_IF_NOT_NONE &&
next_opcode != POP_JUMP_IF_NONE) {
INCREMENT_ADAPTIVE_COUNTER(next_instr[1].cache);
}
}
assert(next_opcode > 0 && next_opcode < 256);
opcode = next_opcode;
@@ -4159,34 +4159,36 @@ dummy_func(
tier2 op(_COLD_EXIT, (--)) {
_PyExecutorObject *previous = (_PyExecutorObject *)tstate->previous_executor;
_PyExitData *exit = &previous->exits[oparg];
exit->temperature++;
PyCodeObject *code = _PyFrame_GetCode(frame);
_Py_CODEUNIT *target = _PyCode_CODE(code) + exit->target;
if (exit->temperature < (int32_t)tstate->interp->optimizer_side_threshold) {
GOTO_TIER_ONE(target);
}
_PyExecutorObject *executor;
if (target->op.code == ENTER_EXECUTOR) {
executor = code->co_executors->executors[target->op.arg];
Py_INCREF(executor);
} else {
int optimized = _PyOptimizer_Optimize(frame, target, stack_pointer, &executor);
if (optimized <= 0) {
int32_t new_temp = -1 * tstate->interp->optimizer_side_threshold;
exit->temperature = (new_temp < INT16_MIN) ? INT16_MIN : new_temp;
if (optimized < 0) {
Py_DECREF(previous);
tstate->previous_executor = Py_None;
GOTO_UNWIND();
#if ENABLE_SPECIALIZATION
if (ADAPTIVE_COUNTER_IS_ZERO(exit->temperature)) {
_PyExecutorObject *executor;
if (target->op.code == ENTER_EXECUTOR) {
executor = code->co_executors->executors[target->op.arg];
Py_INCREF(executor);
}
else {
int optimized = _PyOptimizer_Optimize(frame, target, stack_pointer, &executor);
if (optimized <= 0) {
exit->temperature = adaptive_counter_backoff(exit->temperature);
if (optimized < 0) {
Py_DECREF(previous);
tstate->previous_executor = Py_None;
GOTO_UNWIND();
}
GOTO_TIER_ONE(target);
}
GOTO_TIER_ONE(target);
}
/* We need two references. One to store in exit->executor and
* one to keep the executor alive when executing. */
Py_INCREF(executor);
exit->executor = executor;
GOTO_TIER_TWO(executor);
}
/* We need two references. One to store in exit->executor and
* one to keep the executor alive when executing. */
Py_INCREF(executor);
exit->executor = executor;
GOTO_TIER_TWO(executor);
DECREMENT_ADAPTIVE_COUNTER(exit->temperature);
#endif
GOTO_TIER_ONE(target);
}

tier2 op(_START_EXECUTOR, (executor/4 --)) {
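
Abstracting away the interpreter plumbing, the JUMP_BACKWARD hunk above reduces to a small per-tick state machine. This is a simplified sketch reusing the declarations from the first example: backedge_threshold and try_optimize() are stand-ins for interp->optimizer_backedge_threshold and _PyOptimizer_Optimize(), a cache word of 0 is taken to mean "not yet initialized" as discussed in the review thread, and the 0xFFFF "unreachable" sentinel is ignored for brevity.

/* Simplified model of the new JUMP_BACKWARD counter flow (a sketch,
 * not the PR's code). Returns the new cache word for this code unit. */
static uint16_t
jump_backward_tick(uint16_t cache, uint16_t backedge_threshold,
                   bool (*try_optimize)(void))
{
    backoff_counter_t c = {.counter = cache};
    if (c.value != 0) {
        /* Not triggered yet: plain countdown (DECREMENT_ADAPTIVE_COUNTER). */
        return make_backoff_counter((uint16_t)(c.value - 1), c.backoff).counter;
    }
    if (cache == 0) {
        /* First trigger: dynamically install the active optimizer's
         * initial counter, since optimizers can change at runtime. */
        return backedge_threshold;
    }
    if (!try_optimize()) {
        /* Trace creation failed: restart with exponential backoff. */
        return reset_backoff_counter(c).counter;
    }
    return cache;  /* trace created; execution jumps to the executor */
}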