diff --git a/Include/internal/pycore_uops.h b/Include/internal/pycore_uops.h
index e2b94894681f44..ea8f90bf8c1d8f 100644
--- a/Include/internal/pycore_uops.h
+++ b/Include/internal/pycore_uops.h
@@ -24,7 +24,7 @@ typedef struct {
     _PyUOpInstruction trace[1];
 } _PyUOpExecutorObject;
 
-_PyInterpreterFrame *_PyUopExecute(
+_PyInterpreterFrame *_PyUOpExecute(
     _PyExecutorObject *executor,
     _PyInterpreterFrame *frame,
     PyObject **stack_pointer);
diff --git a/Python/bytecodes.c b/Python/bytecodes.c
index a1ca66e457e47d..2e5f6c8d0d10e2 100644
--- a/Python/bytecodes.c
+++ b/Python/bytecodes.c
@@ -2357,7 +2357,7 @@ dummy_func(
             JUMPBY(1-original_oparg);
             frame->instr_ptr = next_instr;
             Py_INCREF(executor);
-            if (executor->execute == _PyUopExecute) {
+            if (executor->execute == _PyUOpExecute) {
                 current_executor = (_PyUOpExecutorObject *)executor;
                 GOTO_TIER_TWO();
             }
diff --git a/Python/ceval.c b/Python/ceval.c
index def75fd114cb86..1806ceb7fa9681 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -647,7 +647,7 @@ static const _Py_CODEUNIT _Py_INTERPRETER_TRAMPOLINE_INSTRUCTIONS[] = {
 
 extern const struct _PyCode_DEF(8) _Py_InitCleanup;
 
-extern const char *_PyUopName(int index);
+extern const char *_PyUOpName(int index);
 
 /* Disable unused label warnings.  They are handy for debugging, even
    if computed gotos aren't used. */
@@ -1002,7 +1002,7 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
         DPRINTF(3,
                 "%4d: uop %s, oparg %d, operand %" PRIu64 ", target %d, stack_level %d\n",
                 (int)(next_uop - current_executor->trace),
-                _PyUopName(uopcode),
+                _PyUOpName(uopcode),
                 next_uop->oparg,
                 next_uop->operand,
                 next_uop->target,
@@ -1051,8 +1051,8 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
 pop_1_error_tier_two:
     STACK_SHRINK(1);
 error_tier_two:
-    DPRINTF(2, "Error: [Uop %d (%s), oparg %d, operand %" PRIu64 ", target %d @ %d]\n",
-            uopcode, _PyUopName(uopcode), next_uop[-1].oparg, next_uop[-1].operand, next_uop[-1].target,
+    DPRINTF(2, "Error: [UOp %d (%s), oparg %d, operand %" PRIu64 ", target %d @ %d]\n",
+            uopcode, _PyUOpName(uopcode), next_uop[-1].oparg, next_uop[-1].operand, next_uop[-1].target,
             (int)(next_uop - current_executor->trace - 1));
     OPT_HIST(trace_uop_execution_counter, trace_run_length_hist);
     frame->return_offset = 0;  // Don't leave this random
@@ -1064,8 +1064,8 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
 deoptimize:
     // On DEOPT_IF we just repeat the last instruction.
     // This presumes nothing was popped from the stack (nor pushed).
- DPRINTF(2, "DEOPT: [Uop %d (%s), oparg %d, operand %" PRIu64 ", target %d @ %d]\n", - uopcode, _PyUopName(uopcode), next_uop[-1].oparg, next_uop[-1].operand, next_uop[-1].target, + DPRINTF(2, "DEOPT: [UOp %d (%s), oparg %d, operand %" PRIu64 ", target %d @ %d]\n", + uopcode, _PyUOpName(uopcode), next_uop[-1].oparg, next_uop[-1].operand, next_uop[-1].target, (int)(next_uop - current_executor->trace - 1)); OPT_HIST(trace_uop_execution_counter, trace_run_length_hist); UOP_STAT_INC(uopcode, miss); diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h index dedd793111b7ff..0ac99e759deb12 100644 --- a/Python/generated_cases.c.h +++ b/Python/generated_cases.c.h @@ -2331,7 +2331,7 @@ JUMPBY(1-original_oparg); frame->instr_ptr = next_instr; Py_INCREF(executor); - if (executor->execute == _PyUopExecute) { + if (executor->execute == _PyUOpExecute) { current_executor = (_PyUOpExecutorObject *)executor; GOTO_TIER_TWO(); } diff --git a/Python/optimizer.c b/Python/optimizer.c index ec59fea26fbc70..dd24fbebbfd2a9 100644 --- a/Python/optimizer.c +++ b/Python/optimizer.c @@ -325,7 +325,7 @@ uop_dealloc(_PyUOpExecutorObject *self) { } const char * -_PyUopName(int index) +_PyUOpName(int index) { if (index <= MAX_REAL_OPCODE) { return _PyOpcode_OpName[index]; @@ -347,7 +347,7 @@ uop_item(_PyUOpExecutorObject *self, Py_ssize_t index) PyErr_SetNone(PyExc_IndexError); return NULL; } - const char *name = _PyUopName(self->trace[index].opcode); + const char *name = _PyUOpName(self->trace[index].opcode); if (name == NULL) { name = ""; } @@ -388,7 +388,7 @@ PyTypeObject _PyUOpExecutor_Type = { /* TO DO -- Generate these tables */ static const uint16_t -_PyUop_Replacements[OPCODE_METADATA_SIZE] = { +_PyUOp_Replacements[OPCODE_METADATA_SIZE] = { [_ITER_JUMP_RANGE] = _GUARD_NOT_EXHAUSTED_RANGE, [_ITER_JUMP_LIST] = _GUARD_NOT_EXHAUSTED_LIST, [_ITER_JUMP_TUPLE] = _GUARD_NOT_EXHAUSTED_TUPLE, @@ -451,7 +451,7 @@ translate_bytecode_to_trace( #define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \ DPRINTF(2, \ " ADD_TO_TRACE(%s, %d, %" PRIu64 ")\n", \ - _PyUopName(OPCODE), \ + _PyUOpName(OPCODE), \ (OPARG), \ (uint64_t)(OPERAND)); \ assert(trace_length < max_length); \ @@ -474,7 +474,7 @@ translate_bytecode_to_trace( } // Reserve space for N uops, plus 3 for _SET_IP, _CHECK_VALIDITY and _EXIT_TRACE -#define RESERVE(needed) RESERVE_RAW((needed) + 3, _PyUopName(opcode)) +#define RESERVE(needed) RESERVE_RAW((needed) + 3, _PyUOpName(opcode)) // Trace stack operations (used by _PUSH_FRAME, _POP_FRAME) #define TRACE_STACK_PUSH() \ @@ -546,8 +546,8 @@ translate_bytecode_to_trace( uint32_t uopcode = BRANCH_TO_GUARD[opcode - POP_JUMP_IF_FALSE][jump_likely]; _Py_CODEUNIT *next_instr = instr + 1 + _PyOpcode_Caches[_PyOpcode_Deopt[opcode]]; DPRINTF(4, "%s(%d): counter=%x, bitcount=%d, likely=%d, uopcode=%s\n", - _PyUopName(opcode), oparg, - counter, bitcount, jump_likely, _PyUopName(uopcode)); + _PyUOpName(opcode), oparg, + counter, bitcount, jump_likely, _PyUOpName(uopcode)); ADD_TO_TRACE(uopcode, max_length, 0, target); if (jump_likely) { _Py_CODEUNIT *target_instr = next_instr + oparg; @@ -615,8 +615,8 @@ translate_bytecode_to_trace( oparg += extras; } } - if (_PyUop_Replacements[uop]) { - uop = _PyUop_Replacements[uop]; + if (_PyUOp_Replacements[uop]) { + uop = _PyUOp_Replacements[uop]; if (uop == _FOR_ITER_TIER_TWO) { target += 1 + INLINE_CACHE_ENTRIES_FOR_ITER + oparg + 1; assert(_PyCode_CODE(code)[target-1].op.code == END_FOR || @@ -712,7 +712,7 @@ translate_bytecode_to_trace( } break; } - DPRINTF(2, "Unsupported 
opcode %s\n", _PyUopName(opcode)); + DPRINTF(2, "Unsupported opcode %s\n", _PyUOpName(opcode)); OPT_UNSUPPORTED_OPCODE(opcode); goto done; // Break out of loop } // End default @@ -832,7 +832,7 @@ make_executor_from_uops(_PyUOpInstruction *buffer, _PyBloomFilter *dependencies) dest--; } assert(dest == -1); - executor->base.execute = _PyUopExecute; + executor->base.execute = _PyUOpExecute; _Py_ExecutorInit((_PyExecutorObject *)executor, dependencies); #ifdef Py_DEBUG char *python_lltrace = Py_GETENV("PYTHON_LLTRACE"); @@ -845,7 +845,7 @@ make_executor_from_uops(_PyUOpInstruction *buffer, _PyBloomFilter *dependencies) for (int i = 0; i < length; i++) { printf("%4d %s(%d, %d, %" PRIu64 ")\n", i, - _PyUopName(executor->trace[i].opcode), + _PyUOpName(executor->trace[i].opcode), executor->trace[i].oparg, executor->trace[i].target, executor->trace[i].operand); @@ -888,11 +888,11 @@ uop_optimize( return 1; } -/* Dummy execute() function for Uop Executor. +/* Dummy execute() function for UOp Executor. * The actual implementation is inlined in ceval.c, * in _PyEval_EvalFrameDefault(). */ _PyInterpreterFrame * -_PyUopExecute(_PyExecutorObject *executor, _PyInterpreterFrame *frame, PyObject **stack_pointer) +_PyUOpExecute(_PyExecutorObject *executor, _PyInterpreterFrame *frame, PyObject **stack_pointer) { Py_FatalError("Tier 2 is now inlined into Tier 1"); }