@@ -72,18 +72,26 @@
 
 /* Pseudo-instructions used in the compiler,
  * but turned into NOPs by the assembler. */
-#define SETUP_FINALLY 255
-#define SETUP_CLEANUP 254
-#define SETUP_WITH 253
-#define POP_BLOCK 252
-#define JUMP 251
+#define SETUP_FINALLY -1
+#define SETUP_CLEANUP -2
+#define SETUP_WITH -3
+#define POP_BLOCK -4
+#define JUMP -5
+
+#define MIN_VIRTUAL_OPCODE -5
+#define MAX_ALLOWED_OPCODE 254
+
+#define IS_WITHIN_OPCODE_RANGE(opcode) \
+        ((opcode) >= MIN_VIRTUAL_OPCODE && (opcode) <= MAX_ALLOWED_OPCODE)
+
+#define IS_VIRTUAL_OPCODE(opcode) ((opcode) < 0)
 
 #define IS_TOP_LEVEL_AWAIT(c) ( \
         (c->c_flags->cf_flags & PyCF_ALLOW_TOP_LEVEL_AWAIT) \
         && (c->u->u_ste->ste_type == ModuleBlock))
 
 struct instr {
-    unsigned char i_opcode;
+    int i_opcode;
     int i_oparg;
     /* target block (if jump instruction) */
     struct basicblock_ *i_target;
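
(Editorial aside, not part of the patch.) With these definitions the pseudo-instructions are exactly the negative opcodes, so a single sign check identifies them and the range check bounds everything the compiler may emit. A minimal sketch, assuming only the defines above are in scope:

    /* Illustrative only -- each value follows directly from the defines above. */
    assert(IS_VIRTUAL_OPCODE(SETUP_FINALLY));   /* -1: a pseudo-instruction */
    assert(!IS_VIRTUAL_OPCODE(0));              /* real opcodes are >= 0 */
    assert(IS_WITHIN_OPCODE_RANGE(JUMP));       /* -5 == MIN_VIRTUAL_OPCODE */
    assert(!IS_WITHIN_OPCODE_RANGE(255));       /* 255 > MAX_ALLOWED_OPCODE */
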
@@ -115,8 +123,13 @@ is_bit_set_in_table(const uint32_t *table, int bitindex) {
     * Word is indexed by (bitindex>>ln(size of int in bits)).
     * Bit within word is the low bits of bitindex.
     */
-    uint32_t word = table[bitindex >> LOG_BITS_PER_INT];
-    return (word >> (bitindex & MASK_LOW_LOG_BITS)) & 1;
+    if (bitindex >= 0 && bitindex < 256) {
+        uint32_t word = table[bitindex >> LOG_BITS_PER_INT];
+        return (word >> (bitindex & MASK_LOW_LOG_BITS)) & 1;
+    }
+    else {
+        return 0;
+    }
 }
 
 static inline int
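
(Editorial aside.) To make the indexing comment concrete, assume 32-bit table words, so that LOG_BITS_PER_INT is 5 and MASK_LOW_LOG_BITS is 31: bitindex 100 reads bit 100 & 31 == 4 of word 100 >> 5 == 3. The new bounds check matters because pseudo-instruction opcodes are now negative and would otherwise index before the start of the table:

    /* Hypothetical 256-bit table with only bit 100 set. */
    uint32_t t[8] = {0};
    t[100 >> 5] = 1u << (100 & 31);         /* t[3], bit 4 */
    assert(is_bit_set_in_table(t, 100));    /* found */
    assert(!is_bit_set_in_table(t, -3));    /* SETUP_WITH: guarded, returns 0 */
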
@@ -125,18 +138,25 @@ is_relative_jump(struct instr *i)
     return is_bit_set_in_table(_PyOpcode_RelativeJump, i->i_opcode);
 }
 
+static inline int
+is_block_push(struct instr *instr)
+{
+    int opcode = instr->i_opcode;
+    return opcode == SETUP_FINALLY || opcode == SETUP_WITH || opcode == SETUP_CLEANUP;
+}
+
 static inline int
 is_jump(struct instr *i)
 {
-    return i->i_opcode >= SETUP_WITH ||
-           i->i_opcode == JUMP ||
+    return i->i_opcode == JUMP ||
            is_bit_set_in_table(_PyOpcode_Jump, i->i_opcode);
 }
 
 static int
 instr_size(struct instr *instruction)
 {
     int opcode = instruction->i_opcode;
+    assert(!IS_VIRTUAL_OPCODE(opcode));
     int oparg = HAS_ARG(opcode) ? instruction->i_oparg : 0;
     int extended_args = (0xFFFFFF < oparg) + (0xFFFF < oparg) + (0xFF < oparg);
     int caches = _PyOpcode_Caches[opcode];
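
(Editorial aside.) The extended_args sum counts one EXTENDED_ARG prefix per threshold the oparg exceeds; a few worked values, assuming nothing beyond the expression itself:

    int n0 = (0xFFFFFF < 0x42)     + (0xFFFF < 0x42)     + (0xFF < 0x42);     /* 0 */
    int n1 = (0xFFFFFF < 0x1234)   + (0xFFFF < 0x1234)   + (0xFF < 0x1234);   /* 1 */
    int n2 = (0xFFFFFF < 0x123456) + (0xFFFF < 0x123456) + (0xFF < 0x123456); /* 2 */
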
@@ -147,6 +167,7 @@ static void
 write_instr(_Py_CODEUNIT *codestr, struct instr *instruction, int ilen)
 {
     int opcode = instruction->i_opcode;
+    assert(!IS_VIRTUAL_OPCODE(opcode));
     int oparg = HAS_ARG(opcode) ? instruction->i_oparg : 0;
     int caches = _PyOpcode_Caches[opcode];
     switch (ilen - caches) {
@@ -1177,6 +1198,7 @@ static int
 compiler_addop_line(struct compiler *c, int opcode, int line,
                     int end_line, int col_offset, int end_col_offset)
 {
+    assert(IS_WITHIN_OPCODE_RANGE(opcode));
     assert(!HAS_ARG(opcode) || IS_ARTIFICIAL(opcode));
 
     if (compiler_use_new_implicit_block_if_needed(c) < 0) {
@@ -1419,6 +1441,7 @@ compiler_addop_i_line(struct compiler *c, int opcode, Py_ssize_t oparg,
        The argument of a concrete bytecode instruction is limited to 8-bit.
        EXTENDED_ARG is used for 16, 24, and 32-bit arguments. */
 
+    assert(IS_WITHIN_OPCODE_RANGE(opcode));
     assert(HAS_ARG(opcode));
     assert(0 <= oparg && oparg <= 2147483647);
@@ -1462,7 +1485,8 @@ static int add_jump_to_block(struct compiler *c, int opcode,
                             int col_offset, int end_col_offset,
                             basicblock *target)
 {
-    assert(HAS_ARG(opcode));
+    assert(IS_WITHIN_OPCODE_RANGE(opcode));
+    assert(HAS_ARG(opcode) || IS_VIRTUAL_OPCODE(opcode));
     assert(target != NULL);
 
     if (compiler_use_new_implicit_block_if_needed(c) < 0) {
@@ -7040,7 +7064,7 @@ stackdepth(struct compiler *c)
             maxdepth = new_depth;
         }
         assert(depth >= 0); /* invalid code or bug in stackdepth() */
-        if (is_jump(instr)) {
+        if (is_jump(instr) || is_block_push(instr)) {
             effect = stack_effect(instr->i_opcode, instr->i_oparg, 1);
             assert(effect != PY_INVALID_STACK_EFFECT);
             int target_depth = depth + effect;
@@ -7159,13 +7183,6 @@ assemble_emit_table_pair(struct assembler* a, PyObject** table, int* offset,
     return 1;
 }
 
-static int
-is_block_push(struct instr *instr)
-{
-    int opcode = instr->i_opcode;
-    return opcode == SETUP_FINALLY || opcode == SETUP_WITH || opcode == SETUP_CLEANUP;
-}
-
 static basicblock *
 push_except_block(ExceptStack *stack, struct instr *setup) {
     assert(is_block_push(setup));
@@ -8600,6 +8617,7 @@ apply_static_swaps(basicblock *block, int i)
 static bool
 jump_thread(struct instr *inst, struct instr *target, int opcode)
 {
+    assert(!IS_VIRTUAL_OPCODE(opcode) || opcode == JUMP);
     assert(is_jump(inst));
     assert(is_jump(target));
     // bpo-45773: If inst->i_target == target->i_target, then nothing actually
@@ -8629,7 +8647,7 @@ optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts)
         struct instr *inst = &bb->b_instr[i];
         int oparg = inst->i_oparg;
         int nextop = i+1 < bb->b_iused ? bb->b_instr[i+1].i_opcode : 0;
-        if (is_jump(inst)) {
+        if (is_jump(inst) || is_block_push(inst)) {
             /* Skip over empty basic blocks. */
             while (inst->i_target->b_iused == 0) {
                 inst->i_target = inst->i_target->b_next;
@@ -8996,8 +9014,9 @@ mark_reachable(struct assembler *a) {
         }
         for (int i = 0; i < b->b_iused; i++) {
             basicblock *target;
-            if (is_jump(&b->b_instr[i])) {
-                target = b->b_instr[i].i_target;
+            struct instr *instr = &b->b_instr[i];
+            if (is_jump(instr) || is_block_push(instr)) {
+                target = instr->i_target;
                 if (target->b_predecessors == 0) {
                     *sp++ = target;
                 }
@@ -9073,13 +9092,6 @@ propagate_line_numbers(struct assembler *a) {
             }
         }
         if (is_jump(&b->b_instr[b->b_iused-1])) {
-            switch (b->b_instr[b->b_iused-1].i_opcode) {
-                /* Note: Only actual jumps, not exception handlers */
-                case SETUP_WITH:
-                case SETUP_FINALLY:
-                case SETUP_CLEANUP:
-                    continue;
-            }
             basicblock *target = b->b_instr[b->b_iused-1].i_target;
             if (target->b_predecessors == 1) {
                 if (target->b_instr[0].i_lineno < 0) {
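
(Editorial aside, also covering the identical deletion in the next hunk.) These switches were needed because the old is_jump() classified SETUP_WITH/SETUP_FINALLY/SETUP_CLEANUP as jumps via the i_opcode >= SETUP_WITH test removed earlier; with negative pseudo-opcodes is_jump() never matches them, so the filter is dead code. A sketch of the new invariant, assuming this patch's definitions:

    struct instr setup = { .i_opcode = SETUP_FINALLY };
    assert(is_block_push(&setup));   /* handled explicitly where required */
    assert(!is_jump(&setup));        /* no longer masquerades as a jump   */
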
@@ -9205,13 +9217,6 @@ duplicate_exits_without_lineno(struct compiler *c)
      */
     for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
         if (b->b_iused > 0 && is_jump(&b->b_instr[b->b_iused-1])) {
-            switch (b->b_instr[b->b_iused-1].i_opcode) {
-                /* Note: Only actual jumps, not exception handlers */
-                case SETUP_WITH:
-                case SETUP_FINALLY:
-                case SETUP_CLEANUP:
-                    continue;
-            }
             basicblock *target = b->b_instr[b->b_iused-1].i_target;
             if (is_exit_without_lineno(target) && target->b_predecessors > 1) {
                 basicblock *new_target = compiler_copy_block(c, target);