Ruby 3.1.3p185 (2022-11-24 revision 1a6b16756e0ba6b95ab71a441357ed5484e33498)
vm_callinfo.h
#ifndef RUBY_VM_CALLINFO_H /*-*-C-*-vi:se ft=c:*/
#define RUBY_VM_CALLINFO_H

#include "debug_counter.h"

enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit,     /* m(*args) */
    VM_CALL_ARGS_BLOCKARG_bit,  /* m(&block) */
    VM_CALL_FCALL_bit,          /* m(...) */
    VM_CALL_VCALL_bit,          /* m */
    VM_CALL_ARGS_SIMPLE_bit,    /* !(ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
    VM_CALL_BLOCKISEQ_bit,      /* has blockiseq */
    VM_CALL_KWARG_bit,          /* has kwarg */
    VM_CALL_KW_SPLAT_bit,       /* m(**opts) */
    VM_CALL_TAILCALL_bit,       /* located at tail position */
    VM_CALL_SUPER_bit,          /* super */
    VM_CALL_ZSUPER_bit,         /* zsuper */
    VM_CALL_OPT_SEND_bit,       /* internal flag */
    VM_CALL_KW_SPLAT_MUT_bit,   /* kw splat hash can be modified (to avoid allocating a new one) */
    VM_CALL__END
};

#define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
#define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
#define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
#define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
#define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
#define VM_CALL_BLOCKISEQ       (0x01 << VM_CALL_BLOCKISEQ_bit)
#define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
#define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
#define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
#define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
#define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
#define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
#define VM_CALL_KW_SPLAT_MUT    (0x01 << VM_CALL_KW_SPLAT_MUT_bit)
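
/* Illustrative sketch (not part of the original header): a call site's
 * properties are tested by masking the flag word with the macros above.
 * `ci` stands for any valid call info obtained from an instruction
 * sequence. */
#if 0
static void
example_inspect_flags(const struct rb_callinfo *ci)
{
    unsigned int flag = vm_ci_flag(ci);

    if (flag & VM_CALL_ARGS_SPLAT)    { /* call site looked like m(*args)  */ }
    if (flag & VM_CALL_ARGS_BLOCKARG) { /* call site looked like m(&block) */ }
    if (flag & VM_CALL_FCALL)         { /* receiver-less call              */ }
    if (flag & VM_CALL_ARGS_SIMPLE) {
        /* fast path: no splat, no block arg, no kwargs, no block iseq */
    }
}
#endif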

struct rb_callinfo_kwarg {
    int keyword_len;
    VALUE keywords[];
};

static inline size_t
rb_callinfo_kwarg_bytes(int keyword_len)
{
    return rb_size_mul_add_or_raise(
        keyword_len,
        sizeof(VALUE),
        sizeof(struct rb_callinfo_kwarg),
        rb_eRuntimeError);
}
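
/* Illustrative sketch (not part of the original header): because of the
 * flexible array member, a kwarg struct is allocated as one block whose
 * size comes from rb_callinfo_kwarg_bytes().  `kw_len` is a made-up
 * variable standing for the number of keyword arguments. */
#if 0
struct rb_callinfo_kwarg *kw_arg = ruby_xmalloc(rb_callinfo_kwarg_bytes(kw_len));
kw_arg->keyword_len = kw_len;
/* kw_arg->keywords[0 .. kw_len-1] are then filled with Symbol VALUEs */
#endif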

// imemo_callinfo
struct rb_callinfo {
    VALUE flags;
    const struct rb_callinfo_kwarg *kwarg;
    VALUE mid;
    VALUE flag;
    VALUE argc;
};

#ifndef USE_EMBED_CI
#define USE_EMBED_CI 1
#endif

#if SIZEOF_VALUE == 8
#define CI_EMBED_TAG_bits 1
#define CI_EMBED_ARGC_bits 15
#define CI_EMBED_FLAG_bits 16
#define CI_EMBED_ID_bits 32
#elif SIZEOF_VALUE == 4
#define CI_EMBED_TAG_bits 1
#define CI_EMBED_ARGC_bits 3
#define CI_EMBED_FLAG_bits 13
#define CI_EMBED_ID_bits 15
#endif

#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
#error "CI_EMBED_*_bits must sum to exactly SIZEOF_VALUE * 8"
#endif

#define CI_EMBED_FLAG 0x01
#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
#define CI_EMBED_ARGC_MASK ((((VALUE)1)<<CI_EMBED_ARGC_bits) - 1)
#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
#define CI_EMBED_FLAG_MASK ((((VALUE)1)<<CI_EMBED_FLAG_bits) - 1)
#define CI_EMBED_ID_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
#define CI_EMBED_ID_MASK ((((VALUE)1)<<CI_EMBED_ID_bits) - 1)
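
/* Illustrative sketch (not part of the original header): with a 64-bit
 * VALUE, a packed call info is a tagged word with the low bit set (so it
 * also looks like a Fixnum and is ignored by the GC):
 *
 *   bit   0      : tag (1)
 *   bits  1..15  : argc      (15 bits)
 *   bits 16..31  : flag      (16 bits)
 *   bits 32..63  : method ID (32 bits)
 *
 * Decoding by hand matches the accessors defined below: */
#if 0
VALUE word = (VALUE)ci;                            /* a packed ci */
unsigned int argc = (word >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK;
unsigned int flag = (word >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK;
ID mid            = (word >> CI_EMBED_ID_SHFT)   & CI_EMBED_ID_MASK;
#endif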

static inline bool
vm_ci_packed_p(const struct rb_callinfo *ci)
{
#if USE_EMBED_CI
    if (LIKELY(((VALUE)ci) & 0x01)) {
        return 1;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return 0;
    }
#else
    return 0;
#endif
}

static inline bool
vm_ci_p(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci) || IMEMO_TYPE_P(ci, imemo_callinfo)) {
        return 1;
    }
    else {
        return 0;
    }
}

static inline ID
vm_ci_mid(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (((VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
    }
    else {
        return (ID)ci->mid;
    }
}

static inline unsigned int
vm_ci_flag(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
    }
    else {
        return (unsigned int)ci->flag;
    }
}

static inline unsigned int
vm_ci_argc(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return (unsigned int)((((VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
    }
    else {
        return (unsigned int)ci->argc;
    }
}

static inline const struct rb_callinfo_kwarg *
vm_ci_kwarg(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        return NULL;
    }
    else {
        return ci->kwarg;
    }
}
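
/* Illustrative sketch (not part of the original header): iterating the
 * keyword symbols attached to a non-packed call info.  Packed CIs never
 * carry kwargs, so the NULL return from vm_ci_kwarg() covers both cases. */
#if 0
const struct rb_callinfo_kwarg *kw = vm_ci_kwarg(ci);
if (kw) {
    for (int i = 0; i < kw->keyword_len; i++) {
        VALUE sym = kw->keywords[i];  /* a Symbol such as :name */
        /* ... */
    }
}
#endif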

static inline void
vm_ci_dump(const struct rb_callinfo *ci)
{
    if (vm_ci_packed_p(ci)) {
        ruby_debug_printf("packed_ci ID:%s flag:%x argc:%u\n",
                          rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
    }
    else {
        rp(ci);
    }
}

#define vm_ci_new(mid, flag, argc, kwarg) vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)

/* This is passed to STATIC_ASSERT. Cannot be an inline function. */
#define VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg) \
    (((mid ) & ~CI_EMBED_ID_MASK)   ? false : \
     ((flag) & ~CI_EMBED_FLAG_MASK) ? false : \
     ((argc) & ~CI_EMBED_ARGC_MASK) ? false : \
     (kwarg)                        ? false : true)

#define vm_ci_new_id(mid, flag, argc, must_zero) \
    ((const struct rb_callinfo *)                \
     ((((VALUE)(mid )) << CI_EMBED_ID_SHFT)   |  \
      (((VALUE)(flag)) << CI_EMBED_FLAG_SHFT) |  \
      (((VALUE)(argc)) << CI_EMBED_ARGC_SHFT) |  \
      RUBY_FIXNUM_FLAG))
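
/* Illustrative sketch (not part of the original header): a CI can be
 * packed only when every field fits its bit budget and there are no
 * kwargs.  RUBY_FIXNUM_FLAG (the low tag bit) is exactly what
 * vm_ci_packed_p() tests, so the round trip is lossless: */
#if 0
if (VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
    const struct rb_callinfo *ci = vm_ci_new_id(mid, flag, argc, 0);
    VM_ASSERT(vm_ci_packed_p(ci));
    VM_ASSERT(vm_ci_mid(ci) == mid && vm_ci_argc(ci) == argc);
}
#endif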

static inline const struct rb_callinfo *
vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
#if USE_EMBED_CI
    if (VM_CI_EMBEDDABLE_P(mid, flag, argc, kwarg)) {
        RB_DEBUG_COUNTER_INC(ci_packed);
        return vm_ci_new_id(mid, flag, argc, kwarg);
    }
#endif

    const bool debug = 0;
    if (debug) ruby_debug_printf("%s:%d ", file, line);

    // TODO: dedup
    const struct rb_callinfo *ci = (const struct rb_callinfo *)
        rb_imemo_new(imemo_callinfo,
                     (VALUE)mid,
                     (VALUE)flag,
                     (VALUE)argc,
                     (VALUE)kwarg);
    if (debug) rp(ci);
    if (kwarg) {
        RB_DEBUG_COUNTER_INC(ci_kw);
    }
    else {
        RB_DEBUG_COUNTER_INC(ci_nokw);
    }

    VM_ASSERT(vm_ci_flag(ci) == flag);
    VM_ASSERT(vm_ci_argc(ci) == argc);

    return ci;
}
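
/* Illustrative sketch (not part of the original header): creating a call
 * info for a call like `obj.foo(1, 2)`.  The vm_ci_new macro records
 * __FILE__/__LINE__ for the debug path above; the result is either a
 * tagged packed word or a GC-managed imemo_callinfo, and callers treat
 * both uniformly through the vm_ci_* accessors. */
#if 0
const struct rb_callinfo *ci =
    vm_ci_new(rb_intern("foo"), VM_CALL_ARGS_SIMPLE, 2, NULL);
#endif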


static inline const struct rb_callinfo *
vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
{
    RB_DEBUG_COUNTER_INC(ci_runtime);
    return vm_ci_new_(mid, flag, argc, kwarg, file, line);
}

#define VM_CALLINFO_NOT_UNDER_GC IMEMO_FL_USER0

static inline bool
vm_ci_markable(const struct rb_callinfo *ci)
{
    if (! ci) {
        return false; /* or true? This is Qfalse... */
    }
    else if (vm_ci_packed_p(ci)) {
        return true;
    }
    else {
        VM_ASSERT(IMEMO_TYPE_P(ci, imemo_callinfo));
        return ! FL_ANY_RAW((VALUE)ci, VM_CALLINFO_NOT_UNDER_GC);
    }
}

#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
    (struct rb_callinfo) {                          \
        .flags = T_IMEMO |                          \
            (imemo_callinfo << FL_USHIFT) |         \
            VM_CALLINFO_NOT_UNDER_GC,               \
        .mid   = mid_,                              \
        .flag  = flags_,                            \
        .argc  = argc_,                             \
        .kwarg = kwarg_,                            \
    }
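
/* Illustrative sketch (not part of the original header): a temporary
 * call info built on the C stack.  VM_CALLINFO_NOT_UNDER_GC makes
 * vm_ci_markable() return false, so the GC never marks or moves it. */
#if 0
struct rb_callinfo ci = VM_CI_ON_STACK(rb_intern("each"), 0, 0, NULL);
VM_ASSERT(!vm_ci_markable(&ci));
#endif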

typedef VALUE (*vm_call_handler)(
    struct rb_execution_context_struct *ec,
    struct rb_control_frame_struct *cfp,
    struct rb_calling_info *calling);

// imemo_callcache

struct rb_callcache {
    const VALUE flags;

    /* inline cache: key */
    const VALUE klass; // Deliberately not marked: marking it would keep klass
                       // from ever being freed. When klass is collected, the
                       // cc is cleared instead (cc->klass = 0) at vm_ccs_free().

    /* inline cache: values */
    const struct rb_callable_method_entry_struct * const cme_;
    const vm_call_handler call_;

    union {
        const unsigned int attr_index;
        const enum method_missing_reason method_missing_reason; /* used by method_missing */
        VALUE v;
    } aux_;
};

#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER0
#define VM_CALLCACHE_ON_STACK   IMEMO_FL_USER1

static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
          const struct rb_callable_method_entry_struct *cme,
          vm_call_handler call)
{
    const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);
    RB_DEBUG_COUNTER_INC(cc_new);
    return cc;
}
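
/* Illustrative sketch (not part of the original header): a call cache
 * pairs the class a lookup was made on (the key) with the resolved
 * method entry and its call handler (the values).  `klass`, `cme`, and
 * `handler` are stand-ins for the results of a prior method lookup. */
#if 0
const struct rb_callcache *cc = vm_cc_new(klass, cme, handler);
VM_ASSERT(vm_cc_cme(cc) == cme);
#endif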

#define VM_CC_ON_STACK(clazz, call, aux, cme)  \
    (struct rb_callcache) {                    \
        .flags = T_IMEMO |                     \
            (imemo_callcache << FL_USHIFT) |   \
            VM_CALLCACHE_UNMARKABLE |          \
            VM_CALLCACHE_ON_STACK,             \
        .klass = clazz,                        \
        .cme_  = cme,                          \
        .call_ = call,                         \
        .aux_  = aux,                          \
    }
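
/* Illustrative sketch (not part of the original header): a throwaway
 * cache built on the C stack for a one-off dispatch, where allocating a
 * GC object would be wasted work.  The double braces pass a zeroed aux_
 * union through the macro; `cme` stands for a looked-up method entry. */
#if 0
struct rb_callcache cc = VM_CC_ON_STACK(Qfalse, NULL, {{ 0 }}, cme);
VM_ASSERT(!vm_cc_markable(&cc));
#endif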

static inline bool
vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->klass == 0 ||
              RB_TYPE_P(cc->klass, T_CLASS) || RB_TYPE_P(cc->klass, T_ICLASS));
    return cc->klass == klass;
}

static inline int
vm_cc_markable(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return FL_TEST_RAW((VALUE)cc, VM_CALLCACHE_UNMARKABLE) == 0;
}

static inline const struct rb_callable_method_entry_struct *
vm_cc_cme(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ == NULL || // not initialized yet
              !vm_cc_markable(cc) ||
              cc->cme_ != NULL);

    return cc->cme_;
}

static inline vm_call_handler
vm_cc_call(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc->call_ != NULL);
    return cc->call_;
}

static inline unsigned int
vm_cc_attr_index(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.attr_index;
}

static inline unsigned int
vm_cc_cmethod_missing_reason(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    return cc->aux_.method_missing_reason;
}

static inline bool
vm_cc_invalidated_p(const struct rb_callcache *cc)
{
    if (cc->klass && !METHOD_ENTRY_INVALIDATED(vm_cc_cme(cc))) {
        return false;
    }
    else {
        return true;
    }
}
// For MJIT. `cc_cme` is expected to be the caller's already-loaded result of vm_cc_cme(cc).
static inline bool
vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *cc_cme, VALUE klass)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    if (cc->klass == klass && !METHOD_ENTRY_INVALIDATED(cc_cme)) {
        return 1;
    }
    else {
        return 0;
    }
}
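
/* Illustrative sketch (not part of the original header): the fast-path
 * test a call site performs before reusing a cache entry.  `recv`, `ec`,
 * `cfp`, and `calling` are stand-ins for values the VM has at hand. */
#if 0
if (vm_cc_valid_p(cc, vm_cc_cme(cc), CLASS_OF(recv))) {
    val = vm_cc_call(cc)(ec, cfp, &calling);  /* cache hit: direct dispatch */
}
else {
    /* cache miss or invalidated cc: fall back to full method lookup */
}
#endif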

extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);
#define vm_cc_empty() rb_vm_empty_cc()

/* callcache: mutate */

static inline void
vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(vm_call_handler *)&cc->call_ = call;
}

static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, int index)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(int *)&cc->aux_.attr_index = index;
}

static inline void
vm_cc_method_missing_reason_set(const struct rb_callcache *cc, enum method_missing_reason reason)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    *(enum method_missing_reason *)&cc->aux_.method_missing_reason = reason;
}

static inline void
vm_cc_invalidate(const struct rb_callcache *cc)
{
    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
    VM_ASSERT(cc != vm_cc_empty());
    VM_ASSERT(cc->klass != 0); // must not have been invalidated already

    *(VALUE *)&cc->klass = 0;
    RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}
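
/* Illustrative sketch (not part of the original header): zeroing the
 * cached klass is how a cc is retired, e.g. when its method is redefined
 * or its class is collected.  No live class has VALUE 0, so
 * vm_cc_class_check() and vm_cc_valid_p() can never succeed again and
 * every holder of this cc falls back to a fresh lookup. */
#if 0
vm_cc_invalidate(cc);
VM_ASSERT(vm_cc_invalidated_p(cc));
#endif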

/* calldata */

struct rb_call_data {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
};

struct rb_class_cc_entries {
#if VM_CHECK_MODE > 0
    VALUE debug_sig;
#endif
    int capa;
    int len;
    const struct rb_callable_method_entry_struct *cme;
    struct rb_class_cc_entries_entry {
        const struct rb_callinfo *ci;
        const struct rb_callcache *cc;
    } *entries;
};
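
/* Illustrative sketch (not part of the original header): a class keeps
 * one rb_class_cc_entries per method ID, and finding a cc for a given
 * call info is roughly a linear scan over its (ci, cc) pairs, comparing
 * CIs by pointer identity.  The helper name is made up. */
#if 0
static const struct rb_callcache *
example_ccs_lookup(const struct rb_class_cc_entries *ccs, const struct rb_callinfo *ci)
{
    for (int i = 0; i < ccs->len; i++) {
        if (ccs->entries[i].ci == ci) {
            return ccs->entries[i].cc;  /* cache hit for this call site */
        }
    }
    return NULL;  /* miss: caller creates and inserts a new cc */
}
#endif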

#if VM_CHECK_MODE > 0

const rb_callable_method_entry_t *rb_vm_lookup_overloaded_cme(const rb_callable_method_entry_t *cme);
void rb_vm_dump_overloaded_cme_table(void);

static inline bool
vm_ccs_p(const struct rb_class_cc_entries *ccs)
{
    return ccs->debug_sig == ~(VALUE)ccs;
}

static inline bool
vm_cc_check_cme(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme)
{
    if (vm_cc_cme(cc) == cme ||
        (cme->def->iseq_overload && vm_cc_cme(cc) == rb_vm_lookup_overloaded_cme(cme))) {
        return true;
    }
    else {
#if 1
        // debug print

        fprintf(stderr, "iseq_overload:%d\n", (int)cme->def->iseq_overload);
        rp(cme);
        rp(vm_cc_cme(cc));
        rb_vm_lookup_overloaded_cme(cme);
#endif
        return false;
    }
}

#endif

// gc.c
void rb_vm_ccs_free(struct rb_class_cc_entries *ccs);

#endif /* RUBY_VM_CALLINFO_H */