Ruby 3.1.3p185 (2022-11-24 revision 1a6b16756e0ba6b95ab71a441357ed5484e33498)
gc.c
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17#include "ruby/internal/config.h"
18#ifdef _WIN32
19# include "ruby/ruby.h"
20#endif
21
22#include <signal.h>
23
24#define sighandler_t ruby_sighandler_t
25
26#ifndef _WIN32
27#include <unistd.h>
28#include <sys/mman.h>
29#endif
30
31#include <setjmp.h>
32#include <stdarg.h>
33#include <stdio.h>
34
35/* MALLOC_HEADERS_BEGIN */
36#ifndef HAVE_MALLOC_USABLE_SIZE
37# ifdef _WIN32
38# define HAVE_MALLOC_USABLE_SIZE
39# define malloc_usable_size(a) _msize(a)
40# elif defined HAVE_MALLOC_SIZE
41# define HAVE_MALLOC_USABLE_SIZE
42# define malloc_usable_size(a) malloc_size(a)
43# endif
44#endif
45
46#ifdef HAVE_MALLOC_USABLE_SIZE
47# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
48/* Alternative malloc header is included in ruby/missing.h */
49# elif defined(HAVE_MALLOC_H)
50# include <malloc.h>
51# elif defined(HAVE_MALLOC_NP_H)
52# include <malloc_np.h>
53# elif defined(HAVE_MALLOC_MALLOC_H)
54# include <malloc/malloc.h>
55# endif
56#endif
57
58#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
59/* LIST_HEAD conflicts with sys/queue.h on macOS */
60# include <sys/user.h>
61#endif
62/* MALLOC_HEADERS_END */
63
64#ifdef HAVE_SYS_TIME_H
65# include <sys/time.h>
66#endif
67
68#ifdef HAVE_SYS_RESOURCE_H
69# include <sys/resource.h>
70#endif
71
72#if defined _WIN32 || defined __CYGWIN__
73# include <windows.h>
74#elif defined(HAVE_POSIX_MEMALIGN)
75#elif defined(HAVE_MEMALIGN)
76# include <malloc.h>
77#endif
78
79#include <sys/types.h>
80
81#ifdef __EMSCRIPTEN__
82#include <emscripten.h>
83#endif
84
85#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
86
87#include "constant.h"
88#include "debug_counter.h"
89#include "eval_intern.h"
90#include "gc.h"
91#include "id_table.h"
92#include "internal.h"
93#include "internal/class.h"
94#include "internal/complex.h"
95#include "internal/cont.h"
96#include "internal/error.h"
97#include "internal/eval.h"
98#include "internal/gc.h"
99#include "internal/hash.h"
100#include "internal/imemo.h"
101#include "internal/io.h"
102#include "internal/numeric.h"
103#include "internal/object.h"
104#include "internal/proc.h"
105#include "internal/rational.h"
106#include "internal/sanitizers.h"
107#include "internal/struct.h"
108#include "internal/symbol.h"
109#include "internal/thread.h"
110#include "internal/variable.h"
111#include "internal/warnings.h"
112#include "mjit.h"
113#include "probes.h"
114#include "regint.h"
115#include "ruby/debug.h"
116#include "ruby/io.h"
117#include "ruby/re.h"
118#include "ruby/st.h"
119#include "ruby/thread.h"
120#include "ruby/util.h"
121#include "ruby_assert.h"
122#include "ruby_atomic.h"
123#include "symbol.h"
124#include "transient_heap.h"
125#include "vm_core.h"
126#include "vm_sync.h"
127#include "vm_callinfo.h"
128#include "ractor_core.h"
129
130#include "builtin.h"
131
132#define rb_setjmp(env) RUBY_SETJMP(env)
133#define rb_jmp_buf rb_jmpbuf_t
134#undef rb_data_object_wrap
135
136#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
137#define MAP_ANONYMOUS MAP_ANON
138#endif
139
140static inline struct rbimpl_size_mul_overflow_tag
141size_add_overflow(size_t x, size_t y)
142{
143 size_t z;
144 bool p;
145#if 0
146
147#elif __has_builtin(__builtin_add_overflow)
148 p = __builtin_add_overflow(x, y, &z);
149
150#elif defined(DSIZE_T)
151 RB_GNUC_EXTENSION DSIZE_T dx = x;
152 RB_GNUC_EXTENSION DSIZE_T dy = y;
153 RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
154 p = dz > SIZE_MAX;
155 z = (size_t)dz;
156
157#else
158 z = x + y;
159 p = z < y;
160
161#endif
162 return (struct rbimpl_size_mul_overflow_tag) { p, z, };
163}
164
165static inline struct rbimpl_size_mul_overflow_tag
166size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
167{
168 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
169 struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
170 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
171}
172
173static inline struct rbimpl_size_mul_overflow_tag
174size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
175{
176 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
177 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
178 struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
179 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
180}
181
182PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
183
184static inline size_t
185size_mul_or_raise(size_t x, size_t y, VALUE exc)
186{
187 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
188 if (LIKELY(!t.left)) {
189 return t.right;
190 }
191 else if (rb_during_gc()) {
192 rb_memerror(); /* or...? */
193 }
194 else {
195 gc_raise(
196 exc,
197 "integer overflow: %"PRIuSIZE
198 " * %"PRIuSIZE
199 " > %"PRIuSIZE,
200 x, y, (size_t)SIZE_MAX);
201 }
202}
203
204size_t
205rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
206{
207 return size_mul_or_raise(x, y, exc);
208}
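/* Editorial note (not part of the original source): a minimal sketch of how
 * these overflow-checked helpers are meant to be used.  Instead of writing
 * `x * y` directly and risking silent wrap-around, callers ask for the
 * product and get an exception (or rb_memerror() while inside GC) on overflow:
 *
 *     // hypothetical caller; `len` and `elem_size` are illustrative names
 *     size_t nbytes = rb_size_mul_or_raise(len, elem_size, rb_eArgError);
 *     void *buf = ruby_xmalloc(nbytes);
 *
 * rb_size_mul_add_or_raise() below does the same for `x * y + z`, e.g. a
 * variable-length payload plus a fixed header.
 */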
209
210static inline size_t
211size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
212{
213 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
214 if (LIKELY(!t.left)) {
215 return t.right;
216 }
217 else if (rb_during_gc()) {
218 rb_memerror(); /* or...? */
219 }
220 else {
221 gc_raise(
222 exc,
223 "integer overflow: %"PRIuSIZE
224 " * %"PRIuSIZE
225 " + %"PRIuSIZE
226 " > %"PRIuSIZE,
227 x, y, z, (size_t)SIZE_MAX);
228 }
229}
230
231size_t
232rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
233{
234 return size_mul_add_or_raise(x, y, z, exc);
235}
236
237static inline size_t
238size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
239{
240 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
241 if (LIKELY(!t.left)) {
242 return t.right;
243 }
244 else if (rb_during_gc()) {
245 rb_memerror(); /* or...? */
246 }
247 else {
248 gc_raise(
249 exc,
250 "integer overflow: %"PRIdSIZE
251 " * %"PRIdSIZE
252 " + %"PRIdSIZE
253 " * %"PRIdSIZE
254 " > %"PRIdSIZE,
255 x, y, z, w, (size_t)SIZE_MAX);
256 }
257}
258
259#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
260/* trick the compiler into thinking an external signal handler uses this */
261volatile VALUE rb_gc_guarded_val;
262volatile VALUE *
263rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
264{
265 rb_gc_guarded_val = val;
266
267 return ptr;
268}
269#endif
270
271#ifndef GC_HEAP_INIT_SLOTS
272#define GC_HEAP_INIT_SLOTS 10000
273#endif
274#ifndef GC_HEAP_FREE_SLOTS
275#define GC_HEAP_FREE_SLOTS 4096
276#endif
277#ifndef GC_HEAP_GROWTH_FACTOR
278#define GC_HEAP_GROWTH_FACTOR 1.8
279#endif
280#ifndef GC_HEAP_GROWTH_MAX_SLOTS
281#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 means disabled */
282#endif
283#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
284#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
285#endif
286
287#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
288#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
289#endif
290#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
291#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
292#endif
293#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
294#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
295#endif
296
297#ifndef GC_MALLOC_LIMIT_MIN
298#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
299#endif
300#ifndef GC_MALLOC_LIMIT_MAX
301#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
302#endif
303#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
304#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
305#endif
306
307#ifndef GC_OLDMALLOC_LIMIT_MIN
308#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
309#endif
310#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
311#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
312#endif
313#ifndef GC_OLDMALLOC_LIMIT_MAX
314#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
315#endif
316
317#ifndef PRINT_MEASURE_LINE
318#define PRINT_MEASURE_LINE 0
319#endif
320#ifndef PRINT_ENTER_EXIT_TICK
321#define PRINT_ENTER_EXIT_TICK 0
322#endif
323#ifndef PRINT_ROOT_TICKS
324#define PRINT_ROOT_TICKS 0
325#endif
326
327#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
328#define TICK_TYPE 1
329
330typedef struct {
331 size_t heap_init_slots;
332 size_t heap_free_slots;
333 double growth_factor;
334 size_t growth_max_slots;
335
336 double heap_free_slots_min_ratio;
337 double heap_free_slots_goal_ratio;
338 double heap_free_slots_max_ratio;
339 double oldobject_limit_factor;
340
341 size_t malloc_limit_min;
342 size_t malloc_limit_max;
343 double malloc_limit_growth_factor;
344
345 size_t oldmalloc_limit_min;
346 size_t oldmalloc_limit_max;
347 double oldmalloc_limit_growth_factor;
348
349 VALUE gc_stress;
350} ruby_gc_params_t;
351
352static ruby_gc_params_t gc_params = {
353 GC_HEAP_INIT_SLOTS,
354 GC_HEAP_FREE_SLOTS,
355 GC_HEAP_GROWTH_FACTOR,
356 GC_HEAP_GROWTH_MAX_SLOTS,
357
358 GC_HEAP_FREE_SLOTS_MIN_RATIO,
359 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
360 GC_HEAP_FREE_SLOTS_MAX_RATIO,
361 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
362
363 GC_MALLOC_LIMIT_MIN,
364 GC_MALLOC_LIMIT_MAX,
365 GC_MALLOC_LIMIT_GROWTH_FACTOR,
366
367 GC_OLDMALLOC_LIMIT_MIN,
368 GC_OLDMALLOC_LIMIT_MAX,
369 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
370
371 FALSE,
372};
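/* Editorial note (not part of the original source): these compile-time
 * defaults are only starting values; they can be overridden at process
 * startup through environment variables (parsed later in this file, in
 * ruby_gc_set_params()).  A sketch, assuming a POSIX shell:
 *
 *     RUBY_GC_HEAP_INIT_SLOTS=600000 \
 *     RUBY_GC_HEAP_GROWTH_FACTOR=1.25 \
 *     RUBY_GC_MALLOC_LIMIT=67108864 ruby app.rb
 *
 * would start the heap with ~600k slots, grow it more conservatively, and
 * raise the malloc GC trigger to 64MB.
 */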
373
374/* GC_DEBUG:
375 * set to 1 to embed GC debugging information.
376 */
377#ifndef GC_DEBUG
378#define GC_DEBUG 0
379#endif
380
381/* RGENGC_DEBUG:
382 * 1: basic information
383 * 2: remember set operation
384 * 3: mark
385 * 4:
386 * 5: sweep
387 */
388#ifndef RGENGC_DEBUG
389#ifdef RUBY_DEVEL
390#define RGENGC_DEBUG -1
391#else
392#define RGENGC_DEBUG 0
393#endif
394#endif
395#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
396# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
397#elif defined(HAVE_VA_ARGS_MACRO)
398# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
399#else
400# define RGENGC_DEBUG_ENABLED(level) 0
401#endif
402int ruby_rgengc_debug;
403
404/* RGENGC_CHECK_MODE
405 * 0: disable all assertions
406 * 1: enable assertions (to debug RGenGC)
407 * 2: enable internal consistency check at each GC (for debugging)
408 * 3: enable internal consistency check at each GC step (for debugging)
409 * 4: enable liveness check
410 * 5: show all references
411 */
412#ifndef RGENGC_CHECK_MODE
413#define RGENGC_CHECK_MODE 0
414#endif
415
416// Note: using RUBY_ASSERT_WHEN() expands a macro in expr (info by nobu).
417#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
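/* Editorial note (not part of the original source): because the condition
 * passed to RUBY_ASSERT_MESG_WHEN is the compile-time constant
 * RGENGC_CHECK_MODE > 0, GC_ASSERT() is compiled out in a normal
 * (RGENGC_CHECK_MODE == 0) build.  A sketch of the intended usage:
 *
 *     GC_ASSERT(page->free_slots <= page->total_slots);
 *
 * In a debugging build (e.g. -DRGENGC_CHECK_MODE=2) a violated assertion
 * fails hard with the stringified expression as the message; in release
 * builds the check costs nothing.
 */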
418
419/* RGENGC_OLD_NEWOBJ_CHECK
420 * 0: disable this check.
421 * >0: make an OLD object when creating a new object.
422 *
423 * Make one OLD object per RGENGC_OLD_NEWOBJ_CHECK WB-protected object creations.
424 */
425#ifndef RGENGC_OLD_NEWOBJ_CHECK
426#define RGENGC_OLD_NEWOBJ_CHECK 0
427#endif
428
429/* RGENGC_PROFILE
430 * 0: disable RGenGC profiling
431 * 1: enable profiling for basic information
432 * 2: enable profiling for each type
433 */
434#ifndef RGENGC_PROFILE
435#define RGENGC_PROFILE 0
436#endif
437
438/* RGENGC_ESTIMATE_OLDMALLOC
439 * Enable/disable estimation of the increase in malloc'ed size caused by old objects.
440 * If the estimate exceeds the threshold, a full GC is invoked.
441 * 0: disable estimation.
442 * 1: enable estimation.
443 */
444#ifndef RGENGC_ESTIMATE_OLDMALLOC
445#define RGENGC_ESTIMATE_OLDMALLOC 1
446#endif
447
448/* RGENGC_FORCE_MAJOR_GC
449 * Force major/full GC if this macro is not 0.
450 */
451#ifndef RGENGC_FORCE_MAJOR_GC
452#define RGENGC_FORCE_MAJOR_GC 0
453#endif
454
455#ifndef GC_PROFILE_MORE_DETAIL
456#define GC_PROFILE_MORE_DETAIL 0
457#endif
458#ifndef GC_PROFILE_DETAIL_MEMORY
459#define GC_PROFILE_DETAIL_MEMORY 0
460#endif
461#ifndef GC_ENABLE_INCREMENTAL_MARK
462#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
463#endif
464#ifndef GC_ENABLE_LAZY_SWEEP
465#define GC_ENABLE_LAZY_SWEEP 1
466#endif
467#ifndef CALC_EXACT_MALLOC_SIZE
468#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
469#endif
470#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
471#ifndef MALLOC_ALLOCATED_SIZE
472#define MALLOC_ALLOCATED_SIZE 0
473#endif
474#else
475#define MALLOC_ALLOCATED_SIZE 0
476#endif
477#ifndef MALLOC_ALLOCATED_SIZE_CHECK
478#define MALLOC_ALLOCATED_SIZE_CHECK 0
479#endif
480
481#ifndef GC_DEBUG_STRESS_TO_CLASS
482#define GC_DEBUG_STRESS_TO_CLASS 0
483#endif
484
485#ifndef RGENGC_OBJ_INFO
486#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
487#endif
488
489typedef enum {
490 GPR_FLAG_NONE = 0x000,
491 /* major reason */
492 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
493 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
494 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
495 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
496#if RGENGC_ESTIMATE_OLDMALLOC
497 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
498#endif
499 GPR_FLAG_MAJOR_MASK = 0x0ff,
500
501 /* gc reason */
502 GPR_FLAG_NEWOBJ = 0x100,
503 GPR_FLAG_MALLOC = 0x200,
504 GPR_FLAG_METHOD = 0x400,
505 GPR_FLAG_CAPI = 0x800,
506 GPR_FLAG_STRESS = 0x1000,
507
508 /* others */
509 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
510 GPR_FLAG_HAVE_FINALIZE = 0x4000,
511 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
512 GPR_FLAG_FULL_MARK = 0x10000,
513 GPR_FLAG_COMPACT = 0x20000,
514
515 GPR_DEFAULT_REASON =
516 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
517 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
518} gc_profile_record_flag;
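/* Editorial note (not part of the original source): a GC "reason" is a
 * bitwise OR of the flags above, so one word records both why a major GC
 * was required and what triggered the cycle.  A sketch of decoding one:
 *
 *     unsigned int reason = GPR_FLAG_MAJOR_BY_OLDGEN | GPR_FLAG_NEWOBJ;
 *     if (reason & GPR_FLAG_MAJOR_MASK) {
 *         // promoted old objects forced a major (full) GC ...
 *     }
 *     if (reason & GPR_FLAG_NEWOBJ) {
 *         // ... and the cycle was entered from object allocation
 *     }
 *
 * GC.latest_gc_info exposes a symbolic form of the same bits to Ruby code.
 */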
519
520typedef struct gc_profile_record {
521 unsigned int flags;
522
523 double gc_time;
524 double gc_invoke_time;
525
526 size_t heap_total_objects;
527 size_t heap_use_size;
528 size_t heap_total_size;
529 size_t moved_objects;
530
531#if GC_PROFILE_MORE_DETAIL
532 double gc_mark_time;
533 double gc_sweep_time;
534
535 size_t heap_use_pages;
536 size_t heap_live_objects;
537 size_t heap_free_objects;
538
539 size_t allocate_increase;
540 size_t allocate_limit;
541
542 double prepare_time;
543 size_t removing_objects;
544 size_t empty_objects;
545#if GC_PROFILE_DETAIL_MEMORY
546 long maxrss;
547 long minflt;
548 long majflt;
549#endif
550#endif
551#if MALLOC_ALLOCATED_SIZE
552 size_t allocated_size;
553#endif
554
555#if RGENGC_PROFILE > 0
556 size_t old_objects;
557 size_t remembered_normal_objects;
558 size_t remembered_shady_objects;
559#endif
560} gc_profile_record;
561
562#define FL_FROM_FREELIST FL_USER0
563
564struct RMoved {
565 VALUE flags;
566 VALUE dummy;
567 VALUE destination;
568};
569
570#define RMOVED(obj) ((struct RMoved *)(obj))
571
572typedef struct RVALUE {
573 union {
574 struct {
575 VALUE flags; /* always 0 for freed obj */
576 struct RVALUE *next;
577 } free;
578 struct RMoved moved;
579 struct RBasic basic;
580 struct RObject object;
581 struct RClass klass;
582 struct RFloat flonum;
583 struct RString string;
584 struct RArray array;
585 struct RRegexp regexp;
586 struct RHash hash;
587 struct RData data;
588 struct RTypedData typeddata;
589 struct RStruct rstruct;
590 struct RBignum bignum;
591 struct RFile file;
592 struct RMatch match;
593 struct RRational rational;
594 struct RComplex complex;
595 struct RSymbol symbol;
596 union {
597 rb_cref_t cref;
598 struct vm_svar svar;
599 struct vm_throw_data throw_data;
600 struct vm_ifunc ifunc;
601 struct MEMO memo;
602 struct rb_method_entry_struct ment;
603 const rb_iseq_t iseq;
604 rb_env_t env;
605 struct rb_imemo_tmpbuf_struct alloc;
606 rb_ast_t ast;
607 } imemo;
608 struct {
609 struct RBasic basic;
610 VALUE v1;
611 VALUE v2;
612 VALUE v3;
613 } values;
614 } as;
615#if GC_DEBUG
616 const char *file;
617 int line;
618#endif
619} RVALUE;
620
621#if GC_DEBUG
622STATIC_ASSERT(sizeof_rvalue, offsetof(RVALUE, file) == SIZEOF_VALUE * 5);
623#else
624STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == SIZEOF_VALUE * 5);
625#endif
626STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
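/* Editorial note (not part of the original source): the asserts above pin
 * down the base slot size.  On a typical 64-bit build SIZEOF_VALUE == 8, so
 * every slot in the smallest size pool is 5 * 8 = 40 bytes, and larger size
 * pools (see rb_objspace_alloc below) use power-of-two multiples of that:
 *
 *     sizeof(RVALUE) == 40                         // 5 VALUE-sized words
 *     slot_size(pool i) == sizeof(RVALUE) << i     // 40, 80, 160, ... bytes
 *
 * Keeping the slot a fixed multiple of VALUE is what lets the bitmap macros
 * further down compute an object's bit index from its address alone.
 */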
627
628typedef uintptr_t bits_t;
629enum {
630 BITS_SIZE = sizeof(bits_t),
631 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
632};
633#define popcount_bits rb_popcount_intptr
634
635struct heap_page_header {
636 struct heap_page *page;
637};
638
639struct heap_page_body {
640 struct heap_page_header header;
641 /* char gap[]; */
642 /* RVALUE values[]; */
643};
644
645struct gc_list {
646 VALUE *varptr;
647 struct gc_list *next;
648};
649
650#define STACK_CHUNK_SIZE 500
651
652typedef struct stack_chunk {
653 VALUE data[STACK_CHUNK_SIZE];
654 struct stack_chunk *next;
655} stack_chunk_t;
656
657typedef struct mark_stack {
658 stack_chunk_t *chunk;
659 stack_chunk_t *cache;
660 int index;
661 int limit;
662 size_t cache_size;
663 size_t unused_cache_size;
664} mark_stack_t;
665
666#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
667#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
668
669typedef struct rb_heap_struct {
670 struct heap_page *free_pages;
671 struct list_head pages;
672 struct heap_page *sweeping_page; /* iterator for .pages */
673 struct heap_page *compact_cursor;
674 RVALUE * compact_cursor_index;
675#if GC_ENABLE_INCREMENTAL_MARK
676 struct heap_page *pooled_pages;
677#endif
678 size_t total_pages; /* total page count in a heap */
679 size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
680} rb_heap_t;
681
682typedef struct rb_size_pool_struct {
683 short slot_size;
684
685 size_t allocatable_pages;
686
687#if USE_RVARGC
688 /* Sweeping statistics */
689 size_t freed_slots;
690 size_t empty_slots;
691
692 /* Global statistics */
693 size_t force_major_gc_count;
694#endif
695
696 rb_heap_t eden_heap;
697 rb_heap_t tomb_heap;
698} rb_size_pool_t;
699
700enum gc_mode {
701 gc_mode_none,
702 gc_mode_marking,
703 gc_mode_sweeping
704};
705
706typedef struct rb_objspace {
707 struct {
708 size_t limit;
709 size_t increase;
710#if MALLOC_ALLOCATED_SIZE
711 size_t allocated_size;
712 size_t allocations;
713#endif
714 } malloc_params;
715
716 struct {
717 unsigned int mode : 2;
718 unsigned int immediate_sweep : 1;
719 unsigned int dont_gc : 1;
720 unsigned int dont_incremental : 1;
721 unsigned int during_gc : 1;
722 unsigned int during_compacting : 1;
723 unsigned int gc_stressful: 1;
724 unsigned int has_hook: 1;
725 unsigned int during_minor_gc : 1;
726#if GC_ENABLE_INCREMENTAL_MARK
727 unsigned int during_incremental_marking : 1;
728#endif
729 unsigned int measure_gc : 1;
730 } flags;
731
732 rb_event_flag_t hook_events;
733 size_t total_allocated_objects;
734 VALUE next_object_id;
735
736 rb_size_pool_t size_pools[SIZE_POOL_COUNT];
737
738 struct {
739 rb_atomic_t finalizing;
740 } atomic_flags;
741
742 mark_stack_t mark_stack;
743 size_t marked_slots;
744
745 struct {
746 struct heap_page **sorted;
747 size_t allocated_pages;
748 size_t allocatable_pages;
749 size_t sorted_length;
750 RVALUE *range[2];
751 size_t freeable_pages;
752
753 /* final */
754 size_t final_slots;
755 VALUE deferred_final;
756 } heap_pages;
757
758 st_table *finalizer_table;
759
760 struct {
761 int run;
762 unsigned int latest_gc_info;
763 gc_profile_record *records;
764 gc_profile_record *current_record;
765 size_t next_index;
766 size_t size;
767
768#if GC_PROFILE_MORE_DETAIL
769 double prepare_time;
770#endif
771 double invoke_time;
772
773 size_t minor_gc_count;
774 size_t major_gc_count;
775 size_t compact_count;
776 size_t read_barrier_faults;
777#if RGENGC_PROFILE > 0
778 size_t total_generated_normal_object_count;
779 size_t total_generated_shady_object_count;
780 size_t total_shade_operation_count;
781 size_t total_promoted_count;
782 size_t total_remembered_normal_object_count;
783 size_t total_remembered_shady_object_count;
784
785#if RGENGC_PROFILE >= 2
786 size_t generated_normal_object_count_types[RUBY_T_MASK];
787 size_t generated_shady_object_count_types[RUBY_T_MASK];
788 size_t shade_operation_count_types[RUBY_T_MASK];
789 size_t promoted_types[RUBY_T_MASK];
790 size_t remembered_normal_object_count_types[RUBY_T_MASK];
791 size_t remembered_shady_object_count_types[RUBY_T_MASK];
792#endif
793#endif /* RGENGC_PROFILE */
794
795 /* temporary profiling space */
796 double gc_sweep_start_time;
797 size_t total_allocated_objects_at_gc_start;
798 size_t heap_used_at_gc_start;
799
800 /* basic statistics */
801 size_t count;
802 size_t total_freed_objects;
803 size_t total_allocated_pages;
804 size_t total_freed_pages;
805 uint64_t total_time_ns;
806 struct timespec start_time;
807 } profile;
808 struct gc_list *global_list;
809
810 VALUE gc_stress_mode;
811
812 struct {
813 VALUE parent_object;
814 int need_major_gc;
815 size_t last_major_gc;
816 size_t uncollectible_wb_unprotected_objects;
817 size_t uncollectible_wb_unprotected_objects_limit;
818 size_t old_objects;
819 size_t old_objects_limit;
820
821#if RGENGC_ESTIMATE_OLDMALLOC
822 size_t oldmalloc_increase;
823 size_t oldmalloc_increase_limit;
824#endif
825
826#if RGENGC_CHECK_MODE >= 2
827 struct st_table *allrefs_table;
828 size_t error_count;
829#endif
830 } rgengc;
831
832 struct {
833 size_t considered_count_table[T_MASK];
834 size_t moved_count_table[T_MASK];
835 size_t total_moved;
836 } rcompactor;
837
838#if GC_ENABLE_INCREMENTAL_MARK
839 struct {
840 size_t pooled_slots;
841 size_t step_slots;
842 } rincgc;
843#endif
844
845 st_table *id_to_obj_tbl;
846 st_table *obj_to_id_tbl;
847
848#if GC_DEBUG_STRESS_TO_CLASS
849 VALUE stress_to_class;
850#endif
851} rb_objspace_t;
852
853
854/* default tiny heap size: 16KB */
855#define HEAP_PAGE_ALIGN_LOG 14
856#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
857enum {
858 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
859 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
860 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
861 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
862 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
863 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
864};
865#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
866#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
867
868#ifdef HAVE_MMAP
869# if HAVE_CONST_PAGE_SIZE
870/* If we have PAGE_SIZE and it is a constant, then we can directly use it. */
871static const bool USE_MMAP_ALIGNED_ALLOC = (PAGE_SIZE <= HEAP_PAGE_SIZE);
872# elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
873/* PAGE_SIZE <= HEAP_PAGE_SIZE */
874static const bool USE_MMAP_ALIGNED_ALLOC = true;
875# else
876/* Otherwise, fall back to determining if we can use mmap during runtime. */
877# define USE_MMAP_ALIGNED_ALLOC (use_mmap_aligned_alloc != false)
878
879static bool use_mmap_aligned_alloc;
880# endif
881#elif !defined(__MINGW32__) && !defined(_WIN32)
882static const bool USE_MMAP_ALIGNED_ALLOC = false;
883#endif
884
885struct heap_page {
886 short slot_size;
887 short total_slots;
888 short free_slots;
889 short pinned_slots;
890 short final_slots;
891 struct {
892 unsigned int before_sweep : 1;
893 unsigned int has_remembered_objects : 1;
894 unsigned int has_uncollectible_shady_objects : 1;
895 unsigned int in_tomb : 1;
896 } flags;
897
898 rb_size_pool_t *size_pool;
899
900 struct heap_page *free_next;
901 RVALUE *start;
902 RVALUE *freelist;
903 struct list_node page_node;
904
905 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
906 /* the following three bitmaps are cleared at the beginning of full GC */
907 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
908 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
909 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
910
911 /* If set, the object is not movable */
912 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
913};
914
915#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
916#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
917#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
918
919#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
920#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
921#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
922#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
923
924/* Bitmap Operations */
925#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
926#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
927#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
928
929/* getting bitmap */
930#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
931#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
932#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
933#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
934#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
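/* Editorial note (not part of the original source): how an object pointer is
 * mapped to its bookkeeping bit.  Heap pages are HEAP_PAGE_ALIGN-aligned, so
 * masking the low bits of any interior pointer yields the page body, and the
 * slot index inside the page selects one bit in the per-page bitmaps:
 *
 *     VALUE obj = ...;                              // any heap object
 *     struct heap_page *page = GET_HEAP_PAGE(obj);  // mask + header->page
 *     bits_t *marks = GET_HEAP_MARK_BITS(obj);      // page->mark_bits
 *     int marked = MARKED_IN_BITMAP(marks, obj) != 0;
 *     MARK_IN_BITMAP(marks, obj);                   // set the mark bit
 *
 * BITMAP_INDEX/BITMAP_OFFSET split NUM_IN_PAGE(obj) into a word index and a
 * bit position within that bits_t word.
 */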
935
936/* Aliases */
937#define rb_objspace (*rb_objspace_of(GET_VM()))
938#define rb_objspace_of(vm) ((vm)->objspace)
939
940#define ruby_initial_gc_stress gc_params.gc_stress
941
942VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
943
944#define malloc_limit objspace->malloc_params.limit
945#define malloc_increase objspace->malloc_params.increase
946#define malloc_allocated_size objspace->malloc_params.allocated_size
947#define heap_pages_sorted objspace->heap_pages.sorted
948#define heap_allocated_pages objspace->heap_pages.allocated_pages
949#define heap_pages_sorted_length objspace->heap_pages.sorted_length
950#define heap_pages_lomem objspace->heap_pages.range[0]
951#define heap_pages_himem objspace->heap_pages.range[1]
952#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
953#define heap_pages_final_slots objspace->heap_pages.final_slots
954#define heap_pages_deferred_final objspace->heap_pages.deferred_final
955#define size_pools objspace->size_pools
956#define during_gc objspace->flags.during_gc
957#define finalizing objspace->atomic_flags.finalizing
958#define finalizer_table objspace->finalizer_table
959#define global_list objspace->global_list
960#define ruby_gc_stressful objspace->flags.gc_stressful
961#define ruby_gc_stress_mode objspace->gc_stress_mode
962#if GC_DEBUG_STRESS_TO_CLASS
963#define stress_to_class objspace->stress_to_class
964#else
965#define stress_to_class 0
966#endif
967
968#if 0
969#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
970#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
971#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)b, __FILE__, __LINE__), objspace->flags.dont_gc = (b))
972#define dont_gc_val() (objspace->flags.dont_gc)
973#else
974#define dont_gc_on() (objspace->flags.dont_gc = 1)
975#define dont_gc_off() (objspace->flags.dont_gc = 0)
976#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
977#define dont_gc_val() (objspace->flags.dont_gc)
978#endif
979
980static inline enum gc_mode
981gc_mode_verify(enum gc_mode mode)
982{
983#if RGENGC_CHECK_MODE > 0
984 switch (mode) {
985 case gc_mode_none:
986 case gc_mode_marking:
987 case gc_mode_sweeping:
988 break;
989 default:
990 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
991 }
992#endif
993 return mode;
994}
995
996static inline bool
997has_sweeping_pages(rb_objspace_t *objspace)
998{
999 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1000 if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
1001 return TRUE;
1002 }
1003 }
1004 return FALSE;
1005}
1006
1007static inline size_t
1008heap_eden_total_pages(rb_objspace_t *objspace)
1009{
1010 size_t count = 0;
1011 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1012 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
1013 }
1014 return count;
1015}
1016
1017static inline size_t
1018heap_eden_total_slots(rb_objspace_t *objspace)
1019{
1020 size_t count = 0;
1021 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1022 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
1023 }
1024 return count;
1025}
1026
1027static inline size_t
1028heap_tomb_total_pages(rb_objspace_t *objspace)
1029{
1030 size_t count = 0;
1031 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1032 count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
1033 }
1034 return count;
1035}
1036
1037static inline size_t
1038heap_allocatable_pages(rb_objspace_t *objspace)
1039{
1040 size_t count = 0;
1041 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1042 count += size_pools[i].allocatable_pages;
1043 }
1044 return count;
1045}
1046
1047static inline size_t
1048heap_allocatable_slots(rb_objspace_t *objspace)
1049{
1050 size_t count = 0;
1051 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1052 rb_size_pool_t *size_pool = &size_pools[i];
1053 int slot_size_multiple = size_pool->slot_size / sizeof(RVALUE);
1054 count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
1055 }
1056 return count;
1057}
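/* Editorial note (not part of the original source): the division above is
 * why larger size pools contribute fewer slots per page.  With the
 * illustrative 64-bit numbers used earlier (sizeof(RVALUE) == 40,
 * HEAP_PAGE_OBJ_LIMIT roughly 16KB / 40, i.e. about 409):
 *
 *     pool 0: slot_size = 40  -> multiple 1 -> ~409 slots per page
 *     pool 1: slot_size = 80  -> multiple 2 -> ~204 slots per page
 *     pool 2: slot_size = 160 -> multiple 4 -> ~102 slots per page
 *
 * so heap_allocatable_slots() estimates objects, not pages.
 */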
1058
1059#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1060#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
1061
1062#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1063#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1064#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1065#if GC_ENABLE_INCREMENTAL_MARK
1066#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1067#else
1068#define is_incremental_marking(objspace) FALSE
1069#endif
1070#if GC_ENABLE_INCREMENTAL_MARK
1071#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1072#else
1073#define will_be_incremental_marking(objspace) FALSE
1074#endif
1075#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1076
1077#if SIZEOF_LONG == SIZEOF_VOIDP
1078# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
1079# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
1080#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1081# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
1082# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1083 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1084#else
1085# error not supported
1086#endif
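/* Editorial note (not part of the original source): on the common
 * SIZEOF_LONG == SIZEOF_VOIDP branch these two macros are inverses.  Heap
 * slots are VALUE-aligned, so an object pointer's low bit is free to carry
 * FIXNUM_FLAG, which turns the address into a Fixnum-looking object id:
 *
 *     VALUE obj = ...;                          // e.g. 0x00007f53c3a4e708
 *     VALUE id  = nonspecial_obj_id(obj);       // address | FIXNUM_FLAG
 *     VALUE ref = obj_id_to_ref(id);            // id ^ FIXNUM_FLAG == obj
 *
 * The LL2NUM branch exists for platforms where a pointer does not fit in a
 * long and the id must be boxed instead.
 */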
1087
1088#define RANY(o) ((RVALUE*)(o))
1089
1090struct RZombie {
1091 struct RBasic basic;
1092 VALUE next;
1093 void (*dfree)(void *);
1094 void *data;
1095};
1096
1097#define RZOMBIE(o) ((struct RZombie *)(o))
1098
1099#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1100
1101#if RUBY_MARK_FREE_DEBUG
1102int ruby_gc_debug_indent = 0;
1103#endif
1105int ruby_disable_gc = 0;
1106int ruby_enable_autocompact = 0;
1107
1108void rb_iseq_mark(const rb_iseq_t *iseq);
1109void rb_iseq_update_references(rb_iseq_t *iseq);
1110void rb_iseq_free(const rb_iseq_t *iseq);
1111size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1112void rb_vm_update_references(void *ptr);
1113
1114void rb_gcdebug_print_obj_condition(VALUE obj);
1115
1116static VALUE define_final0(VALUE obj, VALUE block);
1117
1118NORETURN(static void *gc_vraise(void *ptr));
1119NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
1120NORETURN(static void negative_size_allocation_error(const char *));
1121
1122static void init_mark_stack(mark_stack_t *stack);
1123
1124static int ready_to_gc(rb_objspace_t *objspace);
1125
1126static int garbage_collect(rb_objspace_t *, unsigned int reason);
1127
1128static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1129static void gc_rest(rb_objspace_t *objspace);
1130
1131enum gc_enter_event {
1132 gc_enter_event_start,
1133 gc_enter_event_mark_continue,
1134 gc_enter_event_sweep_continue,
1135 gc_enter_event_rest,
1136 gc_enter_event_finalizer,
1137 gc_enter_event_rb_memerror,
1138};
1139
1140static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1141static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1142
1143static void gc_marks(rb_objspace_t *objspace, int full_mark);
1144static void gc_marks_start(rb_objspace_t *objspace, int full);
1145static int gc_marks_finish(rb_objspace_t *objspace);
1146static void gc_marks_rest(rb_objspace_t *objspace);
1147static void gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1148
1149static void gc_sweep(rb_objspace_t *objspace);
1150static void gc_sweep_start(rb_objspace_t *objspace);
1151static void gc_sweep_finish(rb_objspace_t *objspace);
1152static int gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1153static void gc_sweep_rest(rb_objspace_t *objspace);
1154static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1155
1156static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1157static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1158static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1159static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
1160NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1161static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
1162
1163static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1164static int gc_mark_stacked_objects_all(rb_objspace_t *);
1165static void gc_grey(rb_objspace_t *objspace, VALUE ptr);
1166
1167static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
1168NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1169
1170static void push_mark_stack(mark_stack_t *, VALUE);
1171static int pop_mark_stack(mark_stack_t *, VALUE *);
1172static size_t mark_stack_size(mark_stack_t *stack);
1173static void shrink_stack_chunk_cache(mark_stack_t *stack);
1174
1175static size_t obj_memsize_of(VALUE obj, int use_all_types);
1176static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1177static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
1178static int gc_verify_heap_pages(rb_objspace_t *objspace);
1179
1180static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1181static VALUE gc_disable_no_rest(rb_objspace_t *);
1182
1183static double getrusage_time(void);
1184static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1185static inline void gc_prof_timer_start(rb_objspace_t *);
1186static inline void gc_prof_timer_stop(rb_objspace_t *);
1187static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1188static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1189static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1190static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1191static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1192static inline void gc_prof_set_heap_info(rb_objspace_t *);
1193
1194#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1195 if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
1196 *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
1197 } \
1198} while (0)
1199
1200#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
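/* Editorial note (not part of the original source): these macros are the
 * building block of the reference-updating pass after compaction.  When an
 * object has been moved, its old slot is left as a T_MOVED forwarding stub
 * whose `destination` field points at the new location, so an update
 * function can fix a field in place:
 *
 *     // hypothetical updater for a struct with one VALUE field
 *     static void
 *     my_update(rb_objspace_t *objspace, struct my_thing *t)
 *     {
 *         UPDATE_IF_MOVED(objspace, t->value);   // rewrites only if T_MOVED
 *     }
 *
 * TYPED_UPDATE_IF_MOVED exists for fields declared as a more specific
 * pointer type than VALUE.
 */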
1201
1202#define gc_prof_record(objspace) (objspace)->profile.current_record
1203#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1204
1205#ifdef HAVE_VA_ARGS_MACRO
1206# define gc_report(level, objspace, ...) \
1207 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1208#else
1209# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1210#endif
1211PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1212static const char *obj_info(VALUE obj);
1213static const char *obj_type_name(VALUE obj);
1214
1215/*
1216 * 1 - TSC (H/W Time Stamp Counter)
1217 * 2 - getrusage
1218 */
1219#ifndef TICK_TYPE
1220#define TICK_TYPE 1
1221#endif
1222
1223#if USE_TICK_T
1224
1225#if TICK_TYPE == 1
1226/* the following code is only for internal tuning. */
1227
1228/* Source code to use RDTSC is quoted and modified from
1229 * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
1230 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1231 */
1232
1233#if defined(__GNUC__) && defined(__i386__)
1234typedef unsigned long long tick_t;
1235#define PRItick "llu"
1236static inline tick_t
1237tick(void)
1238{
1239 unsigned long long int x;
1240 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1241 return x;
1242}
1243
1244#elif defined(__GNUC__) && defined(__x86_64__)
1245typedef unsigned long long tick_t;
1246#define PRItick "llu"
1247
1248static __inline__ tick_t
1249tick(void)
1250{
1251 unsigned long hi, lo;
1252 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1253 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1254}
1255
1256#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1257typedef unsigned long long tick_t;
1258#define PRItick "llu"
1259
1260static __inline__ tick_t
1261tick(void)
1262{
1263 unsigned long long val = __builtin_ppc_get_timebase();
1264 return val;
1265}
1266
1267#elif defined(__aarch64__) && defined(__GNUC__)
1268typedef unsigned long tick_t;
1269#define PRItick "lu"
1270
1271static __inline__ tick_t
1272tick(void)
1273{
1274 unsigned long val;
1275 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1276 return val;
1277}
1278
1279
1280#elif defined(_WIN32) && defined(_MSC_VER)
1281#include <intrin.h>
1282typedef unsigned __int64 tick_t;
1283#define PRItick "llu"
1284
1285static inline tick_t
1286tick(void)
1287{
1288 return __rdtsc();
1289}
1290
1291#else /* use clock */
1292typedef clock_t tick_t;
1293#define PRItick "llu"
1294
1295static inline tick_t
1296tick(void)
1297{
1298 return clock();
1299}
1300#endif /* TSC */
1301
1302#elif TICK_TYPE == 2
1303typedef double tick_t;
1304#define PRItick "4.9f"
1305
1306static inline tick_t
1307tick(void)
1308{
1309 return getrusage_time();
1310}
1311#else /* TICK_TYPE */
1312#error "choose tick type"
1313#endif /* TICK_TYPE */
1314
1315#define MEASURE_LINE(expr) do { \
1316 volatile tick_t start_time = tick(); \
1317 volatile tick_t end_time; \
1318 expr; \
1319 end_time = tick(); \
1320 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1321} while (0)
1322
1323#else /* USE_TICK_T */
1324#define MEASURE_LINE(expr) expr
1325#endif /* USE_TICK_T */
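/* Editorial note (not part of the original source): MEASURE_LINE is a
 * wrap-one-statement timer intended for the PRINT_MEASURE_LINE tuning
 * output.  A sketch of the usage and of what lands on stderr (tick units
 * depend on TICK_TYPE):
 *
 *     MEASURE_LINE(gc_sweep_step(objspace, size_pool, heap));
 *     // stderr: "0\t<elapsed ticks>\tgc_sweep_step(objspace, size_pool, heap)"
 *
 * When USE_TICK_T is 0 the macro degenerates to the bare expression, so the
 * instrumentation has no cost in normal builds.
 */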
1326
1327static inline void *
1328asan_unpoison_object_temporary(VALUE obj)
1329{
1330 void *ptr = asan_poisoned_object_p(obj);
1331 asan_unpoison_object(obj, false);
1332 return ptr;
1333}
1334
1335#define FL_CHECK2(name, x, pred) \
1336 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1337 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1338#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1339#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1340#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1341
1342#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1343#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1344#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1345
1346#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1347#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1348#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1349
1350#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1351#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1352#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1353
1354#define RVALUE_OLD_AGE 3
1355#define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
1356
1357static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
1358static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
1359static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1360static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1361static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1362
1363static inline int
1364RVALUE_FLAGS_AGE(VALUE flags)
1365{
1366 return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
1367}
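/* Editorial note (not part of the original source): an object's generational
 * age lives in the two FL_PROMOTED0/FL_PROMOTED1 flag bits, so it counts
 * 0..3 and RVALUE_OLD_AGE (3) means "old".  Reading and bumping it is pure
 * bit arithmetic on the RBasic flags word:
 *
 *     VALUE flags = RBASIC(obj)->flags;
 *     int age = RVALUE_FLAGS_AGE(flags);                    // (flags >> 5) & 3
 *     RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age + 1);  // defined below
 *
 * RVALUE_AGE_INC further down is the checked version of exactly this, and
 * promotes the object to the uncollectible set once the age reaches
 * RVALUE_OLD_AGE.
 */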
1368
1369static int
1370check_rvalue_consistency_force(const VALUE obj, int terminate)
1371{
1372 int err = 0;
1373 rb_objspace_t *objspace = &rb_objspace;
1374
1375 RB_VM_LOCK_ENTER_NO_BARRIER();
1376 {
1377 if (SPECIAL_CONST_P(obj)) {
1378 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1379 err++;
1380 }
1381 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1382 /* check if it is in tomb_pages */
1383 struct heap_page *page = NULL;
1384 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1385 rb_size_pool_t *size_pool = &size_pools[i];
1386 list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
1387 if (&page->start[0] <= (RVALUE *)obj &&
1388 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * size_pool->slot_size))) {
1389 fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1390 (void *)obj, (void *)page);
1391 err++;
1392 goto skip;
1393 }
1394 }
1395 }
1396 bp();
1397 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1398 err++;
1399 skip:
1400 ;
1401 }
1402 else {
1403 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1404 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1405 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1406 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
1407 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1408
1409 if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1410 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1411 err++;
1412 }
1413 if (BUILTIN_TYPE(obj) == T_NONE) {
1414 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1415 err++;
1416 }
1417 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1418 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1419 err++;
1420 }
1421
1422 obj_memsize_of((VALUE)obj, FALSE);
1423
1424 /* check generation
1425 *
1426 * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1427 */
1428 if (age > 0 && wb_unprotected_bit) {
1429 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1430 err++;
1431 }
1432
1433 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1434 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1435 err++;
1436 }
1437
1438 if (!is_full_marking(objspace)) {
1439 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1440 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1441 obj_info(obj), age);
1442 err++;
1443 }
1444 if (remembered_bit && age != RVALUE_OLD_AGE) {
1445 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1446 obj_info(obj), age);
1447 err++;
1448 }
1449 }
1450
1451 /*
1452 * check coloring
1453 *
1454 * marking:false marking:true
1455 * marked:false white *invalid*
1456 * marked:true black grey
1457 */
1458 if (is_incremental_marking(objspace) && marking_bit) {
1459 if (!is_marking(objspace) && !mark_bit) {
1460 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1461 err++;
1462 }
1463 }
1464 }
1465 }
1466 RB_VM_LOCK_LEAVE_NO_BARRIER();
1467
1468 if (err > 0 && terminate) {
1469 rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1470 }
1471 return err;
1472}
1473
1474#if RGENGC_CHECK_MODE == 0
1475static inline VALUE
1476check_rvalue_consistency(const VALUE obj)
1477{
1478 return obj;
1479}
1480#else
1481static VALUE
1482check_rvalue_consistency(const VALUE obj)
1483{
1484 check_rvalue_consistency_force(obj, TRUE);
1485 return obj;
1486}
1487#endif
1488
1489static inline int
1490gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
1491{
1492 if (RB_SPECIAL_CONST_P(obj)) {
1493 return FALSE;
1494 }
1495 else {
1496 void *poisoned = asan_poisoned_object_p(obj);
1497 asan_unpoison_object(obj, false);
1498
1499 int ret = BUILTIN_TYPE(obj) == T_MOVED;
1500 /* Re-poison slot if it's not the one we want */
1501 if (poisoned) {
1502 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
1503 asan_poison_object(obj);
1504 }
1505 return ret;
1506 }
1507}
1508
1509static inline int
1510RVALUE_MARKED(VALUE obj)
1511{
1512 check_rvalue_consistency(obj);
1513 return RVALUE_MARK_BITMAP(obj) != 0;
1514}
1515
1516static inline int
1517RVALUE_PINNED(VALUE obj)
1518{
1519 check_rvalue_consistency(obj);
1520 return RVALUE_PIN_BITMAP(obj) != 0;
1521}
1522
1523static inline int
1524RVALUE_WB_UNPROTECTED(VALUE obj)
1525{
1526 check_rvalue_consistency(obj);
1527 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1528}
1529
1530static inline int
1531RVALUE_MARKING(VALUE obj)
1532{
1533 check_rvalue_consistency(obj);
1534 return RVALUE_MARKING_BITMAP(obj) != 0;
1535}
1536
1537static inline int
1538RVALUE_REMEMBERED(VALUE obj)
1539{
1540 check_rvalue_consistency(obj);
1541 return RVALUE_MARKING_BITMAP(obj) != 0;
1542}
1543
1544static inline int
1545RVALUE_UNCOLLECTIBLE(VALUE obj)
1546{
1547 check_rvalue_consistency(obj);
1548 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1549}
1550
1551static inline int
1552RVALUE_OLD_P_RAW(VALUE obj)
1553{
1554 const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
1555 return (RBASIC(obj)->flags & promoted) == promoted;
1556}
1557
1558static inline int
1559RVALUE_OLD_P(VALUE obj)
1560{
1561 check_rvalue_consistency(obj);
1562 return RVALUE_OLD_P_RAW(obj);
1563}
1564
1565#if RGENGC_CHECK_MODE || GC_DEBUG
1566static inline int
1567RVALUE_AGE(VALUE obj)
1568{
1569 check_rvalue_consistency(obj);
1570 return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1571}
1572#endif
1573
1574static inline void
1575RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1576{
1577 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1578 objspace->rgengc.old_objects++;
1579 rb_transient_heap_promote(obj);
1580
1581#if RGENGC_PROFILE >= 2
1582 objspace->profile.total_promoted_count++;
1583 objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1584#endif
1585}
1586
1587static inline void
1588RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1589{
1590 RB_DEBUG_COUNTER_INC(obj_promote);
1591 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1592}
1593
1594static inline VALUE
1595RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1596{
1597 flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
1598 flags |= (age << RVALUE_AGE_SHIFT);
1599 return flags;
1600}
1601
1602/* set age to age+1 */
1603static inline void
1604RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1605{
1606 VALUE flags = RBASIC(obj)->flags;
1607 int age = RVALUE_FLAGS_AGE(flags);
1608
1609 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1610 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1611 }
1612
1613 age++;
1614 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1615
1616 if (age == RVALUE_OLD_AGE) {
1617 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1618 }
1619 check_rvalue_consistency(obj);
1620}
1621
1622/* set age to RVALUE_OLD_AGE */
1623static inline void
1624RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
1625{
1626 check_rvalue_consistency(obj);
1627 GC_ASSERT(!RVALUE_OLD_P(obj));
1628
1629 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1630 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1631
1632 check_rvalue_consistency(obj);
1633}
1634
1635/* set age to RVALUE_OLD_AGE - 1 */
1636static inline void
1637RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1638{
1639 check_rvalue_consistency(obj);
1640 GC_ASSERT(!RVALUE_OLD_P(obj));
1641
1642 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1643
1644 check_rvalue_consistency(obj);
1645}
1646
1647static inline void
1648RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
1649{
1650 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1651 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1652}
1653
1654static inline void
1655RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1656{
1657 check_rvalue_consistency(obj);
1658 GC_ASSERT(RVALUE_OLD_P(obj));
1659
1660 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1661 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
1662 }
1663
1664 RVALUE_DEMOTE_RAW(objspace, obj);
1665
1666 if (RVALUE_MARKED(obj)) {
1667 objspace->rgengc.old_objects--;
1668 }
1669
1670 check_rvalue_consistency(obj);
1671}
1672
1673static inline void
1674RVALUE_AGE_RESET_RAW(VALUE obj)
1675{
1676 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1677}
1678
1679static inline void
1680RVALUE_AGE_RESET(VALUE obj)
1681{
1682 check_rvalue_consistency(obj);
1683 GC_ASSERT(!RVALUE_OLD_P(obj));
1684
1685 RVALUE_AGE_RESET_RAW(obj);
1686 check_rvalue_consistency(obj);
1687}
1688
1689static inline int
1690RVALUE_BLACK_P(VALUE obj)
1691{
1692 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1693}
1694
1695#if 0
1696static inline int
1697RVALUE_GREY_P(VALUE obj)
1698{
1699 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1700}
1701#endif
1702
1703static inline int
1704RVALUE_WHITE_P(VALUE obj)
1705{
1706 return RVALUE_MARKED(obj) == FALSE;
1707}
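/* Editorial note (not part of the original source): the three predicates
 * above implement the usual tri-color abstraction on top of the mark and
 * marking bitmaps:
 *
 *     white: !marked              (not yet reached; collectable if it stays white)
 *     grey : marked && marking    (reached, children not yet scanned)
 *     black: marked && !marking   (reached and fully scanned)
 *
 * The incremental write barrier's job is to keep black objects from pointing
 * at white ones without the collector noticing.
 */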
1708
1709/*
1710 --------------------------- ObjectSpace -----------------------------
1711*/
1712
1713static inline void *
1714calloc1(size_t n)
1715{
1716 return calloc(1, n);
1717}
1718
1719rb_objspace_t *
1720rb_objspace_alloc(void)
1721{
1722 rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
1723 objspace->flags.measure_gc = 1;
1724 malloc_limit = gc_params.malloc_limit_min;
1725
1726 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1727 rb_size_pool_t *size_pool = &size_pools[i];
1728
1729 size_pool->slot_size = sizeof(RVALUE) * (1 << i);
1730
1731 list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1732 list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
1733 }
1734
1735 dont_gc_on();
1736
1737 return objspace;
1738}
1739
1740static void free_stack_chunks(mark_stack_t *);
1741static void mark_stack_free_cache(mark_stack_t *);
1742static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1743
1744void
1745rb_objspace_free(rb_objspace_t *objspace)
1746{
1747 if (is_lazy_sweeping(objspace))
1748 rb_bug("lazy sweeping underway when freeing object space");
1749
1750 if (objspace->profile.records) {
1751 free(objspace->profile.records);
1752 objspace->profile.records = 0;
1753 }
1754
1755 if (global_list) {
1756 struct gc_list *list, *next;
1757 for (list = global_list; list; list = next) {
1758 next = list->next;
1759 xfree(list);
1760 }
1761 }
1762 if (heap_pages_sorted) {
1763 size_t i;
1764 for (i = 0; i < heap_allocated_pages; ++i) {
1765 heap_page_free(objspace, heap_pages_sorted[i]);
1766 }
1767 free(heap_pages_sorted);
1768 heap_allocated_pages = 0;
1769 heap_pages_sorted_length = 0;
1770 heap_pages_lomem = 0;
1771 heap_pages_himem = 0;
1772
1773 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1774 rb_size_pool_t *size_pool = &size_pools[i];
1775 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1776 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1777 }
1778 }
1779 st_free_table(objspace->id_to_obj_tbl);
1780 st_free_table(objspace->obj_to_id_tbl);
1781
1782 free_stack_chunks(&objspace->mark_stack);
1783 mark_stack_free_cache(&objspace->mark_stack);
1784
1785 free(objspace);
1786}
1787
1788static void
1789heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1790{
1791 struct heap_page **sorted;
1792 size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1793
1794 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1795 next_length, size);
1796
1797 if (heap_pages_sorted_length > 0) {
1798 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1799 if (sorted) heap_pages_sorted = sorted;
1800 }
1801 else {
1802 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1803 }
1804
1805 if (sorted == 0) {
1806 rb_memerror();
1807 }
1808
1809 heap_pages_sorted_length = next_length;
1810}
1811
1812static void
1813heap_pages_expand_sorted(rb_objspace_t *objspace)
1814{
1815 /* usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length,
1816 * because heap_allocatable_pages already contains heap_tomb->total_pages (tomb pages are recycled).
1817 * However, if there are pages which do not have empty slots, new pages have to be created,
1818 * so the additional allocatable_pages count (heap_tomb->total_pages) is added.
1819 */
1820 size_t next_length = heap_allocatable_pages(objspace);
1821 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1822 rb_size_pool_t *size_pool = &size_pools[i];
1823 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
1824 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
1825 }
1826
1827 if (next_length > heap_pages_sorted_length) {
1828 heap_pages_expand_sorted_to(objspace, next_length);
1829 }
1830
1831 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
1832 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1833}
1834
1835static void
1836size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
1837{
1838 size_pool->allocatable_pages = s;
1839 heap_pages_expand_sorted(objspace);
1840}
1841
1842static inline void
1843heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1844{
1845 ASSERT_vm_locking();
1846
1847 RVALUE *p = (RVALUE *)obj;
1848
1849 asan_unpoison_object(obj, false);
1850
1851 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1852
1853 p->as.free.flags = 0;
1854 p->as.free.next = page->freelist;
1855 page->freelist = p;
1856 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1857
1858 if (RGENGC_CHECK_MODE &&
1859 /* obj should belong to page */
1860 !(&page->start[0] <= (RVALUE *)obj &&
1861 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
1862 obj % sizeof(RVALUE) == 0)) {
1863 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
1864 }
1865
1866 asan_poison_object(obj);
1867 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1868}
1869
1870static inline void
1871heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
1872{
1873 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1874 GC_ASSERT(page->free_slots != 0);
1875 GC_ASSERT(page->freelist != NULL);
1876
1877 page->free_next = heap->free_pages;
1878 heap->free_pages = page;
1879
1880 RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
1881
1882 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1883}
1884
1885#if GC_ENABLE_INCREMENTAL_MARK
1886static inline void
1887heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1888{
1889 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1890 GC_ASSERT(page->free_slots != 0);
1891 GC_ASSERT(page->freelist != NULL);
1892
1893 page->free_next = heap->pooled_pages;
1894 heap->pooled_pages = page;
1895 objspace->rincgc.pooled_slots += page->free_slots;
1896
1897 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1898}
1899#endif
1900
1901static void
1902heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1903{
1904 list_del(&page->page_node);
1905 heap->total_pages--;
1906 heap->total_slots -= page->total_slots;
1907}
1908
1909static void rb_aligned_free(void *ptr, size_t size);
1910
1911static void
1912heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
1913{
1914 heap_allocated_pages--;
1915 objspace->profile.total_freed_pages++;
1916 rb_aligned_free(GET_PAGE_BODY(page->start), HEAP_PAGE_SIZE);
1917 free(page);
1918}
1919
1920static void
1921heap_pages_free_unused_pages(rb_objspace_t *objspace)
1922{
1923 size_t i, j;
1924
1925 bool has_pages_in_tomb_heap = FALSE;
1926 for (i = 0; i < SIZE_POOL_COUNT; i++) {
1927 if (!list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
1928 has_pages_in_tomb_heap = TRUE;
1929 break;
1930 }
1931 }
1932
1933 if (has_pages_in_tomb_heap) {
1934 for (i = j = 1; j < heap_allocated_pages; i++) {
1935 struct heap_page *page = heap_pages_sorted[i];
1936
1937 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
1938 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
1939 heap_page_free(objspace, page);
1940 }
1941 else {
1942 if (i != j) {
1943 heap_pages_sorted[j] = page;
1944 }
1945 j++;
1946 }
1947 }
1948
1949 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
1950 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
1951 GC_ASSERT(himem <= (uintptr_t)heap_pages_himem);
1952 heap_pages_himem = (RVALUE *)himem;
1953
1954 GC_ASSERT(j == heap_allocated_pages);
1955 }
1956}
1957
1958static struct heap_page *
1959heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
1960{
1961 uintptr_t start, end, p;
1962 struct heap_page *page;
1963 struct heap_page_body *page_body = 0;
1964 uintptr_t hi, lo, mid;
1965 size_t stride = size_pool->slot_size;
1966 unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
1967
1968 /* assign heap_page body (contains heap_page_header and RVALUEs) */
1969 page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1970 if (page_body == 0) {
1971 rb_memerror();
1972 }
1973
1974 /* assign heap_page entry */
1975 page = calloc1(sizeof(struct heap_page));
1976 if (page == 0) {
1977 rb_aligned_free(page_body, HEAP_PAGE_SIZE);
1978 rb_memerror();
1979 }
1980
1981 /* adjust obj_limit (object number available in this page) */
1982 start = (uintptr_t)((VALUE)page_body + sizeof(struct heap_page_header));
1983
1984 if ((VALUE)start % sizeof(RVALUE) != 0) {
1985 int delta = (int)sizeof(RVALUE) - (start % (int)sizeof(RVALUE));
1986 start = start + delta;
1987 GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
1988
1989 /* Find a num in page that is evenly divisible by `stride`.
1990 * This is to ensure that objects are aligned with bit planes.
1991 * In other words, ensure there are an even number of objects
1992 * per bit plane. */
1993 if (NUM_IN_PAGE(start) == 1) {
1994 start += stride - sizeof(RVALUE);
1995 }
1996
1997 GC_ASSERT(NUM_IN_PAGE(start) * sizeof(RVALUE) % stride == 0);
1998
1999 limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
2000 }
2001 end = start + (limit * (int)stride);
2002
2003 /* setup heap_pages_sorted */
2004 lo = 0;
2005 hi = (uintptr_t)heap_allocated_pages;
2006 while (lo < hi) {
2007 struct heap_page *mid_page;
2008
2009 mid = (lo + hi) / 2;
2010 mid_page = heap_pages_sorted[mid];
2011 if ((uintptr_t)mid_page->start < start) {
2012 lo = mid + 1;
2013 }
2014 else if ((uintptr_t)mid_page->start > start) {
2015 hi = mid;
2016 }
2017 else {
2018 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2019 }
2020 }
2021
2022 if (hi < (uintptr_t)heap_allocated_pages) {
2023 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
2024 }
2025
2026 heap_pages_sorted[hi] = page;
2027
2028 heap_allocated_pages++;
2029
2030 GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2031 GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2032 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2033
2034 objspace->profile.total_allocated_pages++;
2035
2036 if (heap_allocated_pages > heap_pages_sorted_length) {
2037 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
2038 heap_allocated_pages, heap_pages_sorted_length);
2039 }
2040
2041 if (heap_pages_lomem == 0 || (uintptr_t)heap_pages_lomem > start) heap_pages_lomem = (RVALUE *)start;
2042 if ((uintptr_t)heap_pages_himem < end) heap_pages_himem = (RVALUE *)end;
2043
2044 page->start = (RVALUE *)start;
2045 page->total_slots = limit;
2046 page->slot_size = size_pool->slot_size;
2047 page->size_pool = size_pool;
2048 page_body->header.page = page;
2049
2050 for (p = start; p != end; p += stride) {
2051 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
2052 heap_page_add_freeobj(objspace, page, (VALUE)p);
2053 }
2054 page->free_slots = limit;
2055
2056 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
2057 return page;
2058}
2059
2060static struct heap_page *
2061heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2062{
2063 struct heap_page *page = 0, *next;
2064
2065 list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2066 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2067 if (page->freelist != NULL) {
2068 heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2069 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
2070 return page;
2071 }
2072 }
2073
2074 return NULL;
2075}
2076
2077static struct heap_page *
2078heap_page_create(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2079{
2080 struct heap_page *page;
2081 const char *method = "recycle";
2082
2083 size_pool->allocatable_pages--;
2084
2085 page = heap_page_resurrect(objspace, size_pool);
2086
2087 if (page == NULL) {
2088 page = heap_page_allocate(objspace, size_pool);
2089 method = "allocate";
2090 }
2091 if (0) fprintf(stderr, "heap_page_create: %s - %p, "
2092                   "heap_pages_sorted_length: %"PRIdSIZE", "
2093 "heap_allocated_pages: %"PRIdSIZE", "
2094 "tomb->total_pages: %"PRIdSIZE"\n",
2095 method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2096 return page;
2097}
2098
2099static void
2100heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
2101{
2102 /* Adding to eden heap during incremental sweeping is forbidden */
2103 GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2104 page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2105 list_add_tail(&heap->pages, &page->page_node);
2106 heap->total_pages++;
2107 heap->total_slots += page->total_slots;
2108}
2109
2110static void
2111heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2112{
2113 struct heap_page *page = heap_page_create(objspace, size_pool);
2114 heap_add_page(objspace, size_pool, heap, page);
2115 heap_add_freepage(heap, page);
2116}
2117
2118static void
2119heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)
2120{
2121 size_t i;
2122
2123 size_pool_allocatable_pages_set(objspace, size_pool, add);
2124
2125 for (i = 0; i < add; i++) {
2126 heap_assign_page(objspace, size_pool, heap);
2127 }
2128
2129 GC_ASSERT(size_pool->allocatable_pages == 0);
2130}
2131
2132static size_t
2133heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots, size_t used)
2134{
2135 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2136 size_t next_used;
2137
2138 if (goal_ratio == 0.0) {
2139 next_used = (size_t)(used * gc_params.growth_factor);
2140 }
2141 else {
2142        /* Find `f' such that, after growing the heap to f * total_slots slots, the
2143         * free slots are goal_ratio of the total: f*total - (total - free) = goal_ratio * f * total
2144         *  => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots) */
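        /* Editorial worked example (not from the original source): with
         * total_slots = 10000, free_slots = 2000 and goal_ratio = 0.40,
         * used slots = 8000, so f = 8000 / (0.60 * 10000) = 1.33.  Growing
         * the heap to about 13333 slots leaves roughly 5333 free slots,
         * i.e. the requested 40%. */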
2145 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2146
2147 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2148 if (f < 1.0) f = 1.1;
2149
2150 next_used = (size_t)(f * used);
2151
2152 if (0) {
2153 fprintf(stderr,
2154 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
2155 " G(%1.2f), f(%1.2f),"
2156 " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
2157 free_slots, total_slots, free_slots/(double)total_slots,
2158 goal_ratio, f, used, next_used);
2159 }
2160 }
2161
2162 if (gc_params.growth_max_slots > 0) {
2163 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2164 if (next_used > max_used) next_used = max_used;
2165 }
2166
2167 size_t extend_page_count = next_used - used;
2168 /* Extend by at least 1 page. */
2169 if (extend_page_count == 0) extend_page_count = 1;
2170
2171 return extend_page_count;
2172}
2173
2174static int
2175heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2176{
2177 if (size_pool->allocatable_pages > 0) {
2178 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
2179 "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2180 heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2181
2182 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2183 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2184
2185 heap_assign_page(objspace, size_pool, heap);
2186 return TRUE;
2187 }
2188 return FALSE;
2189}
2190
2191static void
2192heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2193{
2194 GC_ASSERT(heap->free_pages == NULL);
2195
2196 if (is_lazy_sweeping(objspace)) {
2197 gc_sweep_continue(objspace, size_pool, heap);
2198 }
2199 else if (is_incremental_marking(objspace)) {
2200 gc_marks_continue(objspace, size_pool, heap);
2201 }
2202
2203 if (heap->free_pages == NULL &&
2204 (will_be_incremental_marking(objspace) || heap_increment(objspace, size_pool, heap) == FALSE) &&
2205 gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2206 rb_memerror();
2207 }
2208}
2209
2210void
2211rb_objspace_set_event_hook(const rb_event_flag_t event)
2212{
2213 rb_objspace_t *objspace = &rb_objspace;
2214 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
2215 objspace->flags.has_hook = (objspace->hook_events != 0);
2216}
2217
2218static void
2219gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
2220{
2221 const VALUE *pc = ec->cfp->pc;
2222 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2223 /* increment PC because source line is calculated with PC-1 */
2224 ec->cfp->pc++;
2225 }
2226 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2227 ec->cfp->pc = pc;
2228}
2229
2230#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2231#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2232
2233#define gc_event_hook_prep(objspace, event, data, prep) do { \
2234 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2235 prep; \
2236 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2237 } \
2238} while (0)
2239
2240#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
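
/* Editorial sketch (illustrative, not part of the original source):
 * gc_event_hook_prep() is for hooks whose argument needs preparation that
 * should only happen when the hook will actually fire, while gc_event_hook()
 * passes its data as-is.  Both patterns appear later in this file: */
#if 0
    /* prepare the new slot only if a NEWOBJ hook is installed */
    gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj,
                       newobj_fill(obj, 0, 0, 0));

    /* no extra preparation needed for FREEOBJ */
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
#endif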
2241
2242static inline VALUE
2243newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2244{
2245#if !__has_feature(memory_sanitizer)
2246 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2247 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2248#endif
2249 RVALUE *p = RANY(obj);
2250 p->as.basic.flags = flags;
2251 *((VALUE *)&p->as.basic.klass) = klass;
2252
2253#if RACTOR_CHECK_MODE
2254 rb_ractor_setup_belonging(obj);
2255#endif
2256
2257#if RGENGC_CHECK_MODE
2258 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2259
2260 RB_VM_LOCK_ENTER_NO_BARRIER();
2261 {
2262 check_rvalue_consistency(obj);
2263
2264 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2265 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2266 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2267 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2268
2269 if (flags & FL_PROMOTED1) {
2270 if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2271 }
2272 else {
2273 if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2274 }
2275 if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2276 }
2277 RB_VM_LOCK_LEAVE_NO_BARRIER();
2278#endif
2279
2280 if (UNLIKELY(wb_protected == FALSE)) {
2281 ASSERT_vm_locking();
2282 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2283 }
2284
2285 // TODO: make it atomic, or ractor local
2286 objspace->total_allocated_objects++;
2287
2288#if RGENGC_PROFILE
2289 if (wb_protected) {
2290 objspace->profile.total_generated_normal_object_count++;
2291#if RGENGC_PROFILE >= 2
2292 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2293#endif
2294 }
2295 else {
2296 objspace->profile.total_generated_shady_object_count++;
2297#if RGENGC_PROFILE >= 2
2298 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2299#endif
2300 }
2301#endif
2302
2303#if GC_DEBUG
2304 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2305 GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2306#endif
2307
2308 gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2309
2310#if RGENGC_OLD_NEWOBJ_CHECK > 0
2311 {
2312 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2313
2314 if (!is_incremental_marking(objspace) &&
2315 flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
2316 ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
2317 if (--newobj_cnt == 0) {
2318 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2319
2320 gc_mark_set(objspace, obj);
2321 RVALUE_AGE_SET_OLD(objspace, obj);
2322
2323 rb_gc_writebarrier_remember(obj);
2324 }
2325 }
2326 }
2327#endif
2328 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
2329 return obj;
2330}
2331
2332static inline void heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
2333static struct heap_page *heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
2334static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx);
2335
2336size_t
2337rb_gc_obj_slot_size(VALUE obj)
2338{
2339 return GET_HEAP_PAGE(obj)->slot_size;
2340}
2341
2342static inline size_t
2343size_pool_slot_size(unsigned char pool_id)
2344{
2345 GC_ASSERT(pool_id < SIZE_POOL_COUNT);
2346
2347 size_t slot_size = (1 << pool_id) * sizeof(RVALUE);
2348
2349#if RGENGC_CHECK_MODE
2350 rb_objspace_t *objspace = &rb_objspace;
2351 GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
2352#endif
2353
2354 return slot_size;
2355}
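
/* Editorial note (illustrative, assuming the common 40-byte RVALUE on 64-bit
 * builds): pool 0 holds 40-byte slots, pool 1 80-byte, pool 2 160-byte, and
 * so on, doubling with each pool id as computed above. */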
2356
2357bool
2358rb_gc_size_allocatable_p(size_t size)
2359{
2360 return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
2361}
2362
2363static inline VALUE
2364ractor_cached_free_region(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2365{
2366 rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];
2367 RVALUE *p = cache->freelist;
2368
2369 if (p) {
2370 VALUE obj = (VALUE)p;
2371 cache->freelist = p->as.free.next;
2372 asan_unpoison_object(obj, true);
2373#if RGENGC_CHECK_MODE
2374 // zero clear
2375 MEMZERO((char *)obj, char, size_pool_slot_size(size_pool_idx));
2376#endif
2377 return obj;
2378 }
2379 else {
2380 return Qfalse;
2381 }
2382}
2383
2384static struct heap_page *
2385heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2386{
2387 ASSERT_vm_locking();
2388
2389 struct heap_page *page;
2390
2391 while (heap->free_pages == NULL) {
2392 heap_prepare(objspace, size_pool, heap);
2393 }
2394 page = heap->free_pages;
2395 heap->free_pages = page->free_next;
2396
2397 GC_ASSERT(page->free_slots != 0);
2398 RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
2399
2400 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2401
2402 return page;
2403}
2404
2405static inline void
2406ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx)
2407{
2408 gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
2409
2410 rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];
2411
2412 cache->using_page = page;
2413 cache->freelist = page->freelist;
2414 page->free_slots = 0;
2415 page->freelist = NULL;
2416
2417 asan_unpoison_object((VALUE)cache->freelist, false);
2418 GC_ASSERT(RB_TYPE_P((VALUE)cache->freelist, T_NONE));
2419 asan_poison_object((VALUE)cache->freelist);
2420}
2421
2422static inline void
2423ractor_cache_slots(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2424{
2425 ASSERT_vm_locking();
2426
2427 rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
2428 struct heap_page *page = heap_next_freepage(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
2429
2430 ractor_set_cache(cr, page, size_pool_idx);
2431}
2432
2433static inline VALUE
2434newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2435{
2436 RVALUE *p = (RVALUE *)obj;
2437 p->as.values.v1 = v1;
2438 p->as.values.v2 = v2;
2439 p->as.values.v3 = v3;
2440 return obj;
2441}
2442
2443static inline size_t
2444size_pool_idx_for_size(size_t size)
2445{
2446#if USE_RVARGC
2447 size_t slot_count = CEILDIV(size, sizeof(RVALUE));
2448
2449 /* size_pool_idx is ceil(log2(slot_count)) */
2450 size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
2451 if (size_pool_idx >= SIZE_POOL_COUNT) {
2452 rb_bug("size_pool_idx_for_size: allocation size too large");
2453 }
2454
2455 return size_pool_idx;
2456#else
2457 GC_ASSERT(size <= sizeof(RVALUE));
2458 return 0;
2459#endif
2460}
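
/* Editorial worked example (assuming a 40-byte RVALUE): a request for 120
 * bytes gives slot_count = CEILDIV(120, 40) = 3, and
 * 64 - nlz_int64(3 - 1) = 64 - 62 = 2, i.e. size pool 2 with 160-byte slots. */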
2461
2462ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));
2463
2464static inline VALUE
2465newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx)
2466{
2467 VALUE obj;
2468 unsigned int lev;
2469
2470 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2471 {
2472 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2473 if (during_gc) {
2474 dont_gc_on();
2475 during_gc = 0;
2476 rb_bug("object allocation during garbage collection phase");
2477 }
2478
2479 if (ruby_gc_stressful) {
2480 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2481 rb_memerror();
2482 }
2483 }
2484 }
2485
2486 // allocate new slot
2487 while ((obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) == Qfalse) {
2488 ractor_cache_slots(objspace, cr, size_pool_idx);
2489 }
2490 GC_ASSERT(obj != 0);
2491 newobj_init(klass, flags, wb_protected, objspace, obj);
2492
2493 gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_fill(obj, 0, 0, 0));
2494 }
2495 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2496
2497 return obj;
2498}
2499
2500NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2501 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2502NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2503 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2504
2505static VALUE
2506newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2507{
2508 return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2509}
2510
2511static VALUE
2512newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2513{
2514 return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2515}
2516
2517static inline VALUE
2518newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t alloc_size)
2519{
2520 VALUE obj;
2521 rb_objspace_t *objspace = &rb_objspace;
2522
2523 RB_DEBUG_COUNTER_INC(obj_newobj);
2524 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2525
2526#if GC_DEBUG_STRESS_TO_CLASS
2527 if (UNLIKELY(stress_to_class)) {
2528 long i, cnt = RARRAY_LEN(stress_to_class);
2529 for (i = 0; i < cnt; ++i) {
2530 if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2531 }
2532 }
2533#endif
2534
2535 size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2536
2537 if ((!UNLIKELY(during_gc ||
2538 ruby_gc_stressful ||
2539 gc_event_hook_available_p(objspace)) &&
2540 wb_protected &&
2541 (obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) != Qfalse)) {
2542
2543 newobj_init(klass, flags, wb_protected, objspace, obj);
2544 }
2545 else {
2546 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2547
2548 obj = wb_protected ?
2549 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2550 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2551 }
2552
2553 return obj;
2554}
2555
2556static inline VALUE
2557newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2558{
2559 VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR(), alloc_size);
2560 return newobj_fill(obj, v1, v2, v3);
2561}
2562
2563static inline VALUE
2564newobj_of_cr(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2565{
2566 VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2567 return newobj_fill(obj, v1, v2, v3);
2568}
2569
2570VALUE
2571rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
2572{
2573 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2574 return newobj_of(klass, flags, 0, 0, 0, FALSE, size);
2575}
2576
2577VALUE
2578rb_wb_protected_newobj_of(VALUE klass, VALUE flags, size_t size)
2579{
2580 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2581 return newobj_of(klass, flags, 0, 0, 0, TRUE, size);
2582}
2583
2584VALUE
2585rb_ec_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
2586{
2587 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2588 return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2589}
2590
2591/* for compatibility */
2592
2593VALUE
2594rb_newobj(void)
2595{
2596 return newobj_of(0, T_NONE, 0, 0, 0, FALSE, sizeof(RVALUE));
2597}
2598
2599VALUE
2600rb_newobj_of(VALUE klass, VALUE flags)
2601{
2602 if ((flags & RUBY_T_MASK) == T_OBJECT) {
2603 st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
2604
2605 VALUE obj = newobj_of(klass, (flags | ROBJECT_EMBED) & ~FL_WB_PROTECTED , Qundef, Qundef, Qundef, flags & FL_WB_PROTECTED, sizeof(RVALUE));
2606
2607 if (index_tbl && index_tbl->num_entries > ROBJECT_EMBED_LEN_MAX) {
2608 rb_init_iv_list(obj);
2609 }
2610 return obj;
2611 }
2612 else {
2613 return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED, sizeof(RVALUE));
2614 }
2615}
2616
2617#define UNEXPECTED_NODE(func) \
2618 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2619 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2620
2621const char *
2622rb_imemo_name(enum imemo_type type)
2623{
2624 // put no default case to get a warning if an imemo type is missing
2625 switch (type) {
2626#define IMEMO_NAME(x) case imemo_##x: return #x;
2627 IMEMO_NAME(env);
2628 IMEMO_NAME(cref);
2629 IMEMO_NAME(svar);
2630 IMEMO_NAME(throw_data);
2631 IMEMO_NAME(ifunc);
2632 IMEMO_NAME(memo);
2633 IMEMO_NAME(ment);
2634 IMEMO_NAME(iseq);
2635 IMEMO_NAME(tmpbuf);
2636 IMEMO_NAME(ast);
2637 IMEMO_NAME(parser_strterm);
2638 IMEMO_NAME(callinfo);
2639 IMEMO_NAME(callcache);
2640 IMEMO_NAME(constcache);
2641#undef IMEMO_NAME
2642 }
2643 return "unknown";
2644}
2645
2646#undef rb_imemo_new
2647
2648VALUE
2649rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2650{
2651 size_t size = sizeof(RVALUE);
2652 VALUE flags = T_IMEMO | (type << FL_USHIFT);
2653 return newobj_of(v0, flags, v1, v2, v3, TRUE, size);
2654}
2655
2656static VALUE
2657rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2658{
2659 size_t size = sizeof(RVALUE);
2660 VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
2661 return newobj_of(v0, flags, v1, v2, v3, FALSE, size);
2662}
2663
2664static VALUE
2665rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
2666{
2667 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
2668}
2669
2670rb_imemo_tmpbuf_t *
2671rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
2672{
2673 return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
2674}
2675
2676static size_t
2677imemo_memsize(VALUE obj)
2678{
2679 size_t size = 0;
2680 switch (imemo_type(obj)) {
2681 case imemo_ment:
2682 size += sizeof(RANY(obj)->as.imemo.ment.def);
2683 break;
2684 case imemo_iseq:
2685 size += rb_iseq_memsize((rb_iseq_t *)obj);
2686 break;
2687 case imemo_env:
2688 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
2689 break;
2690 case imemo_tmpbuf:
2691 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
2692 break;
2693 case imemo_ast:
2694 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
2695 break;
2696 case imemo_cref:
2697 case imemo_svar:
2698 case imemo_throw_data:
2699 case imemo_ifunc:
2700 case imemo_memo:
2701 case imemo_parser_strterm:
2702 break;
2703 default:
2704 /* unreachable */
2705 break;
2706 }
2707 return size;
2708}
2709
2710#if IMEMO_DEBUG
2711VALUE
2712rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
2713{
2714 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
2715 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
2716 return memo;
2717}
2718#endif
2719
2720VALUE
2721rb_class_allocate_instance(VALUE klass)
2722{
2723 st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
2724
2725 VALUE flags = T_OBJECT | ROBJECT_EMBED;
2726
2727 VALUE obj = newobj_of(klass, flags, Qundef, Qundef, Qundef, RGENGC_WB_PROTECTED_OBJECT, sizeof(RVALUE));
2728
2729 if (index_tbl && index_tbl->num_entries > ROBJECT_EMBED_LEN_MAX) {
2730 rb_init_iv_list(obj);
2731 }
2732
2733 return obj;
2734}
2735
2736static inline void
2737rb_data_object_check(VALUE klass)
2738{
2739 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
2740 rb_undef_alloc_func(klass);
2741#if RUBY_VERSION_SINCE(3, 2)
2742 RBIMPL_TODO("enable the warning at this release");
2743 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
2744#endif
2745 }
2746}
2747
2748VALUE
2749rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2750{
2751{
2752    if (klass) rb_data_object_check(klass);
2753 return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE, sizeof(RVALUE));
2754}
2755
2756VALUE
2757rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2758{
2759 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
2760 DATA_PTR(obj) = xcalloc(1, size);
2761 return obj;
2762}
2763
2764VALUE
2765rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
2766{
2767 RBIMPL_NONNULL_ARG(type);
2768 if (klass) rb_data_object_check(klass);
2769 return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED, sizeof(RVALUE));
2770}
2771
2772VALUE
2773rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
2774{
2775 VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
2776 DATA_PTR(obj) = xcalloc(1, size);
2777 return obj;
2778}
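
/* Editorial sketch (hypothetical extension code, not part of gc.c): a typical
 * caller defines an rb_data_type_t and wraps a zero-filled C struct with
 * rb_data_typed_object_zalloc().  The names below are illustrative only. */
#if 0
struct my_thing { int counter; };

static const rb_data_type_t my_thing_type = {
    .wrap_struct_name = "my_thing",
    .function = { .dfree = RUBY_TYPED_DEFAULT_FREE },
    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE
my_thing_alloc(VALUE klass)
{
    /* allocates the T_DATA object plus a zeroed struct my_thing */
    return rb_data_typed_object_zalloc(klass, sizeof(struct my_thing), &my_thing_type);
}
#endif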
2779
2780size_t
2781rb_objspace_data_type_memsize(VALUE obj)
2782{
2783 if (RTYPEDDATA_P(obj)) {
2784 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
2785 const void *ptr = RTYPEDDATA_DATA(obj);
2786 if (ptr && type->function.dsize) {
2787 return type->function.dsize(ptr);
2788 }
2789 }
2790 return 0;
2791}
2792
2793const char *
2794rb_objspace_data_type_name(VALUE obj)
2795{
2796 if (RTYPEDDATA_P(obj)) {
2797 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
2798 }
2799 else {
2800 return 0;
2801 }
2802}
2803
2804PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
2805static inline int
2806is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
2807{
2808 register RVALUE *p = RANY(ptr);
2809 register struct heap_page *page;
2810 register size_t hi, lo, mid;
2811
2812 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2813
2814 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2815 RB_DEBUG_COUNTER_INC(gc_isptr_range);
2816
2817 if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
2818 RB_DEBUG_COUNTER_INC(gc_isptr_align);
2819
2820    /* check if p looks like a pointer using bsearch */
2821 lo = 0;
2822 hi = heap_allocated_pages;
2823 while (lo < hi) {
2824 mid = (lo + hi) / 2;
2825 page = heap_pages_sorted[mid];
2826 if (page->start <= p) {
2827 if ((uintptr_t)p < ((uintptr_t)page->start + (page->total_slots * page->slot_size))) {
2828 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2829
2830 if (page->flags.in_tomb) {
2831 return FALSE;
2832 }
2833 else {
2834 if ((NUM_IN_PAGE(p) * sizeof(RVALUE)) % page->slot_size != 0) return FALSE;
2835
2836 return TRUE;
2837 }
2838 }
2839 lo = mid + 1;
2840 }
2841 else {
2842 hi = mid;
2843 }
2844 }
2845 return FALSE;
2846}
2847
2848static enum rb_id_table_iterator_result
2849free_const_entry_i(VALUE value, void *data)
2850{
2851 rb_const_entry_t *ce = (rb_const_entry_t *)value;
2852 xfree(ce);
2853 return ID_TABLE_CONTINUE;
2854}
2855
2856void
2857rb_free_const_table(struct rb_id_table *tbl)
2858{
2859 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
2860 rb_id_table_free(tbl);
2861}
2862
2863static int
2864free_iv_index_tbl_free_i(st_data_t key, st_data_t value, st_data_t data)
2865{
2866 xfree((void *)value);
2867 return ST_CONTINUE;
2868}
2869
2870static void
2871iv_index_tbl_free(struct st_table *tbl)
2872{
2873 st_foreach(tbl, free_iv_index_tbl_free_i, 0);
2874 st_free_table(tbl);
2875}
2876
2877// alive: if false, the target pointers may already have been freed.
2878// Checking that requires the objspace parameter.
2879static void
2880vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
2881{
2882 if (ccs->entries) {
2883 for (int i=0; i<ccs->len; i++) {
2884 const struct rb_callcache *cc = ccs->entries[i].cc;
2885 if (!alive) {
2886 void *ptr = asan_poisoned_object_p((VALUE)cc);
2887 asan_unpoison_object((VALUE)cc, false);
2888 // ccs can be free'ed.
2889 if (is_pointer_to_heap(objspace, (void *)cc) &&
2890 IMEMO_TYPE_P(cc, imemo_callcache) &&
2891 cc->klass == klass) {
2892 // OK. maybe target cc.
2893 }
2894 else {
2895 if (ptr) {
2896 asan_poison_object((VALUE)cc);
2897 }
2898 continue;
2899 }
2900 if (ptr) {
2901 asan_poison_object((VALUE)cc);
2902 }
2903 }
2904 vm_cc_invalidate(cc);
2905 }
2906 ruby_xfree(ccs->entries);
2907 }
2908 ruby_xfree(ccs);
2909}
2910
2911void
2912rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
2913{
2914 RB_DEBUG_COUNTER_INC(ccs_free);
2915 vm_ccs_free(ccs, TRUE, NULL, Qundef);
2916}
2917
2918struct cc_tbl_i_data {
2919    rb_objspace_t *objspace;
2920 VALUE klass;
2921 bool alive;
2922};
2923
2924static enum rb_id_table_iterator_result
2925cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
2926{
2927 struct cc_tbl_i_data *data = data_ptr;
2928 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2929 VM_ASSERT(vm_ccs_p(ccs));
2930 VM_ASSERT(id == ccs->cme->called_id);
2931
2932 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2933 rb_vm_ccs_free(ccs);
2934 return ID_TABLE_DELETE;
2935 }
2936 else {
2937 gc_mark(data->objspace, (VALUE)ccs->cme);
2938
2939 for (int i=0; i<ccs->len; i++) {
2940 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
2941 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
2942
2943 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
2944 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
2945 }
2946 return ID_TABLE_CONTINUE;
2947 }
2948}
2949
2950static void
2951cc_table_mark(rb_objspace_t *objspace, VALUE klass)
2952{
2953 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2954 if (cc_tbl) {
2955 struct cc_tbl_i_data data = {
2956 .objspace = objspace,
2957 .klass = klass,
2958 };
2959 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
2960 }
2961}
2962
2963static enum rb_id_table_iterator_result
2964cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
2965{
2966 struct cc_tbl_i_data *data = data_ptr;
2967 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2968 VM_ASSERT(vm_ccs_p(ccs));
2969 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
2970 return ID_TABLE_CONTINUE;
2971}
2972
2973static void
2974cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
2975{
2976 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2977
2978 if (cc_tbl) {
2979 struct cc_tbl_i_data data = {
2980 .objspace = objspace,
2981 .klass = klass,
2982 .alive = alive,
2983 };
2984 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
2985 rb_id_table_free(cc_tbl);
2986 }
2987}
2988
2989static enum rb_id_table_iterator_result
2990cvar_table_free_i(VALUE value, void * ctx)
2991{
2992 xfree((void *) value);
2993 return ID_TABLE_CONTINUE;
2994}
2995
2996void
2997rb_cc_table_free(VALUE klass)
2998{
2999 cc_table_free(&rb_objspace, klass, TRUE);
3000}
3001
3002static inline void
3003make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
3004{
3005 struct RZombie *zombie = RZOMBIE(obj);
3006 zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
3007 zombie->dfree = dfree;
3008 zombie->data = data;
3009 zombie->next = heap_pages_deferred_final;
3010 heap_pages_deferred_final = (VALUE)zombie;
3011
3012 struct heap_page *page = GET_HEAP_PAGE(obj);
3013 page->final_slots++;
3014 heap_pages_final_slots++;
3015}
3016
3017static inline void
3018make_io_zombie(rb_objspace_t *objspace, VALUE obj)
3019{
3020 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3021 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3022}
3023
3024static void
3025obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
3026{
3027 ASSERT_vm_locking();
3028 st_data_t o = (st_data_t)obj, id;
3029
3030    GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
3031    FL_UNSET(obj, FL_SEEN_OBJ_ID);
3032
3033 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3034 GC_ASSERT(id);
3035 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3036 }
3037 else {
3038 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
3039 }
3040}
3041
3042static int
3043obj_free(rb_objspace_t *objspace, VALUE obj)
3044{
3045 RB_DEBUG_COUNTER_INC(obj_free);
3046 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
3047
3048 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
3049
3050 switch (BUILTIN_TYPE(obj)) {
3051 case T_NIL:
3052 case T_FIXNUM:
3053 case T_TRUE:
3054 case T_FALSE:
3055 rb_bug("obj_free() called for broken object");
3056 break;
3057 default:
3058 break;
3059 }
3060
3061 if (FL_TEST(obj, FL_EXIVAR)) {
3062 rb_free_generic_ivar((VALUE)obj);
3063 FL_UNSET(obj, FL_EXIVAR);
3064 }
3065
3066 if (FL_TEST(obj, FL_SEEN_OBJ_ID) && !FL_TEST(obj, FL_FINALIZE)) {
3067 obj_free_object_id(objspace, obj);
3068 }
3069
3070 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3071
3072#if RGENGC_CHECK_MODE
3073#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3074 CHECK(RVALUE_WB_UNPROTECTED);
3075 CHECK(RVALUE_MARKED);
3076 CHECK(RVALUE_MARKING);
3077 CHECK(RVALUE_UNCOLLECTIBLE);
3078#undef CHECK
3079#endif
3080
3081 switch (BUILTIN_TYPE(obj)) {
3082 case T_OBJECT:
3083 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3084 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3085 }
3086 else if (ROBJ_TRANSIENT_P(obj)) {
3087 RB_DEBUG_COUNTER_INC(obj_obj_transient);
3088 }
3089 else {
3090 xfree(RANY(obj)->as.object.as.heap.ivptr);
3091 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3092 }
3093 break;
3094 case T_MODULE:
3095 case T_CLASS:
3096 rb_id_table_free(RCLASS_M_TBL(obj));
3097 cc_table_free(objspace, obj, FALSE);
3098 if (RCLASS_IV_TBL(obj)) {
3099 st_free_table(RCLASS_IV_TBL(obj));
3100 }
3101 if (RCLASS_CONST_TBL(obj)) {
3102 rb_free_const_table(RCLASS_CONST_TBL(obj));
3103 }
3104 if (RCLASS_IV_INDEX_TBL(obj)) {
3105 iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj));
3106 }
3107 if (RCLASS_CVC_TBL(obj)) {
3108 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3109 rb_id_table_free(RCLASS_CVC_TBL(obj));
3110 }
3111 rb_class_remove_subclass_head(obj);
3112 rb_class_remove_from_module_subclasses(obj);
3113 rb_class_remove_from_super_subclasses(obj);
3114#if !USE_RVARGC
3115 if (RCLASS_EXT(obj))
3116 xfree(RCLASS_EXT(obj));
3117#endif
3118
3119 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
3120 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
3121 break;
3122 case T_STRING:
3123 rb_str_free(obj);
3124 break;
3125 case T_ARRAY:
3126 rb_ary_free(obj);
3127 break;
3128 case T_HASH:
3129#if USE_DEBUG_COUNTER
3130 switch (RHASH_SIZE(obj)) {
3131 case 0:
3132 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3133 break;
3134 case 1:
3135 RB_DEBUG_COUNTER_INC(obj_hash_1);
3136 break;
3137 case 2:
3138 RB_DEBUG_COUNTER_INC(obj_hash_2);
3139 break;
3140 case 3:
3141 RB_DEBUG_COUNTER_INC(obj_hash_3);
3142 break;
3143 case 4:
3144 RB_DEBUG_COUNTER_INC(obj_hash_4);
3145 break;
3146 case 5:
3147 case 6:
3148 case 7:
3149 case 8:
3150 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3151 break;
3152 default:
3153 GC_ASSERT(RHASH_SIZE(obj) > 8);
3154 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3155 }
3156
3157 if (RHASH_AR_TABLE_P(obj)) {
3158 if (RHASH_AR_TABLE(obj) == NULL) {
3159 RB_DEBUG_COUNTER_INC(obj_hash_null);
3160 }
3161 else {
3162 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3163 }
3164 }
3165 else {
3166 RB_DEBUG_COUNTER_INC(obj_hash_st);
3167 }
3168#endif
3169 if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
3170 struct ar_table_struct *tab = RHASH(obj)->as.ar;
3171
3172 if (tab) {
3173 if (RHASH_TRANSIENT_P(obj)) {
3174 RB_DEBUG_COUNTER_INC(obj_hash_transient);
3175 }
3176 else {
3177 ruby_xfree(tab);
3178 }
3179 }
3180 }
3181 else {
3182 GC_ASSERT(RHASH_ST_TABLE_P(obj));
3183 st_free_table(RHASH(obj)->as.st);
3184 }
3185 break;
3186 case T_REGEXP:
3187 if (RANY(obj)->as.regexp.ptr) {
3188 onig_free(RANY(obj)->as.regexp.ptr);
3189 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3190 }
3191 break;
3192 case T_DATA:
3193 if (DATA_PTR(obj)) {
3194 int free_immediately = FALSE;
3195 void (*dfree)(void *);
3196 void *data = DATA_PTR(obj);
3197
3198 if (RTYPEDDATA_P(obj)) {
3199 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3200 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3201 if (0 && free_immediately == 0) {
3202 /* to expose non-free-immediate T_DATA */
3203 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
3204 }
3205 }
3206 else {
3207 dfree = RANY(obj)->as.data.dfree;
3208 }
3209
3210 if (dfree) {
3211 if (dfree == RUBY_DEFAULT_FREE) {
3212 xfree(data);
3213 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3214 }
3215 else if (free_immediately) {
3216 (*dfree)(data);
3217 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3218 }
3219 else {
3220 make_zombie(objspace, obj, dfree, data);
3221 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3222 return FALSE;
3223 }
3224 }
3225 else {
3226 RB_DEBUG_COUNTER_INC(obj_data_empty);
3227 }
3228 }
3229 break;
3230 case T_MATCH:
3231 if (RANY(obj)->as.match.rmatch) {
3232 struct rmatch *rm = RANY(obj)->as.match.rmatch;
3233#if USE_DEBUG_COUNTER
3234 if (rm->regs.num_regs >= 8) {
3235 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3236 }
3237 else if (rm->regs.num_regs >= 4) {
3238 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3239 }
3240 else if (rm->regs.num_regs >= 1) {
3241 RB_DEBUG_COUNTER_INC(obj_match_under4);
3242 }
3243#endif
3244 onig_region_free(&rm->regs, 0);
3245 if (rm->char_offset)
3246 xfree(rm->char_offset);
3247 xfree(rm);
3248
3249 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3250 }
3251 break;
3252 case T_FILE:
3253 if (RANY(obj)->as.file.fptr) {
3254 make_io_zombie(objspace, obj);
3255 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3256 return FALSE;
3257 }
3258 break;
3259 case T_RATIONAL:
3260 RB_DEBUG_COUNTER_INC(obj_rational);
3261 break;
3262 case T_COMPLEX:
3263 RB_DEBUG_COUNTER_INC(obj_complex);
3264 break;
3265 case T_MOVED:
3266 break;
3267 case T_ICLASS:
3268        /* Basically, T_ICLASS shares its method table with the module */
3269 if (RICLASS_OWNS_M_TBL_P(obj)) {
3270 /* Method table is not shared for origin iclasses of classes */
3271 rb_id_table_free(RCLASS_M_TBL(obj));
3272 }
3273 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3274 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3275 }
3276 rb_class_remove_subclass_head(obj);
3277 cc_table_free(objspace, obj, FALSE);
3278 rb_class_remove_from_module_subclasses(obj);
3279 rb_class_remove_from_super_subclasses(obj);
3280#if !USE_RVARGC
3281 xfree(RCLASS_EXT(obj));
3282#endif
3283
3284 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3285 break;
3286
3287 case T_FLOAT:
3288 RB_DEBUG_COUNTER_INC(obj_float);
3289 break;
3290
3291 case T_BIGNUM:
3292 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3293 xfree(BIGNUM_DIGITS(obj));
3294 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3295 }
3296 else {
3297 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3298 }
3299 break;
3300
3301 case T_NODE:
3302 UNEXPECTED_NODE(obj_free);
3303 break;
3304
3305 case T_STRUCT:
3306 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3307 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3308 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3309 }
3310 else if (RSTRUCT_TRANSIENT_P(obj)) {
3311 RB_DEBUG_COUNTER_INC(obj_struct_transient);
3312 }
3313 else {
3314 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3315 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3316 }
3317 break;
3318
3319 case T_SYMBOL:
3320 {
3321 rb_gc_free_dsymbol(obj);
3322 RB_DEBUG_COUNTER_INC(obj_symbol);
3323 }
3324 break;
3325
3326 case T_IMEMO:
3327 switch (imemo_type(obj)) {
3328 case imemo_ment:
3329 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3330 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3331 break;
3332 case imemo_iseq:
3333 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3334 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3335 break;
3336 case imemo_env:
3337 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3338 xfree((VALUE *)RANY(obj)->as.imemo.env.env);
3339 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3340 break;
3341 case imemo_tmpbuf:
3342 xfree(RANY(obj)->as.imemo.alloc.ptr);
3343 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3344 break;
3345 case imemo_ast:
3346 rb_ast_free(&RANY(obj)->as.imemo.ast);
3347 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3348 break;
3349 case imemo_cref:
3350 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3351 break;
3352 case imemo_svar:
3353 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3354 break;
3355 case imemo_throw_data:
3356 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3357 break;
3358 case imemo_ifunc:
3359 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3360 break;
3361 case imemo_memo:
3362 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3363 break;
3364 case imemo_parser_strterm:
3365 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3366 break;
3367 case imemo_callinfo:
3368 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3369 break;
3370 case imemo_callcache:
3371 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3372 break;
3373 case imemo_constcache:
3374 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3375 break;
3376 }
3377 return TRUE;
3378
3379 default:
3380 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3381 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
3382 }
3383
3384 if (FL_TEST(obj, FL_FINALIZE)) {
3385 make_zombie(objspace, obj, 0, 0);
3386 return FALSE;
3387 }
3388 else {
3389 return TRUE;
3390 }
3391}
3392
3393
3394#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3395#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
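
/* Editorial note (arithmetic illustration, assuming a 40-byte RVALUE on a
 * 64-bit build): OBJ_ID_INCREMENT is sizeof(RVALUE)/2 = 20 and OBJ_ID_INITIAL
 * is twice that, 40, so ids handed out by the counter run 40, 60, 80, ... */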
3396
3397static int
3398object_id_cmp(st_data_t x, st_data_t y)
3399{
3400 if (RB_BIGNUM_TYPE_P(x)) {
3401 return !rb_big_eql(x, y);
3402 }
3403 else {
3404 return x != y;
3405 }
3406}
3407
3408static st_index_t
3409object_id_hash(st_data_t n)
3410{
3411 if (RB_BIGNUM_TYPE_P(n)) {
3412 return FIX2LONG(rb_big_hash(n));
3413 }
3414 else {
3415 return st_numhash(n);
3416 }
3417}
3418static const struct st_hash_type object_id_hash_type = {
3419 object_id_cmp,
3420 object_id_hash,
3421};
3422
3423void
3424Init_heap(void)
3425{
3426 rb_objspace_t *objspace = &rb_objspace;
3427
3428#if defined(HAVE_MMAP) && !HAVE_CONST_PAGE_SIZE && !defined(PAGE_MAX_SIZE)
3429 /* Need to determine if we can use mmap at runtime. */
3430# ifdef PAGE_SIZE
3431 /* If the PAGE_SIZE macro can be used. */
3432 use_mmap_aligned_alloc = PAGE_SIZE <= HEAP_PAGE_SIZE;
3433# elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
3434 /* If we can use sysconf to determine the page size. */
3435 use_mmap_aligned_alloc = sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE;
3436# else
3437 /* Otherwise we can't determine the system page size, so don't use mmap. */
3438 use_mmap_aligned_alloc = FALSE;
3439# endif
3440#endif
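
    /* Editorial note (illustrative): on a typical Linux x86-64 system
     * sysconf(_SC_PAGE_SIZE) returns 4096, which is not larger than
     * HEAP_PAGE_SIZE, so the mmap-based aligned allocator is selected. */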
3441
3442 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3443 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3444 objspace->obj_to_id_tbl = st_init_numtable();
3445
3446#if RGENGC_ESTIMATE_OLDMALLOC
3447 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3448#endif
3449
3450 heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
3451
3452 /* Give other size pools allocatable pages. */
3453 for (int i = 1; i < SIZE_POOL_COUNT; i++) {
3454 rb_size_pool_t *size_pool = &size_pools[i];
3455 int multiple = size_pool->slot_size / sizeof(RVALUE);
3456 size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
3457 }
3458 heap_pages_expand_sorted(objspace);
3459
3460 init_mark_stack(&objspace->mark_stack);
3461
3462 objspace->profile.invoke_time = getrusage_time();
3463 finalizer_table = st_init_numtable();
3464}
3465
3466void
3467Init_gc_stress(void)
3468{
3469 rb_objspace_t *objspace = &rb_objspace;
3470
3471 gc_stress_set(objspace, ruby_initial_gc_stress);
3472}
3473
3474typedef int each_obj_callback(void *, void *, size_t, void *);
3475
3476static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3477static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3478
3479struct each_obj_data {
3480    rb_objspace_t *objspace;
3481 bool reenable_incremental;
3482
3483 each_obj_callback *callback;
3484 void *data;
3485
3486 struct heap_page **pages[SIZE_POOL_COUNT];
3487 size_t pages_counts[SIZE_POOL_COUNT];
3488};
3489
3490static VALUE
3491objspace_each_objects_ensure(VALUE arg)
3492{
3493 struct each_obj_data *data = (struct each_obj_data *)arg;
3494 rb_objspace_t *objspace = data->objspace;
3495
3496 /* Reenable incremental GC */
3497 if (data->reenable_incremental) {
3498 objspace->flags.dont_incremental = FALSE;
3499 }
3500
3501 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3502 struct heap_page **pages = data->pages[i];
3503 /* pages could be NULL if an error was raised during setup (e.g.
3504 * malloc failed due to out of memory). */
3505 if (pages) {
3506 free(pages);
3507 }
3508 }
3509
3510 return Qnil;
3511}
3512
3513static VALUE
3514objspace_each_objects_try(VALUE arg)
3515{
3516 struct each_obj_data *data = (struct each_obj_data *)arg;
3517 rb_objspace_t *objspace = data->objspace;
3518
3519 /* Copy pages from all size_pools to their respective buffers. */
3520 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3521 rb_size_pool_t *size_pool = &size_pools[i];
3522 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3523
3524 struct heap_page **pages = malloc(size);
3525 if (!pages) rb_memerror();
3526
3527 /* Set up pages buffer by iterating over all pages in the current eden
3528 * heap. This will be a snapshot of the state of the heap before we
3529 * call the callback over each page that exists in this buffer. Thus it
3530 * is safe for the callback to allocate objects without possibly entering
3531 * an infinite loop. */
3532 struct heap_page *page = 0;
3533 size_t pages_count = 0;
3534 list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3535 pages[pages_count] = page;
3536 pages_count++;
3537 }
3538 data->pages[i] = pages;
3539 data->pages_counts[i] = pages_count;
3540 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3541 }
3542
3543 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3544 rb_size_pool_t *size_pool = &size_pools[i];
3545 size_t pages_count = data->pages_counts[i];
3546 struct heap_page **pages = data->pages[i];
3547
3548 struct heap_page *page = list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3549 for (size_t i = 0; i < pages_count; i++) {
3550 /* If we have reached the end of the linked list then there are no
3551 * more pages, so break. */
3552 if (page == NULL) break;
3553
3554 /* If this page does not match the one in the buffer, then move to
3555 * the next page in the buffer. */
3556 if (pages[i] != page) continue;
3557
3558 uintptr_t pstart = (uintptr_t)page->start;
3559 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3560
3561 if ((*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3562 break;
3563 }
3564
3565 page = list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
3566 }
3567 }
3568
3569 return Qnil;
3570}
3571
3572/*
3573 * rb_objspace_each_objects() is a special C API for walking through the
3574 * Ruby object space.  It is difficult to use correctly, so you should
3575 * not use it unless you have read the source of this function and
3576 * understand exactly what it does.
3577 *
3578 * 'callback' will be called repeatedly (once per heap page in the
3579 * current implementation) with:
3580 *   vstart: a pointer to the first living object of the heap page.
3581 *   vend: a pointer just past the valid heap page area.
3582 *   stride: the distance to the next VALUE.
3583 *
3584 * If callback() returns non-zero, the iteration is stopped.
3585 *
3586 * This is a sample callback that iterates over live objects:
3587 *
3588 *   int
3589 *   sample_callback(void *vstart, void *vend, size_t stride, void *data) {
3590 *     VALUE v = (VALUE)vstart;
3591 *     for (; v != (VALUE)vend; v += stride) {
3592 *       if (RBASIC(v)->flags) { // liveness check
3593 *         // do something with live object 'v'
3594 *       }
3595 *     }
3596 *     return 0; // continue the iteration
3597 *   }
3598 *
3599 * Note: 'vstart' is not the top of the heap page.  It points to the
3600 *       first living object, so that at least one object is grasped and
3601 *       GC issues are avoided.  This means you cannot walk through every
3602 *       Ruby object page, including freed pages.
3603 *
3604 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3605 *       However, variable values may be passed as 'stride' for some
3606 *       reasons, so use 'stride' instead of a constant in the iteration.
3607 */
3608void
3609rb_objspace_each_objects(each_obj_callback *callback, void *data)
3610{
3611 objspace_each_objects(&rb_objspace, callback, data, TRUE);
3612}
3613
3614static void
3615objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
3616{
3617 /* Disable incremental GC */
3618 bool reenable_incremental = FALSE;
3619 if (protected) {
3620 reenable_incremental = !objspace->flags.dont_incremental;
3621
3622 gc_rest(objspace);
3623 objspace->flags.dont_incremental = TRUE;
3624 }
3625
3626 struct each_obj_data each_obj_data = {
3627 .objspace = objspace,
3628 .reenable_incremental = reenable_incremental,
3629
3630 .callback = callback,
3631 .data = data,
3632
3633 .pages = {NULL},
3634 .pages_counts = {0},
3635 };
3636 rb_ensure(objspace_each_objects_try, (VALUE)&each_obj_data,
3637 objspace_each_objects_ensure, (VALUE)&each_obj_data);
3638}
3639
3640void
3641rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
3642{
3643 objspace_each_objects(&rb_objspace, callback, data, FALSE);
3644}
3645
3646struct os_each_struct {
3647    size_t num;
3648 VALUE of;
3649};
3650
3651static int
3652internal_object_p(VALUE obj)
3653{
3654 RVALUE *p = (RVALUE *)obj;
3655 void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
3656 asan_unpoison_object(obj, false);
3657 bool used_p = p->as.basic.flags;
3658
3659 if (used_p) {
3660 switch (BUILTIN_TYPE(obj)) {
3661 case T_NODE:
3662 UNEXPECTED_NODE(internal_object_p);
3663 break;
3664 case T_NONE:
3665 case T_MOVED:
3666 case T_IMEMO:
3667 case T_ICLASS:
3668 case T_ZOMBIE:
3669 break;
3670 case T_CLASS:
3671 if (!p->as.basic.klass) break;
3672 if (FL_TEST(obj, FL_SINGLETON)) {
3673 return rb_singleton_class_internal_p(obj);
3674 }
3675 return 0;
3676 default:
3677 if (!p->as.basic.klass) break;
3678 return 0;
3679 }
3680 }
3681 if (ptr || ! used_p) {
3682 asan_poison_object(obj);
3683 }
3684 return 1;
3685}
3686
3687int
3688rb_objspace_internal_object_p(VALUE obj)
3689{
3690 return internal_object_p(obj);
3691}
3692
3693static int
3694os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
3695{
3696 struct os_each_struct *oes = (struct os_each_struct *)data;
3697
3698 VALUE v = (VALUE)vstart;
3699 for (; v != (VALUE)vend; v += stride) {
3700 if (!internal_object_p(v)) {
3701 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
3702 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
3703 rb_yield(v);
3704 oes->num++;
3705 }
3706 }
3707 }
3708 }
3709
3710 return 0;
3711}
3712
3713static VALUE
3714os_obj_of(VALUE of)
3715{
3716 struct os_each_struct oes;
3717
3718 oes.num = 0;
3719 oes.of = of;
3720 rb_objspace_each_objects(os_obj_of_i, &oes);
3721 return SIZET2NUM(oes.num);
3722}
3723
3724/*
3725 * call-seq:
3726 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
3727 * ObjectSpace.each_object([module]) -> an_enumerator
3728 *
3729 * Calls the block once for each living, nonimmediate object in this
3730 * Ruby process. If <i>module</i> is specified, calls the block
3731 * for only those classes or modules that match (or are a subclass of)
3732 * <i>module</i>. Returns the number of objects found. Immediate
3733 * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
3734 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
3735 * never returned. In the example below, #each_object returns both
3736 * the numbers we defined and several constants defined in the Math
3737 * module.
3738 *
3739 * If no block is given, an enumerator is returned instead.
3740 *
3741 * a = 102.7
3742 * b = 95 # Won't be returned
3743 * c = 12345678987654321
3744 * count = ObjectSpace.each_object(Numeric) {|x| p x }
3745 * puts "Total count: #{count}"
3746 *
3747 * <em>produces:</em>
3748 *
3749 * 12345678987654321
3750 * 102.7
3751 * 2.71828182845905
3752 * 3.14159265358979
3753 * 2.22044604925031e-16
3754 * 1.7976931348623157e+308
3755 * 2.2250738585072e-308
3756 * Total count: 7
3757 *
3758 */
3759
3760static VALUE
3761os_each_obj(int argc, VALUE *argv, VALUE os)
3762{
3763 VALUE of;
3764
3765 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
3766 RETURN_ENUMERATOR(os, 1, &of);
3767 return os_obj_of(of);
3768}
3769
3770/*
3771 * call-seq:
3772 * ObjectSpace.undefine_finalizer(obj)
3773 *
3774 * Removes all finalizers for <i>obj</i>.
3775 *
3776 */
3777
3778static VALUE
3779undefine_final(VALUE os, VALUE obj)
3780{
3781 return rb_undefine_finalizer(obj);
3782}
3783
3784VALUE
3785rb_undefine_finalizer(VALUE obj)
3786{
3787 rb_objspace_t *objspace = &rb_objspace;
3788 st_data_t data = obj;
3789 rb_check_frozen(obj);
3790 st_delete(finalizer_table, &data, 0);
3791 FL_UNSET(obj, FL_FINALIZE);
3792 return obj;
3793}
3794
3795static void
3796should_be_callable(VALUE block)
3797{
3798 if (!rb_obj_respond_to(block, idCall, TRUE)) {
3799 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
3800 rb_obj_class(block));
3801 }
3802}
3803
3804static void
3805should_be_finalizable(VALUE obj)
3806{
3807 if (!FL_ABLE(obj)) {
3808 rb_raise(rb_eArgError, "cannot define finalizer for %s",
3809 rb_obj_classname(obj));
3810 }
3811 rb_check_frozen(obj);
3812}
3813
3814/*
3815 * call-seq:
3816 * ObjectSpace.define_finalizer(obj, aProc=proc())
3817 *
3818 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
3819 * was destroyed. The object ID of the <i>obj</i> will be passed
3820 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
3821 * method, make sure it can be called with a single argument.
3822 *
3823 * The return value is an array <code>[0, aProc]</code>.
3824 *
3825 * The two recommended patterns are to either create the finalizer proc
3826 * in a non-instance method where it can safely capture the needed state,
3827 * or to use a custom callable object that stores the needed state
3828 * explicitly as instance variables.
3829 *
3830 * class Foo
3831 * def initialize(data_needed_for_finalization)
3832 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
3833 * end
3834 *
3835 * def self.create_finalizer(data_needed_for_finalization)
3836 * proc {
3837 * puts "finalizing #{data_needed_for_finalization}"
3838 * }
3839 * end
3840 * end
3841 *
3842 * class Bar
3843 * class Remover
3844 * def initialize(data_needed_for_finalization)
3845 * @data_needed_for_finalization = data_needed_for_finalization
3846 * end
3847 *
3848 * def call(id)
3849 * puts "finalizing #{@data_needed_for_finalization}"
3850 * end
3851 * end
3852 *
3853 * def initialize(data_needed_for_finalization)
3854 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
3855 * end
3856 * end
3857 *
3858 * Note that if your finalizer references the object to be
3859 * finalized it will never be run on GC, although it will still be
3860 * run at exit. You will get a warning if you capture the object
3861 * to be finalized as the receiver of the finalizer.
3862 *
3863 * class CapturesSelf
3864 * def initialize(name)
3865 * ObjectSpace.define_finalizer(self, proc {
3866 * # this finalizer will only be run on exit
3867 * puts "finalizing #{name}"
3868 * })
3869 * end
3870 * end
3871 *
3872 * Also note that finalization can be unpredictable and is never guaranteed
3873 * to be run except on exit.
3874 */
3875
3876static VALUE
3877define_final(int argc, VALUE *argv, VALUE os)
3878{
3879 VALUE obj, block;
3880
3881 rb_scan_args(argc, argv, "11", &obj, &block);
3882 should_be_finalizable(obj);
3883 if (argc == 1) {
3884 block = rb_block_proc();
3885 }
3886 else {
3887 should_be_callable(block);
3888 }
3889
3890 if (rb_callable_receiver(block) == obj) {
3891 rb_warn("finalizer references object to be finalized");
3892 }
3893
3894 return define_final0(obj, block);
3895}
3896
3897static VALUE
3898define_final0(VALUE obj, VALUE block)
3899{
3900 rb_objspace_t *objspace = &rb_objspace;
3901 VALUE table;
3902 st_data_t data;
3903
3904 RBASIC(obj)->flags |= FL_FINALIZE;
3905
3906 if (st_lookup(finalizer_table, obj, &data)) {
3907 table = (VALUE)data;
3908
3909 /* avoid duplicate block, table is usually small */
3910 {
3911 long len = RARRAY_LEN(table);
3912 long i;
3913
3914 for (i = 0; i < len; i++) {
3915 VALUE recv = RARRAY_AREF(table, i);
3916 if (rb_equal(recv, block)) {
3917 block = recv;
3918 goto end;
3919 }
3920 }
3921 }
3922
3923 rb_ary_push(table, block);
3924 }
3925 else {
3926 table = rb_ary_new3(1, block);
3927 RBASIC_CLEAR_CLASS(table);
3928 st_add_direct(finalizer_table, obj, table);
3929 }
3930 end:
3931 block = rb_ary_new3(2, INT2FIX(0), block);
3932 OBJ_FREEZE(block);
3933 return block;
3934}
3935
3936VALUE
3937rb_define_finalizer(VALUE obj, VALUE block)
3938{
3939 should_be_finalizable(obj);
3940 should_be_callable(block);
3941 return define_final0(obj, block);
3942}
3943
3944void
3945rb_gc_copy_finalizer(VALUE dest, VALUE obj)
3946{
3947 rb_objspace_t *objspace = &rb_objspace;
3948 VALUE table;
3949 st_data_t data;
3950
3951 if (!FL_TEST(obj, FL_FINALIZE)) return;
3952 if (st_lookup(finalizer_table, obj, &data)) {
3953 table = (VALUE)data;
3954 st_insert(finalizer_table, dest, table);
3955 }
3956 FL_SET(dest, FL_FINALIZE);
3957}
3958
3959static VALUE
3960run_single_final(VALUE cmd, VALUE objid)
3961{
3962 return rb_check_funcall(cmd, idCall, 1, &objid);
3963}
3964
3965static void
3966warn_exception_in_finalizer(rb_execution_context_t *ec, VALUE final)
3967{
3968 if (final != Qundef && !NIL_P(ruby_verbose)) {
3969 VALUE errinfo = ec->errinfo;
3970 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
3971 rb_ec_error_print(ec, errinfo);
3972 }
3973}
3974
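/* Runs every finalizer registered for obj in `table`.  All calls run under a
 * single protective tag: if a finalizer raises, execution resumes at the tag,
 * the failing entry is reported via warn_exception_in_finalizer and skipped,
 * and the remaining finalizers still run with the saved execution context
 * restored. */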
3975static void
3976run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
3977{
3978 long i;
3979 enum ruby_tag_type state;
3980 volatile struct {
3981 VALUE errinfo;
3982 VALUE objid;
3983 VALUE final;
3984 rb_control_frame_t *cfp;
3985 long finished;
3986 } saved;
3987 rb_execution_context_t * volatile ec = GET_EC();
3988#define RESTORE_FINALIZER() (\
3989 ec->cfp = saved.cfp, \
3990 ec->errinfo = saved.errinfo)
3991
3992 saved.errinfo = ec->errinfo;
3993 saved.objid = rb_obj_id(obj);
3994 saved.cfp = ec->cfp;
3995 saved.finished = 0;
3996 saved.final = Qundef;
3997
3998 EC_PUSH_TAG(ec);
3999 state = EC_EXEC_TAG();
4000 if (state != TAG_NONE) {
4001 ++saved.finished; /* skip failed finalizer */
4002 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4003 }
4004 for (i = saved.finished;
4005 RESTORE_FINALIZER(), i<RARRAY_LEN(table);
4006 saved.finished = ++i) {
4007 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4008 }
4009 EC_POP_TAG();
4010#undef RESTORE_FINALIZER
4011}
4012
4013static void
4014run_final(rb_objspace_t *objspace, VALUE zombie)
4015{
4016 st_data_t key, table;
4017
4018 if (RZOMBIE(zombie)->dfree) {
4019 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4020 }
4021
4022 key = (st_data_t)zombie;
4023 if (st_delete(finalizer_table, &key, &table)) {
4024 run_finalizer(objspace, zombie, (VALUE)table);
4025 }
4026}
4027
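/* Walks a singly linked chain of T_ZOMBIE objects: runs each zombie's dfree
 * function and any Ruby-level finalizers, then returns the slot to its heap
 * page's freelist under the VM lock. */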
4028static void
4029finalize_list(rb_objspace_t *objspace, VALUE zombie)
4030{
4031 while (zombie) {
4032 VALUE next_zombie;
4033 struct heap_page *page;
4034 asan_unpoison_object(zombie, false);
4035 next_zombie = RZOMBIE(zombie)->next;
4036 page = GET_HEAP_PAGE(zombie);
4037
4038 run_final(objspace, zombie);
4039
4040 RB_VM_LOCK_ENTER();
4041 {
4042 GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
4043 if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
4044 obj_free_object_id(objspace, zombie);
4045 }
4046
4047 GC_ASSERT(heap_pages_final_slots > 0);
4048 GC_ASSERT(page->final_slots > 0);
4049
4050 heap_pages_final_slots--;
4051 page->final_slots--;
4052 page->free_slots++;
4053 heap_page_add_freeobj(objspace, page, zombie);
4054 objspace->profile.total_freed_objects++;
4055 }
4056 RB_VM_LOCK_LEAVE();
4057
4058 zombie = next_zombie;
4059 }
4060}
4061
4062static void
4063finalize_deferred(rb_objspace_t *objspace)
4064{
4065 VALUE zombie;
4066 rb_execution_context_t *ec = GET_EC();
4067 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4068
4069 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4070 finalize_list(objspace, zombie);
4071 }
4072
4073 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4074}
4075
4076static void
4077gc_finalize_deferred(void *dmy)
4078{
4079 rb_objspace_t *objspace = dmy;
4080 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4081
4082 finalize_deferred(objspace);
4083 ATOMIC_SET(finalizing, 0);
4084}
4085
4086static void
4087gc_finalize_deferred_register(rb_objspace_t *objspace)
4088{
4089 if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
4090 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
4091 }
4092}
4093
4094struct force_finalize_list {
4095     VALUE obj;
4096 VALUE table;
4097 struct force_finalize_list *next;
4098};
4099
4100static int
4101force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4102{
4103 struct force_finalize_list **prev = (struct force_finalize_list **)arg;
4104 struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
4105 curr->obj = key;
4106 curr->table = val;
4107 curr->next = *prev;
4108 *prev = curr;
4109 return ST_CONTINUE;
4110}
4111
4112bool rb_obj_is_main_ractor(VALUE gv);
4113
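/* Called at VM shutdown: runs all pending and registered finalizers, then
 * frees the T_DATA/T_FILE objects that still have dfree functions (threads,
 * mutexes, fibers and the main ractor are skipped).  GC is disabled while the
 * forced T_DATA finalizers run because they can break object graph
 * consistency. */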
4114void
4115rb_objspace_call_finalizer(rb_objspace_t *objspace)
4116{
4117 size_t i;
4118
4119#if RGENGC_CHECK_MODE >= 2
4120 gc_verify_internal_consistency(objspace);
4121#endif
4122 gc_rest(objspace);
4123
4124 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4125
4126 /* run finalizers */
4127 finalize_deferred(objspace);
4128 GC_ASSERT(heap_pages_deferred_final == 0);
4129
4130 gc_rest(objspace);
4131 /* prohibit incremental GC */
4132 objspace->flags.dont_incremental = 1;
4133
4134 /* force to run finalizer */
4135 while (finalizer_table->num_entries) {
4136 struct force_finalize_list *list = 0;
4137 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4138 while (list) {
4139 struct force_finalize_list *curr = list;
4140 st_data_t obj = (st_data_t)curr->obj;
4141 run_finalizer(objspace, curr->obj, curr->table);
4142 st_delete(finalizer_table, &obj, 0);
4143 list = curr->next;
4144 xfree(curr);
4145 }
4146 }
4147
4148 /* prohibit GC because force T_DATA finalizers can break an object graph consistency */
4149 dont_gc_on();
4150
4151 /* running data/file finalizers are part of garbage collection */
4152 unsigned int lock_lev;
4153 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4154
4155 /* run data/file object's finalizers */
4156 for (i = 0; i < heap_allocated_pages; i++) {
4157 struct heap_page *page = heap_pages_sorted[i];
4158 short stride = page->slot_size;
4159
4160 uintptr_t p = (uintptr_t)page->start;
4161 uintptr_t pend = p + page->total_slots * stride;
4162 for (; p < pend; p += stride) {
4163 VALUE vp = (VALUE)p;
4164 void *poisoned = asan_poisoned_object_p(vp);
4165 asan_unpoison_object(vp, false);
4166 switch (BUILTIN_TYPE(vp)) {
4167 case T_DATA:
4168 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4169 if (rb_obj_is_thread(vp)) break;
4170 if (rb_obj_is_mutex(vp)) break;
4171 if (rb_obj_is_fiber(vp)) break;
4172 if (rb_obj_is_main_ractor(vp)) break;
4173 if (RTYPEDDATA_P(vp)) {
4174 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
4175 }
4176 RANY(p)->as.free.flags = 0;
4177 if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
4178 xfree(DATA_PTR(p));
4179 }
4180 else if (RANY(p)->as.data.dfree) {
4181 make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
4182 }
4183 break;
4184 case T_FILE:
4185 if (RANY(p)->as.file.fptr) {
4186 make_io_zombie(objspace, vp);
4187 }
4188 break;
4189 default:
4190 break;
4191 }
4192 if (poisoned) {
4193 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
4194 asan_poison_object(vp);
4195 }
4196 }
4197 }
4198
4199 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4200
4201 if (heap_pages_deferred_final) {
4202 finalize_list(objspace, heap_pages_deferred_final);
4203 }
4204
4205 st_free_table(finalizer_table);
4206 finalizer_table = 0;
4207 ATOMIC_SET(finalizing, 0);
4208}
4209
4210static inline int
4211is_swept_object(rb_objspace_t *objspace, VALUE ptr)
4212{
4213 struct heap_page *page = GET_HEAP_PAGE(ptr);
4214 return page->flags.before_sweep ? FALSE : TRUE;
4215}
4216
4217/* garbage objects will be collected soon. */
4218static inline int
4219is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
4220{
4221 if (!is_lazy_sweeping(objspace) ||
4222 is_swept_object(objspace, ptr) ||
4223 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4224
4225 return FALSE;
4226 }
4227 else {
4228 return TRUE;
4229 }
4230}
4231
4232static inline int
4233is_live_object(rb_objspace_t *objspace, VALUE ptr)
4234{
4235 switch (BUILTIN_TYPE(ptr)) {
4236 case T_NONE:
4237 case T_MOVED:
4238 case T_ZOMBIE:
4239 return FALSE;
4240 default:
4241 break;
4242 }
4243
4244 if (!is_garbage_object(objspace, ptr)) {
4245 return TRUE;
4246 }
4247 else {
4248 return FALSE;
4249 }
4250}
4251
4252static inline int
4253is_markable_object(rb_objspace_t *objspace, VALUE obj)
4254{
4255 if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
4256 check_rvalue_consistency(obj);
4257 return TRUE;
4258}
4259
4260int
4261rb_objspace_markable_object_p(VALUE obj)
4262{
4263 rb_objspace_t *objspace = &rb_objspace;
4264 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
4265}
4266
4267int
4268rb_objspace_garbage_object_p(VALUE obj)
4269{
4270 rb_objspace_t *objspace = &rb_objspace;
4271 return is_garbage_object(objspace, obj);
4272}
4273
4274static VALUE
4275id2ref_obj_tbl(rb_objspace_t *objspace, VALUE objid)
4276{
4277 VALUE orig;
4278 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4279 return orig;
4280 }
4281 else {
4282 return Qundef;
4283 }
4284}
4285
4286/*
4287 * call-seq:
4288 * ObjectSpace._id2ref(object_id) -> an_object
4289 *
4290 * Converts an object id to a reference to the object. May not be
4291 * called on an object id passed as a parameter to a finalizer.
4292 *
4293 * s = "I am a string" #=> "I am a string"
4294 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
4295 * r == s #=> true
4296 *
4297 * In multi-ractor mode, if the object is not shareable, it raises
4298 * RangeError.
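 *
 *  For example (an illustrative sketch; creating any Ractor is assumed to
 *  switch the VM into multi-ractor mode):
 *
 *     Ractor.new { }                    # enter multi-ractor mode
 *     s = "unshareable string"
 *     ObjectSpace._id2ref(s.object_id)  # may raise RangeError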
4299 */
4300
4301static VALUE
4302id2ref(VALUE objid)
4303{
4304#if SIZEOF_LONG == SIZEOF_VOIDP
4305#define NUM2PTR(x) NUM2ULONG(x)
4306#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4307#define NUM2PTR(x) NUM2ULL(x)
4308#endif
4309 rb_objspace_t *objspace = &rb_objspace;
4310 VALUE ptr;
4311 VALUE orig;
4312 void *p0;
4313
4314 objid = rb_to_int(objid);
4315 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4316 ptr = NUM2PTR(objid);
4317 if (ptr == Qtrue) return Qtrue;
4318 if (ptr == Qfalse) return Qfalse;
4319 if (NIL_P(ptr)) return Qnil;
4320 if (FIXNUM_P(ptr)) return (VALUE)ptr;
4321 if (FLONUM_P(ptr)) return (VALUE)ptr;
4322
4323 ptr = obj_id_to_ref(objid);
4324 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4325 ID symid = ptr / sizeof(RVALUE);
4326 p0 = (void *)ptr;
4327 if (rb_id2str(symid) == 0)
4328 rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
4329 return ID2SYM(symid);
4330 }
4331 }
4332
4333 if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
4334 is_live_object(objspace, orig)) {
4335
4336 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
4337 return orig;
4338 }
4339 else {
4340 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4341 }
4342 }
4343
4344 if (rb_int_ge(objid, objspace->next_object_id)) {
4345 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
4346 }
4347 else {
4348 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
4349 }
4350}
4351
4352static VALUE
4353os_id2ref(VALUE os, VALUE objid)
4354{
4355 return id2ref(objid);
4356}
4357
4358static VALUE
4359rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
4360{
4361 if (STATIC_SYM_P(obj)) {
4362 return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
4363 }
4364 else if (FLONUM_P(obj)) {
4365#if SIZEOF_LONG == SIZEOF_VOIDP
4366 return LONG2NUM((SIGNED_VALUE)obj);
4367#else
4368 return LL2NUM((SIGNED_VALUE)obj);
4369#endif
4370 }
4371 else if (SPECIAL_CONST_P(obj)) {
4372 return LONG2NUM((SIGNED_VALUE)obj);
4373 }
4374
4375 return get_heap_object_id(obj);
4376}
4377
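/* Returns the object_id for a heap object, assigning one on first use.  Ids
 * come from a monotonically increasing counter (objspace->next_object_id)
 * rather than from the object's address; the obj<->id mappings are stored in
 * objspace->obj_to_id_tbl and id_to_obj_tbl under the VM lock. */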
4378static VALUE
4379cached_object_id(VALUE obj)
4380{
4381 VALUE id;
4382 rb_objspace_t *objspace = &rb_objspace;
4383
4384 RB_VM_LOCK_ENTER();
4385 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4386 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
4387 }
4388 else {
4389 GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));
4390
4391 id = objspace->next_object_id;
4392 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4393
4394 VALUE already_disabled = rb_gc_disable_no_rest();
4395 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4396 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4397 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4398 FL_SET(obj, FL_SEEN_OBJ_ID);
4399 }
4400 RB_VM_LOCK_LEAVE();
4401
4402 return id;
4403}
4404
4405static VALUE
4406nonspecial_obj_id_(VALUE obj)
4407{
4408 return nonspecial_obj_id(obj);
4409}
4410
4411
4412VALUE
4413rb_memory_id(VALUE obj)
4414{
4415 return rb_find_object_id(obj, nonspecial_obj_id_);
4416}
4417
4418/*
4419 * Document-method: __id__
4420 * Document-method: object_id
4421 *
4422 * call-seq:
4423 * obj.__id__ -> integer
4424 * obj.object_id -> integer
4425 *
4426 * Returns an integer identifier for +obj+.
4427 *
4428 * The same number will be returned on all calls to +object_id+ for a given
4429 * object, and no two active objects will share an id.
4430 *
4431 * Note that some objects of builtin classes are reused for optimization.
4432 * This is the case for immediate values and frozen string literals.
4433 *
4434 * BasicObject implements +__id__+, Kernel implements +object_id+.
4435 *
4436 * Immediate values are not passed by reference but are passed by value:
4437 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
4438 *
4439 * Object.new.object_id == Object.new.object_id # => false
4440 * (21 * 2).object_id == (21 * 2).object_id # => true
4441 * "hello".object_id == "hello".object_id # => false
4442 * "hi".freeze.object_id == "hi".freeze.object_id # => true
4443 */
4444
4445VALUE
4446rb_obj_id(VALUE obj)
4447{
4448 /*
4449 * 32-bit VALUE space
4450 * MSB ------------------------ LSB
4451 * false 00000000000000000000000000000000
4452 * true 00000000000000000000000000000010
4453 * nil 00000000000000000000000000000100
4454 * undef 00000000000000000000000000000110
4455 * symbol ssssssssssssssssssssssss00001110
4456 * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
4457 * fixnum fffffffffffffffffffffffffffffff1
4458 *
4459 * object_id space
4460 * LSB
4461 * false 00000000000000000000000000000000
4462 * true 00000000000000000000000000000010
4463 * nil 00000000000000000000000000000100
4464 * undef 00000000000000000000000000000110
4465 * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
4466 * object oooooooooooooooooooooooooooooo0 o...o % A = 0
4467 * fixnum fffffffffffffffffffffffffffffff1 bignum if required
4468 *
4469 * where A = sizeof(RVALUE)/4
4470 *
4471 * sizeof(RVALUE) is
4472 * 20 if 32-bit, double is 4-byte aligned
4473 * 24 if 32-bit, double is 8-byte aligned
4474 * 40 if 64-bit
4475 */
4476
4477 return rb_find_object_id(obj, cached_object_id);
4478}
4479
4480static enum rb_id_table_iterator_result
4481cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
4482{
4483 size_t *total_size = data_ptr;
4484 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
4485 *total_size += sizeof(*ccs);
4486 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
4487 return ID_TABLE_CONTINUE;
4488}
4489
4490static size_t
4491cc_table_memsize(struct rb_id_table *cc_table)
4492{
4493 size_t total = rb_id_table_memsize(cc_table);
4494 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
4495 return total;
4496}
4497
4498static size_t
4499obj_memsize_of(VALUE obj, int use_all_types)
4500{
4501 size_t size = 0;
4502
4503 if (SPECIAL_CONST_P(obj)) {
4504 return 0;
4505 }
4506
4507 if (FL_TEST(obj, FL_EXIVAR)) {
4508 size += rb_generic_ivar_memsize(obj);
4509 }
4510
4511 switch (BUILTIN_TYPE(obj)) {
4512 case T_OBJECT:
4513 if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
4514 size += ROBJECT_NUMIV(obj) * sizeof(VALUE);
4515 }
4516 break;
4517 case T_MODULE:
4518 case T_CLASS:
4519 if (RCLASS_EXT(obj)) {
4520 if (RCLASS_M_TBL(obj)) {
4521 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4522 }
4523 if (RCLASS_IV_TBL(obj)) {
4524 size += st_memsize(RCLASS_IV_TBL(obj));
4525 }
4526 if (RCLASS_CVC_TBL(obj)) {
4527 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
4528 }
4529 if (RCLASS_IV_INDEX_TBL(obj)) {
4530 // TODO: more correct value
4531 size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
4532 }
4533 if (RCLASS_EXT(obj)->iv_tbl) {
4534 size += st_memsize(RCLASS_EXT(obj)->iv_tbl);
4535 }
4536 if (RCLASS_EXT(obj)->const_tbl) {
4537 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
4538 }
4539 if (RCLASS_CC_TBL(obj)) {
4540 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4541 }
4542#if !USE_RVARGC
4543 size += sizeof(rb_classext_t);
4544#endif
4545 }
4546 break;
4547 case T_ICLASS:
4548 if (RICLASS_OWNS_M_TBL_P(obj)) {
4549 if (RCLASS_M_TBL(obj)) {
4550 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4551 }
4552 }
4553 if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
4554 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4555 }
4556 break;
4557 case T_STRING:
4558 size += rb_str_memsize(obj);
4559 break;
4560 case T_ARRAY:
4561 size += rb_ary_memsize(obj);
4562 break;
4563 case T_HASH:
4564 if (RHASH_AR_TABLE_P(obj)) {
4565 if (RHASH_AR_TABLE(obj) != NULL) {
4566 size_t rb_hash_ar_table_size(void);
4567 size += rb_hash_ar_table_size();
4568 }
4569 }
4570 else {
4571 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
4572 size += st_memsize(RHASH_ST_TABLE(obj));
4573 }
4574 break;
4575 case T_REGEXP:
4576 if (RREGEXP_PTR(obj)) {
4577 size += onig_memsize(RREGEXP_PTR(obj));
4578 }
4579 break;
4580 case T_DATA:
4581 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
4582 break;
4583 case T_MATCH:
4584 if (RMATCH(obj)->rmatch) {
4585 struct rmatch *rm = RMATCH(obj)->rmatch;
4586 size += onig_region_memsize(&rm->regs);
4587 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
4588 size += sizeof(struct rmatch);
4589 }
4590 break;
4591 case T_FILE:
4592 if (RFILE(obj)->fptr) {
4593 size += rb_io_memsize(RFILE(obj)->fptr);
4594 }
4595 break;
4596 case T_RATIONAL:
4597 case T_COMPLEX:
4598 break;
4599 case T_IMEMO:
4600 size += imemo_memsize(obj);
4601 break;
4602
4603 case T_FLOAT:
4604 case T_SYMBOL:
4605 break;
4606
4607 case T_BIGNUM:
4608 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
4609 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
4610 }
4611 break;
4612
4613 case T_NODE:
4614 UNEXPECTED_NODE(obj_memsize_of);
4615 break;
4616
4617 case T_STRUCT:
4618 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
4619 RSTRUCT(obj)->as.heap.ptr) {
4620 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
4621 }
4622 break;
4623
4624 case T_ZOMBIE:
4625 case T_MOVED:
4626 break;
4627
4628 default:
4629 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
4630 BUILTIN_TYPE(obj), (void*)obj);
4631 }
4632
4633 return size + GET_HEAP_PAGE(obj)->slot_size;
4634}
4635
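/* Public wrapper around obj_memsize_of() that also includes T_DATA sizes.
 * This is what the objspace extension reports; a rough usage sketch (exact
 * numbers are implementation-dependent):
 *
 *     require "objspace"
 *     ObjectSpace.memsize_of("x" * 1000)  # => about 1000 plus slot overhead
 */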
4636size_t
4637rb_obj_memsize_of(VALUE obj)
4638{
4639 return obj_memsize_of(obj, TRUE);
4640}
4641
4642static int
4643set_zero(st_data_t key, st_data_t val, st_data_t arg)
4644{
4645 VALUE k = (VALUE)key;
4646 VALUE hash = (VALUE)arg;
4647 rb_hash_aset(hash, k, INT2FIX(0));
4648 return ST_CONTINUE;
4649}
4650
4651static VALUE
4652type_sym(size_t type)
4653{
4654 switch (type) {
4655#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
4656 COUNT_TYPE(T_NONE);
4657 COUNT_TYPE(T_OBJECT);
4658 COUNT_TYPE(T_CLASS);
4659 COUNT_TYPE(T_MODULE);
4660 COUNT_TYPE(T_FLOAT);
4661 COUNT_TYPE(T_STRING);
4662 COUNT_TYPE(T_REGEXP);
4663 COUNT_TYPE(T_ARRAY);
4664 COUNT_TYPE(T_HASH);
4665 COUNT_TYPE(T_STRUCT);
4666 COUNT_TYPE(T_BIGNUM);
4667 COUNT_TYPE(T_FILE);
4668 COUNT_TYPE(T_DATA);
4669 COUNT_TYPE(T_MATCH);
4670 COUNT_TYPE(T_COMPLEX);
4671 COUNT_TYPE(T_RATIONAL);
4672 COUNT_TYPE(T_NIL);
4673 COUNT_TYPE(T_TRUE);
4674 COUNT_TYPE(T_FALSE);
4675 COUNT_TYPE(T_SYMBOL);
4676 COUNT_TYPE(T_FIXNUM);
4677 COUNT_TYPE(T_IMEMO);
4678 COUNT_TYPE(T_UNDEF);
4679 COUNT_TYPE(T_NODE);
4680 COUNT_TYPE(T_ICLASS);
4681 COUNT_TYPE(T_ZOMBIE);
4682 COUNT_TYPE(T_MOVED);
4683#undef COUNT_TYPE
4684 default: return SIZET2NUM(type); break;
4685 }
4686}
4687
4688/*
4689 * call-seq:
4690 * ObjectSpace.count_objects([result_hash]) -> hash
4691 *
4692 * Counts all objects grouped by type.
4693 *
4694 * It returns a hash, such as:
4695 * {
4696 * :TOTAL=>10000,
4697 * :FREE=>3011,
4698 * :T_OBJECT=>6,
4699 * :T_CLASS=>404,
4700 * # ...
4701 * }
4702 *
4703 * The contents of the returned hash are implementation specific
4704 * and may change in the future.
4705 *
4706 * The keys starting with +:T_+ count live objects.
4707 * For example, +:T_ARRAY+ is the number of arrays.
4708 * +:FREE+ is the number of object slots that are not currently in use.
4709 * +:TOTAL+ is the sum of the above.
4710 *
4711 * If the optional argument +result_hash+ is given,
4712 * it is overwritten and returned. This is intended to avoid the probe effect.
4713 *
4714 * h = {}
4715 * ObjectSpace.count_objects(h)
4716 * puts h
4717 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
4718 *
4719 * This method is only expected to work on C Ruby.
4720 *
4721 */
4722
4723static VALUE
4724count_objects(int argc, VALUE *argv, VALUE os)
4725{
4726 rb_objspace_t *objspace = &rb_objspace;
4727 size_t counts[T_MASK+1];
4728 size_t freed = 0;
4729 size_t total = 0;
4730 size_t i;
4731 VALUE hash = Qnil;
4732
4733 if (rb_check_arity(argc, 0, 1) == 1) {
4734 hash = argv[0];
4735 if (!RB_TYPE_P(hash, T_HASH))
4736 rb_raise(rb_eTypeError, "non-hash given");
4737 }
4738
4739 for (i = 0; i <= T_MASK; i++) {
4740 counts[i] = 0;
4741 }
4742
4743 for (i = 0; i < heap_allocated_pages; i++) {
4744 struct heap_page *page = heap_pages_sorted[i];
4745 short stride = page->slot_size;
4746
4747 uintptr_t p = (uintptr_t)page->start;
4748 uintptr_t pend = p + page->total_slots * stride;
4749 for (;p < pend; p += stride) {
4750 VALUE vp = (VALUE)p;
4751 GC_ASSERT((NUM_IN_PAGE(vp) * sizeof(RVALUE)) % page->slot_size == 0);
4752
4753 void *poisoned = asan_poisoned_object_p(vp);
4754 asan_unpoison_object(vp, false);
4755 if (RANY(p)->as.basic.flags) {
4756 counts[BUILTIN_TYPE(vp)]++;
4757 }
4758 else {
4759 freed++;
4760 }
4761 if (poisoned) {
4762 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
4763 asan_poison_object(vp);
4764 }
4765 }
4766 total += page->total_slots;
4767 }
4768
4769 if (NIL_P(hash)) {
4770 hash = rb_hash_new();
4771 }
4772 else if (!RHASH_EMPTY_P(hash)) {
4773 rb_hash_stlike_foreach(hash, set_zero, hash);
4774 }
4775 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
4776 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
4777
4778 for (i = 0; i <= T_MASK; i++) {
4779 VALUE type = type_sym(i);
4780 if (counts[i])
4781 rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
4782 }
4783
4784 return hash;
4785}
4786
4787/*
4788 ------------------------ Garbage Collection ------------------------
4789*/
4790
4791/* Sweeping */
4792
4793static size_t
4794objspace_available_slots(rb_objspace_t *objspace)
4795{
4796 size_t total_slots = 0;
4797 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4798 rb_size_pool_t *size_pool = &size_pools[i];
4799 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
4800 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
4801 }
4802 return total_slots;
4803}
4804
4805static size_t
4806objspace_live_slots(rb_objspace_t *objspace)
4807{
4808 return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
4809}
4810
4811static size_t
4812objspace_free_slots(rb_objspace_t *objspace)
4813{
4814 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
4815}
4816
4817static void
4818gc_setup_mark_bits(struct heap_page *page)
4819{
4820 /* copy oldgen bitmap to mark bitmap */
4821 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
4822}
4823
4824static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
4825static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size);
4826
4827static void
4828lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
4829{
4830#if defined(_WIN32)
4831 DWORD old_protect;
4832
4833 if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
4834#else
4835 if (mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
4836#endif
4837 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
4838 }
4839 else {
4840 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
4841 }
4842}
4843
4844static void
4845unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
4846{
4847#if defined(_WIN32)
4848 DWORD old_protect;
4849
4850 if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
4851#else
4852 if (mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
4853#endif
4854 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
4855 }
4856 else {
4857 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
4858 }
4859}
4860
4861static inline bool
4862try_move_plane(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page, uintptr_t p, bits_t bits, VALUE dest)
4863{
4864 if (bits) {
4865 do {
4866 if (bits & 1) {
4867 /* We're trying to move "p" */
4868 objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)p)]++;
4869
4870 if (gc_is_moveable_obj(objspace, (VALUE)p)) {
4871 /* We were able to move "p" */
4872 objspace->rcompactor.moved_count_table[BUILTIN_TYPE((VALUE)p)]++;
4873 objspace->rcompactor.total_moved++;
4874
4875 bool from_freelist = false;
4876
4877 if (BUILTIN_TYPE(dest) == T_NONE) {
4878 from_freelist = true;
4879 }
4880
4881 gc_move(objspace, (VALUE)p, dest, page->slot_size);
4882 gc_pin(objspace, (VALUE)p);
4883 heap->compact_cursor_index = (RVALUE *)p;
4884 if (from_freelist) {
4885 FL_SET((VALUE)p, FL_FROM_FREELIST);
4886 }
4887
4888 return true;
4889 }
4890 }
4891 p += sizeof(RVALUE);
4892 bits >>= 1;
4893 } while (bits);
4894 }
4895
4896 return false;
4897}
4898
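/* Scans from heap->compact_cursor toward the sweep cursor looking for a
 * marked, unpinned object and moves it into `dest` (a slot on the page
 * currently being swept).  Returns 1 when an object was moved; returns 0 once
 * the compact and sweep cursors meet, which ends compaction for this heap. */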
4899static short
4900try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, VALUE dest)
4901{
4902 struct heap_page * cursor = heap->compact_cursor;
4903
4904 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
4905
4906 /* T_NONE objects came from the free list. If the object is *not* a
4907 * T_NONE, it is an object that just got freed but hasn't been
4908 * added to the freelist yet */
4909
4910 while (1) {
4911 size_t index;
4912
4913 bits_t *mark_bits = cursor->mark_bits;
4914 bits_t *pin_bits = cursor->pinned_bits;
4915 RVALUE * p;
4916
4917 if (heap->compact_cursor_index) {
4918 index = BITMAP_INDEX(heap->compact_cursor_index);
4919 p = heap->compact_cursor_index;
4920 GC_ASSERT(cursor == GET_HEAP_PAGE(p));
4921 }
4922 else {
4923 index = 0;
4924 p = cursor->start;
4925 }
4926
4927 bits_t bits = mark_bits[index] & ~pin_bits[index];
4928
4929 bits >>= NUM_IN_PAGE(p);
4930 if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest)) return 1;
4931
4932 if (index == 0) {
4933 p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start));
4934 }
4935 else {
4936 p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start)) + (BITS_BITLENGTH * index);
4937 }
4938
4939 /* Find an object to move and move it. Movable objects must be
4940 * marked, so we iterate using the marking bitmap */
4941 for (size_t i = index + 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4942 bits_t bits = mark_bits[i] & ~pin_bits[i];
4943 if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest)) return 1;
4944 p += BITS_BITLENGTH;
4945 }
4946
4947     /* We couldn't find a movable object on the compact cursor, so let's
4948 * move to the next page (previous page since we are traveling in the
4949 * opposite direction of the sweep cursor) and look there. */
4950
4951 struct heap_page * next;
4952
4953 next = list_prev(&heap->pages, cursor, page_node);
4954
4955 /* Protect the current cursor since it probably has T_MOVED slots. */
4956 lock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4957
4958 heap->compact_cursor = next;
4959 heap->compact_cursor_index = 0;
4960 cursor = next;
4961
4962        // Cursors have met, let's quit. We set `heap->compact_cursor` equal
4963 // to `heap->sweeping_page` so we know how far to iterate through
4964 // the heap when unprotecting pages.
4965 if (next == sweep_page) {
4966 break;
4967 }
4968 }
4969
4970 return 0;
4971}
4972
4973static void
4974gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
4975{
4976 struct heap_page *cursor = heap->compact_cursor;
4977
4978 while (cursor) {
4979 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4980 cursor = list_next(&heap->pages, cursor, page_node);
4981 }
4982}
4983
4984static void gc_update_references(rb_objspace_t * objspace);
4985static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
4986
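/* Compaction read barrier: pages that may still contain T_MOVED slots are kept
 * protected with mprotect (VirtualProtect on Windows), so a stray access
 * faults.  The SEGV/BUS handler forwards the faulting address here; we
 * unprotect the page, count the fault, and invalidate the moved objects on
 * that page so the interrupted access can proceed. */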
4987static void
4988read_barrier_handler(uintptr_t address)
4989{
4990 VALUE obj;
4991 rb_objspace_t * objspace = &rb_objspace;
4992
4993 address -= address % sizeof(RVALUE);
4994
4995 obj = (VALUE)address;
4996
4997 RB_VM_LOCK_ENTER();
4998 {
4999 unlock_page_body(objspace, GET_PAGE_BODY(obj));
5000
5001 objspace->profile.read_barrier_faults++;
5002
5003 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
5004 }
5005 RB_VM_LOCK_LEAVE();
5006}
5007
5008#if defined(_WIN32)
5009static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5010typedef void (*signal_handler)(int);
5011static signal_handler old_sigsegv_handler;
5012
5013static LONG WINAPI
5014read_barrier_signal(EXCEPTION_POINTERS * info)
5015{
5016 /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
5017 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5018 /* > The second array element specifies the virtual address of the inaccessible data.
5019 * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
5020 *
5021 * Use this address to invalidate the page */
5022 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5023 return EXCEPTION_CONTINUE_EXECUTION;
5024 }
5025 else {
5026 return EXCEPTION_CONTINUE_SEARCH;
5027 }
5028}
5029
5030static void
5031uninstall_handlers(void)
5032{
5033 signal(SIGSEGV, old_sigsegv_handler);
5034 SetUnhandledExceptionFilter(old_handler);
5035}
5036
5037static void
5038install_handlers(void)
5039{
5040 /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
5041 old_sigsegv_handler = signal(SIGSEGV, NULL);
5042 /* Unhandled Exception Filter has access to the violation address similar
5043 * to si_addr from sigaction */
5044 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5045}
5046#else
5047static struct sigaction old_sigbus_handler;
5048static struct sigaction old_sigsegv_handler;
5049
5050static void
5051read_barrier_signal(int sig, siginfo_t * info, void * data)
5052{
5053    // reinstall the previous SEGV/BUS handlers so a fault inside this handler is not handled here again
5054 struct sigaction prev_sigbus, prev_sigsegv;
5055 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5056 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5057
5058 // enable SIGBUS/SEGV
5059 sigset_t set, prev_set;
5060 sigemptyset(&set);
5061 sigaddset(&set, SIGBUS);
5062 sigaddset(&set, SIGSEGV);
5063 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5064
5065 // run handler
5066 read_barrier_handler((uintptr_t)info->si_addr);
5067
5068 // reset SEGV/BUS handlers
5069 sigaction(SIGBUS, &prev_sigbus, NULL);
5070 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5071 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5072}
5073
5074static void
5075uninstall_handlers(void)
5076{
5077 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5078 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5079}
5080
5081static void
5082install_handlers(void)
5083{
5084 struct sigaction action;
5085 memset(&action, 0, sizeof(struct sigaction));
5086 sigemptyset(&action.sa_mask);
5087 action.sa_sigaction = read_barrier_signal;
5088 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5089
5090 sigaction(SIGBUS, &action, &old_sigbus_handler);
5091 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5092}
5093#endif
5094
5095static void
5096revert_stack_objects(VALUE stack_obj, void *ctx)
5097{
5098 rb_objspace_t * objspace = (rb_objspace_t*)ctx;
5099
5100 if (BUILTIN_TYPE(stack_obj) == T_MOVED) {
5101 /* For now we'll revert the whole page if the object made it to the
5102 * stack. I think we can change this to move just the one object
5103 * back though */
5104 invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
5105 }
5106}
5107
5108static void
5109revert_machine_stack_references(rb_objspace_t *objspace, VALUE v)
5110{
5111 if (is_pointer_to_heap(objspace, (void *)v)) {
5112 if (BUILTIN_TYPE(v) == T_MOVED) {
5113 /* For now we'll revert the whole page if the object made it to the
5114 * stack. I think we can change this to move just the one object
5115 * back though */
5116 invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
5117 }
5118 }
5119}
5120
5121static void each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE));
5122
5123static void
5124check_stack_for_moved(rb_objspace_t *objspace)
5125{
5126 rb_execution_context_t *ec = GET_EC();
5127 rb_vm_t *vm = rb_ec_vm_ptr(ec);
5128 rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
5129 each_machine_stack_value(ec, revert_machine_stack_references);
5130}
5131
5132static void
5133gc_compact_finish(rb_objspace_t *objspace, rb_size_pool_t *pool, rb_heap_t *heap)
5134{
5135 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5136 rb_size_pool_t *size_pool = &size_pools[i];
5137 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5138 gc_unprotect_pages(objspace, heap);
5139 }
5140
5141 uninstall_handlers();
5142
5143 /* The mutator is allowed to run during incremental sweeping. T_MOVED
5144 * objects can get pushed on the stack and when the compaction process
5145 * finishes up, it may remove the read barrier before anything has a
5146 * chance to read from the T_MOVED address. To fix this, we scan the stack
5147 * then revert any moved objects that made it to the stack. */
5148 check_stack_for_moved(objspace);
5149
5150 gc_update_references(objspace);
5151 objspace->profile.compact_count++;
5152
5153 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5154 rb_size_pool_t *size_pool = &size_pools[i];
5155 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5156 heap->compact_cursor = NULL;
5157 heap->compact_cursor_index = 0;
5158 }
5159
5160 if (gc_prof_enabled(objspace)) {
5161 gc_profile_record *record = gc_prof_record(objspace);
5162 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5163 }
5164 objspace->flags.during_compacting = FALSE;
5165}
5166
5167struct gc_sweep_context {
5168    struct heap_page *page;
5169 int final_slots;
5170 int freed_slots;
5171 int empty_slots;
5172};
5173
5174static inline void
5175gc_fill_swept_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, bool *finished_compacting, struct gc_sweep_context *ctx)
5176{
5177 struct heap_page * sweep_page = ctx->page;
5178
5179 if (bitset) {
5180 short slot_size = sweep_page->slot_size;
5181 short slot_bits = slot_size / sizeof(RVALUE);
5182
5183 do {
5184 if (bitset & 1) {
5185 VALUE dest = (VALUE)p;
5186
5187 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest));
5188 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
5189
5190 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest);
5191
5192 if (*finished_compacting) {
5193 if (BUILTIN_TYPE(dest) == T_NONE) {
5194 ctx->empty_slots++;
5195 }
5196 else {
5197 ctx->freed_slots++;
5198 }
5199 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)dest, sizeof(RVALUE));
5200 heap_page_add_freeobj(objspace, sweep_page, dest);
5201 }
5202 else {
5203 /* Zombie slots don't get marked, but we can't reuse
5204                         * their memory until they have their finalizers run. */
5205 if (BUILTIN_TYPE(dest) != T_ZOMBIE) {
5206 if (!try_move(objspace, heap, sweep_page, dest)) {
5207 *finished_compacting = true;
5208 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
5209 gc_report(5, objspace, "Quit compacting, couldn't find an object to move\n");
5210 if (BUILTIN_TYPE(dest) == T_NONE) {
5211 ctx->empty_slots++;
5212 }
5213 else {
5214 ctx->freed_slots++;
5215 }
5216 heap_page_add_freeobj(objspace, sweep_page, dest);
5217 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(dest));
5218 }
5219 else {
5220 //moved_slots++;
5221 }
5222 }
5223 }
5224 }
5225 p += slot_size;
5226 bitset >>= slot_bits;
5227 } while (bitset);
5228 }
5229}
5230
5231static bool
5232gc_fill_swept_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, struct gc_sweep_context *ctx)
5233{
5234 /* Find any pinned but not marked objects and try to fill those slots */
5235 bool finished_compacting = false;
5236 bits_t *mark_bits, *pin_bits;
5237 bits_t bitset;
5238 uintptr_t p;
5239
5240 mark_bits = sweep_page->mark_bits;
5241 pin_bits = sweep_page->pinned_bits;
5242
5243 p = (uintptr_t)sweep_page->start;
5244
5245 struct heap_page * cursor = heap->compact_cursor;
5246
5247 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5248
5249 /* *Want to move* objects are pinned but not marked. */
5250 bitset = pin_bits[0] & ~mark_bits[0];
5251 bitset >>= NUM_IN_PAGE(p); // Skip header / dead space bits
5252 gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
5253 p += ((BITS_BITLENGTH - NUM_IN_PAGE(p)) * sizeof(RVALUE));
5254
5255 for (int i = 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5256 /* *Want to move* objects are pinned but not marked. */
5257 bitset = pin_bits[i] & ~mark_bits[i];
5258 gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
5259 p += ((BITS_BITLENGTH) * sizeof(RVALUE));
5260 }
5261
5262 lock_page_body(objspace, GET_PAGE_BODY(heap->compact_cursor->start));
5263
5264 return finished_compacting;
5265}
5266
5267static inline void
5268gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
5269{
5270 struct heap_page * sweep_page = ctx->page;
5271 short slot_size = sweep_page->slot_size;
5272 short slot_bits = slot_size / sizeof(RVALUE);
5273 GC_ASSERT(slot_bits > 0);
5274
5275 do {
5276 VALUE vp = (VALUE)p;
5277 GC_ASSERT(vp % sizeof(RVALUE) == 0);
5278
5279 asan_unpoison_object(vp, false);
5280 if (bitset & 1) {
5281 switch (BUILTIN_TYPE(vp)) {
5282 default: /* majority case */
5283 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5284#if RGENGC_CHECK_MODE
5285 if (!is_full_marking(objspace)) {
5286 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5287 if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5288 }
5289#endif
5290 if (obj_free(objspace, vp)) {
5291 if (heap->compact_cursor) {
5292 /* We *want* to fill this slot */
5293 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
5294 }
5295 else {
5296 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
5297 heap_page_add_freeobj(objspace, sweep_page, vp);
5298 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5299 ctx->freed_slots++;
5300 }
5301 }
5302 else {
5303 ctx->final_slots++;
5304 }
5305 break;
5306
5307 case T_MOVED:
5308 if (objspace->flags.during_compacting) {
5309 /* The sweep cursor shouldn't have made it to any
5310 * T_MOVED slots while the compact flag is enabled.
5311 * The sweep cursor and compact cursor move in
5312 * opposite directions, and when they meet references will
5313 * get updated and "during_compacting" should get disabled */
5314 rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
5315 }
5316 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5317 if (FL_TEST(vp, FL_FROM_FREELIST)) {
5318 ctx->empty_slots++;
5319 }
5320 else {
5321 ctx->freed_slots++;
5322 }
5323 heap_page_add_freeobj(objspace, sweep_page, vp);
5324 break;
5325 case T_ZOMBIE:
5326 /* already counted */
5327 break;
5328 case T_NONE:
5329 if (heap->compact_cursor) {
5330 /* We *want* to fill this slot */
5331 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
5332 }
5333 else {
5334 ctx->empty_slots++; /* already freed */
5335 }
5336 break;
5337 }
5338 }
5339 p += slot_size;
5340 bitset >>= slot_bits;
5341 } while (bitset);
5342}
5343
5344static inline void
5345gc_sweep_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct gc_sweep_context *ctx)
5346{
5347 struct heap_page *sweep_page = ctx->page;
5348
5349 int i;
5350
5351 RVALUE *p;
5352 bits_t *bits, bitset;
5353
5354 gc_report(2, objspace, "page_sweep: start.\n");
5355
5356 if (heap->compact_cursor) {
5357 if (sweep_page == heap->compact_cursor) {
5358 /* The compaction cursor and sweep page met, so we need to quit compacting */
5359 gc_report(5, objspace, "Quit compacting, mark and compact cursor met\n");
5360 gc_compact_finish(objspace, size_pool, heap);
5361 }
5362 else {
5363 /* We anticipate filling the page, so NULL out the freelist. */
5364 asan_unpoison_memory_region(&sweep_page->freelist, sizeof(RVALUE*), false);
5365 sweep_page->freelist = NULL;
5366 asan_poison_memory_region(&sweep_page->freelist, sizeof(RVALUE*));
5367 }
5368 }
5369
5370 sweep_page->flags.before_sweep = FALSE;
5371 sweep_page->free_slots = 0;
5372
5373 p = sweep_page->start;
5374 bits = sweep_page->mark_bits;
5375
5376 int page_rvalue_count = sweep_page->total_slots * (size_pool->slot_size / sizeof(RVALUE));
5377 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5378 if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
5379 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5380 }
5381
5382 // Skip out of range slots at the head of the page
5383 bitset = ~bits[0];
5384 bitset >>= NUM_IN_PAGE(p);
5385 if (bitset) {
5386 gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
5387 }
5388 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
5389
5390 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5391 bitset = ~bits[i];
5392 if (bitset) {
5393 gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
5394 }
5395 p += BITS_BITLENGTH;
5396 }
5397
5398 if (heap->compact_cursor) {
5399 if (gc_fill_swept_page(objspace, heap, sweep_page, ctx)) {
5400 gc_compact_finish(objspace, size_pool, heap);
5401 }
5402 }
5403
5404 if (!heap->compact_cursor) {
5405 gc_setup_mark_bits(sweep_page);
5406 }
5407
5408#if GC_PROFILE_MORE_DETAIL
5409 if (gc_prof_enabled(objspace)) {
5410 gc_profile_record *record = gc_prof_record(objspace);
5411 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5412 record->empty_objects += ctx->empty_slots;
5413 }
5414#endif
5415 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5416 rb_gc_count(),
5417 sweep_page->total_slots,
5418 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5419
5420 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5421 objspace->profile.total_freed_objects += ctx->freed_slots;
5422
5423 if (heap_pages_deferred_final && !finalizing) {
5424 rb_thread_t *th = GET_THREAD();
5425 if (th) {
5426 gc_finalize_deferred_register(objspace);
5427 }
5428 }
5429
5430#if RGENGC_CHECK_MODE
5431 short freelist_len = 0;
5432 RVALUE *ptr = sweep_page->freelist;
5433 while (ptr) {
5434 freelist_len++;
5435 ptr = ptr->as.free.next;
5436 }
5437 if (freelist_len != sweep_page->free_slots) {
5438 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5439 }
5440#endif
5441
5442 gc_report(2, objspace, "page_sweep: end.\n");
5443}
5444
5445#if !USE_RVARGC
5446/* allocate additional minimum page to work */
5447static void
5448gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
5449{
5450 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5451 if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
5452 /* there is no free after page_sweep() */
5453 size_pool_allocatable_pages_set(objspace, size_pool, 1);
5454 if (!heap_increment(objspace, size_pool, heap)) { /* can't allocate additional free objects */
5455 rb_memerror();
5456 }
5457 }
5458 }
5459}
5460#endif
5461
5462static const char *
5463gc_mode_name(enum gc_mode mode)
5464{
5465 switch (mode) {
5466 case gc_mode_none: return "none";
5467 case gc_mode_marking: return "marking";
5468 case gc_mode_sweeping: return "sweeping";
5469 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5470 }
5471}
5472
5473static void
5474gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5475{
5476#if RGENGC_CHECK_MODE
5477 enum gc_mode prev_mode = gc_mode(objspace);
5478 switch (prev_mode) {
5479 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5480 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5481 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
5482 }
5483#endif
5484 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5485 gc_mode_set(objspace, mode);
5486}
5487
5488static void
5489heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
5490{
5491 if (freelist) {
5492 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
5493 if (page->freelist) {
5494 RVALUE *p = page->freelist;
5495 asan_unpoison_object((VALUE)p, false);
5496 while (p->as.free.next) {
5497 RVALUE *prev = p;
5498 p = p->as.free.next;
5499 asan_poison_object((VALUE)prev);
5500 asan_unpoison_object((VALUE)p, false);
5501 }
5502 p->as.free.next = freelist;
5503 asan_poison_object((VALUE)p);
5504 }
5505 else {
5506 page->freelist = freelist;
5507 }
5508 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
5509 }
5510}
5511
5512static void
5513gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
5514{
5515 heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
5516 heap->free_pages = NULL;
5517#if GC_ENABLE_INCREMENTAL_MARK
5518 heap->pooled_pages = NULL;
5519#endif
5520}
5521
5522#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5523__attribute__((noinline))
5524#endif
5525static void
5526gc_sweep_start(rb_objspace_t *objspace)
5527{
5528 gc_mode_transition(objspace, gc_mode_sweeping);
5529
5530#if GC_ENABLE_INCREMENTAL_MARK
5531 objspace->rincgc.pooled_slots = 0;
5532#endif
5533
5534 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5535 rb_size_pool_t *size_pool = &size_pools[i];
5536
5537 gc_sweep_start_heap(objspace, SIZE_POOL_EDEN_HEAP(size_pool));
5538 }
5539
5540 rb_ractor_t *r = NULL;
5541 list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5542 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5543 }
5544}
5545
5546#if USE_RVARGC
5547static void
5548gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
5549{
5550 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5551 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5552 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5553 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5554
5555 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5556
5557 if (swept_slots < min_free_slots) {
5558 bool grow_heap = is_full_marking(objspace);
5559
5560 if (!is_full_marking(objspace)) {
5561            /* The heap is a growth heap if it has no empty slots, or freed more slots than it had empty slots. */
5562 bool is_growth_heap = size_pool->empty_slots == 0 ||
5563 size_pool->freed_slots > size_pool->empty_slots;
5564
5565 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5566 grow_heap = TRUE;
5567 }
5568 else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
5569 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5570 size_pool->force_major_gc_count++;
5571 }
5572 }
5573
5574 if (grow_heap) {
5575 size_t extend_page_count = heap_extend_pages(objspace, swept_slots, total_slots, total_pages);
5576
5577 if (extend_page_count > size_pool->allocatable_pages) {
5578 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5579 }
5580
5581 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5582 }
5583 }
5584}
5585#endif
5586
5587static void
5588gc_sweep_finish(rb_objspace_t *objspace)
5589{
5590 gc_report(1, objspace, "gc_sweep_finish\n");
5591
5592 gc_prof_set_heap_info(objspace);
5593 heap_pages_free_unused_pages(objspace);
5594
5595 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5596 rb_size_pool_t *size_pool = &size_pools[i];
5597
5598 /* if heap_pages has unused pages, then assign them to increment */
5599 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5600 if (size_pool->allocatable_pages < tomb_pages) {
5601 size_pool->allocatable_pages = tomb_pages;
5602 }
5603
5604#if USE_RVARGC
5605 size_pool->freed_slots = 0;
5606 size_pool->empty_slots = 0;
5607
5608#if GC_ENABLE_INCREMENTAL_MARK
5609 if (!will_be_incremental_marking(objspace)) {
5610 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
5611 struct heap_page *end_page = eden_heap->free_pages;
5612 if (end_page) {
5613 while (end_page->free_next) end_page = end_page->free_next;
5614 end_page->free_next = eden_heap->pooled_pages;
5615 }
5616 else {
5617 eden_heap->free_pages = eden_heap->pooled_pages;
5618 }
5619 eden_heap->pooled_pages = NULL;
5620 objspace->rincgc.pooled_slots = 0;
5621 }
5622#endif
5623#endif
5624 }
5625 heap_pages_expand_sorted(objspace);
5626
5627 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
5628 gc_mode_transition(objspace, gc_mode_none);
5629
5630#if RGENGC_CHECK_MODE >= 2
5631 gc_verify_internal_consistency(objspace);
5632#endif
5633}
5634
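/* Sweeps pages starting at heap->sweeping_page, one step of lazy sweeping.
 * Completely empty pages (up to a small per-step limit) are moved to the tomb
 * heap, pages with free slots are put on the free or pooled page lists, and
 * gc_sweep_finish() runs once every size pool has been swept.  Returns whether
 * the heap now has free pages. */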
5635static int
5636gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
5637{
5638 struct heap_page *sweep_page = heap->sweeping_page;
5639 int unlink_limit = 3;
5640
5641#if GC_ENABLE_INCREMENTAL_MARK
5642 int swept_slots = 0;
5643#if USE_RVARGC
5644 bool need_pool = TRUE;
5645#else
5646 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
5647#endif
5648
5649 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
5650#else
5651 gc_report(2, objspace, "gc_sweep_step\n");
5652#endif
5653
5654 if (sweep_page == NULL) return FALSE;
5655
5656#if GC_ENABLE_LAZY_SWEEP
5657 gc_prof_sweep_timer_start(objspace);
5658#endif
5659
5660 do {
5661 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
5662
5663 struct gc_sweep_context ctx = {
5664 .page = sweep_page,
5665 .final_slots = 0,
5666 .freed_slots = 0,
5667 .empty_slots = 0,
5668 };
5669 gc_sweep_page(objspace, size_pool, heap, &ctx);
5670 int free_slots = ctx.freed_slots + ctx.empty_slots;
5671
5672 heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
5673
5674 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
5675 heap_pages_freeable_pages > 0 &&
5676 unlink_limit > 0) {
5677 heap_pages_freeable_pages--;
5678 unlink_limit--;
5679 /* there are no living objects -> move this page to tomb heap */
5680 heap_unlink_page(objspace, heap, sweep_page);
5681 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
5682 }
5683 else if (free_slots > 0) {
5684#if USE_RVARGC
5685 size_pool->freed_slots += ctx.freed_slots;
5686 size_pool->empty_slots += ctx.empty_slots;
5687#endif
5688
5689#if GC_ENABLE_INCREMENTAL_MARK
5690 if (need_pool) {
5691 heap_add_poolpage(objspace, heap, sweep_page);
5692 need_pool = FALSE;
5693 }
5694 else {
5695 heap_add_freepage(heap, sweep_page);
5696 swept_slots += free_slots;
5697 if (swept_slots > 2048) {
5698 break;
5699 }
5700 }
5701#else
5702 heap_add_freepage(heap, sweep_page);
5703 break;
5704#endif
5705 }
5706 else {
5707 sweep_page->free_next = NULL;
5708 }
5709 } while ((sweep_page = heap->sweeping_page));
5710
5711 if (!heap->sweeping_page) {
5712#if USE_RVARGC
5713 gc_sweep_finish_size_pool(objspace, size_pool);
5714#endif
5715
5716 if (!has_sweeping_pages(objspace)) {
5717 gc_sweep_finish(objspace);
5718 }
5719 }
5720
5721#if GC_ENABLE_LAZY_SWEEP
5722 gc_prof_sweep_timer_stop(objspace);
5723#endif
5724
5725 return heap->free_pages != NULL;
5726}
5727
5728static void
5729gc_sweep_rest(rb_objspace_t *objspace)
5730{
5731 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5732 rb_size_pool_t *size_pool = &size_pools[i];
5733
5734 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
5735 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5736 }
5737 }
5738}
5739
5740static void
5741gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool, rb_heap_t *heap)
5742{
5743 GC_ASSERT(dont_gc_val() == FALSE);
5744 if (!GC_ENABLE_LAZY_SWEEP) return;
5745
5746 unsigned int lock_lev;
5747 gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
5748
5749 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5750 rb_size_pool_t *size_pool = &size_pools[i];
5751 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
5752#if USE_RVARGC
5753 /* sweep_size_pool requires a free slot but sweeping did not yield any. */
5754 if (size_pool == sweep_size_pool) {
5755 if (size_pool->allocatable_pages > 0) {
5756 heap_increment(objspace, size_pool, heap);
5757 }
5758 else {
5759 /* Not allowed to create a new page so finish sweeping. */
5760 gc_sweep_rest(objspace);
5761 break;
5762 }
5763 }
5764#endif
5765 }
5766 }
5767
5768 gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
5769}
5770
5771static void
5772invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
5773{
5774 if (bitset) {
5775 do {
5776 if (bitset & 1) {
5777 VALUE forwarding_object = (VALUE)p;
5778 VALUE object;
5779
5780 if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
5781 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
5782 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5783
5784 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
5785
5786 bool from_freelist = FL_TEST_RAW(forwarding_object, FL_FROM_FREELIST);
5787 object = rb_gc_location(forwarding_object);
5788
5789 gc_move(objspace, object, forwarding_object, page->slot_size);
5790 /* forwarding_object is now our actual object, and "object"
5791 * is the free slot for the original page */
5792 struct heap_page *orig_page = GET_HEAP_PAGE(object);
5793 orig_page->free_slots++;
5794 if (!from_freelist) {
5795 objspace->profile.total_freed_objects++;
5796 }
5797 heap_page_add_freeobj(objspace, orig_page, object);
5798
5799 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5800 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
5801 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
5802 }
5803 }
5804 p += sizeof(RVALUE);
5805 bitset >>= 1;
5806 } while (bitset);
5807 }
5808}
5809
5810static void
5811invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
5812{
5813 int i;
5814 bits_t *mark_bits, *pin_bits;
5815 bits_t bitset;
5816 RVALUE *p;
5817
5818 mark_bits = page->mark_bits;
5819 pin_bits = page->pinned_bits;
5820
5821 p = page->start;
5822
5823 // Skip out of range slots at the head of the page
5824 bitset = pin_bits[0] & ~mark_bits[0];
5825 bitset >>= NUM_IN_PAGE(p);
5826 invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
5827 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
5828
5829 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5830 /* Moved objects are pinned but never marked. We reuse the pin bits
5831 * to indicate there is a moved object in this slot. */
5832 bitset = pin_bits[i] & ~mark_bits[i];
5833
5834 invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
5835 p += BITS_BITLENGTH;
5836 }
5837}
5838
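/* gc_compact_start: flag every eden page as before_sweep, point each heap's
 * compact_cursor at its last page, reset the compaction statistics tables and
 * install the read-barrier handlers for pages that will hold T_MOVED slots. */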
5839static void
5840gc_compact_start(rb_objspace_t *objspace)
5841{
5842 struct heap_page *page = NULL;
5843
5844 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5845 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
5846 list_for_each(&heap->pages, page, page_node) {
5847 page->flags.before_sweep = TRUE;
5848 }
5849
5850 heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
5851 heap->compact_cursor_index = 0;
5852 }
5853
5854 if (gc_prof_enabled(objspace)) {
5855 gc_profile_record *record = gc_prof_record(objspace);
5856 record->moved_objects = objspace->rcompactor.total_moved;
5857 }
5858
5859 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
5860 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
5861
5862 /* Set up read barrier for pages containing MOVED objects */
5863 install_handlers();
5864}
5865
5866static void
5867gc_sweep(rb_objspace_t *objspace)
5868{
5869 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
5870
5871 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
5872
5873 if (immediate_sweep) {
5874#if !GC_ENABLE_LAZY_SWEEP
5875 gc_prof_sweep_timer_start(objspace);
5876#endif
5877 gc_sweep_start(objspace);
5878 if (objspace->flags.during_compacting) {
5879 gc_compact_start(objspace);
5880 }
5881
5882 gc_sweep_rest(objspace);
5883#if !GC_ENABLE_LAZY_SWEEP
5884 gc_prof_sweep_timer_stop(objspace);
5885#endif
5886 }
5887 else {
5888 struct heap_page *page = NULL;
5889 gc_sweep_start(objspace);
5890
5891 if (ruby_enable_autocompact && is_full_marking(objspace)) {
5892 gc_compact_start(objspace);
5893 }
5894
5895 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5896 list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
5897 page->flags.before_sweep = TRUE;
5898 }
5899 }
5900
5901 /* Sweep every size pool. */
5902 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5903 rb_size_pool_t *size_pool = &size_pools[i];
5904 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5905 }
5906 }
5907
5908#if !USE_RVARGC
5909 rb_size_pool_t *size_pool = &size_pools[0];
5910 gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5911#endif
5912}
5913
5914/* Marking - Marking stack */
5915
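/* The mark stack is a singly linked list of fixed-size chunks. Chunks popped
 * empty are kept in a small cache (stack->cache) so marking does not have to
 * malloc/free a chunk on every overflow/underflow; shrink_stack_chunk_cache
 * releases cache entries that stayed unused. */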
5916static stack_chunk_t *
5917stack_chunk_alloc(void)
5918{
5919 stack_chunk_t *res;
5920
5921 res = malloc(sizeof(stack_chunk_t));
5922 if (!res)
5923 rb_memerror();
5924
5925 return res;
5926}
5927
5928static inline int
5929is_mark_stack_empty(mark_stack_t *stack)
5930{
5931 return stack->chunk == NULL;
5932}
5933
5934static size_t
5935mark_stack_size(mark_stack_t *stack)
5936{
5937 size_t size = stack->index;
5938 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
5939
5940 while (chunk) {
5941 size += stack->limit;
5942 chunk = chunk->next;
5943 }
5944 return size;
5945}
5946
5947static void
5948add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
5949{
5950 chunk->next = stack->cache;
5951 stack->cache = chunk;
5952 stack->cache_size++;
5953}
5954
5955static void
5956shrink_stack_chunk_cache(mark_stack_t *stack)
5957{
5958 stack_chunk_t *chunk;
5959
5960 if (stack->unused_cache_size > (stack->cache_size/2)) {
5961 chunk = stack->cache;
5962 stack->cache = stack->cache->next;
5963 stack->cache_size--;
5964 free(chunk);
5965 }
5966 stack->unused_cache_size = stack->cache_size;
5967}
5968
5969static void
5970push_mark_stack_chunk(mark_stack_t *stack)
5971{
5972 stack_chunk_t *next;
5973
5974 GC_ASSERT(stack->index == stack->limit);
5975
5976 if (stack->cache_size > 0) {
5977 next = stack->cache;
5978 stack->cache = stack->cache->next;
5979 stack->cache_size--;
5980 if (stack->unused_cache_size > stack->cache_size)
5981 stack->unused_cache_size = stack->cache_size;
5982 }
5983 else {
5984 next = stack_chunk_alloc();
5985 }
5986 next->next = stack->chunk;
5987 stack->chunk = next;
5988 stack->index = 0;
5989}
5990
5991static void
5992pop_mark_stack_chunk(mark_stack_t *stack)
5993{
5994 stack_chunk_t *prev;
5995
5996 prev = stack->chunk->next;
5997 GC_ASSERT(stack->index == 0);
5998 add_stack_chunk_cache(stack, stack->chunk);
5999 stack->chunk = prev;
6000 stack->index = stack->limit;
6001}
6002
6003static void
6004mark_stack_chunk_list_free(stack_chunk_t *chunk)
6005{
6006 stack_chunk_t *next = NULL;
6007
6008 while (chunk != NULL) {
6009 next = chunk->next;
6010 free(chunk);
6011 chunk = next;
6012 }
6013}
6014
6015static void
6016free_stack_chunks(mark_stack_t *stack)
6017{
6018 mark_stack_chunk_list_free(stack->chunk);
6019}
6020
6021static void
6022mark_stack_free_cache(mark_stack_t *stack)
6023{
6024 mark_stack_chunk_list_free(stack->cache);
6025 stack->cache_size = 0;
6026 stack->unused_cache_size = 0;
6027}
6028
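/* push_mark_stack only accepts heap object types; immediates and internal
 * sentinel types (T_NONE, T_MOVED, T_ZOMBIE, ...) trigger rb_bug so corrupted
 * references fail fast instead of being traced. */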
6029static void
6030push_mark_stack(mark_stack_t *stack, VALUE data)
6031{
6032 VALUE obj = data;
6033 switch (BUILTIN_TYPE(obj)) {
6034 case T_OBJECT:
6035 case T_CLASS:
6036 case T_MODULE:
6037 case T_FLOAT:
6038 case T_STRING:
6039 case T_REGEXP:
6040 case T_ARRAY:
6041 case T_HASH:
6042 case T_STRUCT:
6043 case T_BIGNUM:
6044 case T_FILE:
6045 case T_DATA:
6046 case T_MATCH:
6047 case T_COMPLEX:
6048 case T_RATIONAL:
6049 case T_TRUE:
6050 case T_FALSE:
6051 case T_SYMBOL:
6052 case T_IMEMO:
6053 case T_ICLASS:
6054 if (stack->index == stack->limit) {
6055 push_mark_stack_chunk(stack);
6056 }
6057 stack->chunk->data[stack->index++] = data;
6058 return;
6059
6060 case T_NONE:
6061 case T_NIL:
6062 case T_FIXNUM:
6063 case T_MOVED:
6064 case T_ZOMBIE:
6065 case T_UNDEF:
6066 case T_MASK:
6067 rb_bug("push_mark_stack() called for broken object");
6068 break;
6069
6070 case T_NODE:
6071 UNEXPECTED_NODE(push_mark_stack);
6072 break;
6073 }
6074
6075 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6076 BUILTIN_TYPE(obj), (void *)data,
6077 is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6078}
6079
6080static int
6081pop_mark_stack(mark_stack_t *stack, VALUE *data)
6082{
6083 if (is_mark_stack_empty(stack)) {
6084 return FALSE;
6085 }
6086 if (stack->index == 1) {
6087 *data = stack->chunk->data[--stack->index];
6088 pop_mark_stack_chunk(stack);
6089 }
6090 else {
6091 *data = stack->chunk->data[--stack->index];
6092 }
6093 return TRUE;
6094}
6095
6096static void
6097init_mark_stack(mark_stack_t *stack)
6098{
6099 int i;
6100
6101 MEMZERO(stack, mark_stack_t, 1);
6102 stack->index = stack->limit = STACK_CHUNK_SIZE;
6103
6104 for (i=0; i < 4; i++) {
6105 add_stack_chunk_cache(stack, stack_chunk_alloc());
6106 }
6107 stack->unused_cache_size = stack->cache_size;
6108}
6109
6110/* Marking */
6111
6112#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6113
6114#define STACK_START (ec->machine.stack_start)
6115#define STACK_END (ec->machine.stack_end)
6116#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6117
6118#if STACK_GROW_DIRECTION < 0
6119# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6120#elif STACK_GROW_DIRECTION > 0
6121# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6122#else
6123# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6124 : (size_t)(STACK_END - STACK_START + 1))
6125#endif
6126#if !STACK_GROW_DIRECTION
6127int ruby_stack_grow_direction;
6128int
6129ruby_get_stack_grow_direction(volatile VALUE *addr)
6130{
6131 VALUE *end;
6132 SET_MACHINE_STACK_END(&end);
6133
6134 if (end > addr) return ruby_stack_grow_direction = 1;
6135 return ruby_stack_grow_direction = -1;
6136}
6137#endif
6138
6139size_t
6140ruby_stack_length(VALUE **p)
6141{
6142 rb_execution_context_t *ec = GET_EC();
6143 SET_STACK_END;
6144 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6145 return STACK_LENGTH;
6146}
6147
6148#define PREVENT_STACK_OVERFLOW 1
6149#ifndef PREVENT_STACK_OVERFLOW
6150#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6151# define PREVENT_STACK_OVERFLOW 1
6152#else
6153# define PREVENT_STACK_OVERFLOW 0
6154#endif
6155#endif
6156#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6157static int
6158stack_check(rb_execution_context_t *ec, int water_mark)
6159{
6160 SET_STACK_END;
6161
6162 size_t length = STACK_LENGTH;
6163 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6164
6165 return length > maximum_length;
6166}
6167#else
6168#define stack_check(ec, water_mark) FALSE
6169#endif
6170
6171#define STACKFRAME_FOR_CALL_CFUNC 2048
6172
6173MJIT_FUNC_EXPORTED int
6174rb_ec_stack_check(rb_execution_context_t *ec)
6175{
6176 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6177}
6178
6179int
6180ruby_stack_check(void)
6181{
6182 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6183}
6184
6185ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE)));
6186static void
6187each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE))
6188{
6189 VALUE v;
6190 while (n--) {
6191 v = *x;
6192 cb(objspace, v);
6193 x++;
6194 }
6195}
6196
6197static void
6198gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end, void (*cb)(rb_objspace_t *, VALUE))
6199{
6200 long n;
6201
6202 if (end <= start) return;
6203 n = end - start;
6204 each_location(objspace, start, n, cb);
6205}
6206
6207void
6208rb_gc_mark_locations(const VALUE *start, const VALUE *end)
6209{
6210 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6211}
6212
6213static void
6214gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
6215{
6216 long i;
6217
6218 for (i=0; i<n; i++) {
6219 gc_mark(objspace, values[i]);
6220 }
6221}
6222
6223void
6224rb_gc_mark_values(long n, const VALUE *values)
6225{
6226 long i;
6227 rb_objspace_t *objspace = &rb_objspace;
6228
6229 for (i=0; i<n; i++) {
6230 gc_mark_and_pin(objspace, values[i]);
6231 }
6232}
6233
6234static void
6235gc_mark_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
6236{
6237 long i;
6238
6239 for (i=0; i<n; i++) {
6240 if (is_markable_object(objspace, values[i])) {
6241 gc_mark_and_pin(objspace, values[i]);
6242 }
6243 }
6244}
6245
6246void
6247rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6248{
6249 rb_objspace_t *objspace = &rb_objspace;
6250 gc_mark_stack_values(objspace, n, values);
6251}
6252
6253static int
6254mark_value(st_data_t key, st_data_t value, st_data_t data)
6255{
6256 rb_objspace_t *objspace = (rb_objspace_t *)data;
6257 gc_mark(objspace, (VALUE)value);
6258 return ST_CONTINUE;
6259}
6260
6261static int
6262mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6263{
6264 rb_objspace_t *objspace = (rb_objspace_t *)data;
6265 gc_mark_and_pin(objspace, (VALUE)value);
6266 return ST_CONTINUE;
6267}
6268
6269static void
6270mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
6271{
6272 if (!tbl || tbl->num_entries == 0) return;
6273 st_foreach(tbl, mark_value, (st_data_t)objspace);
6274}
6275
6276static void
6277mark_tbl(rb_objspace_t *objspace, st_table *tbl)
6278{
6279 if (!tbl || tbl->num_entries == 0) return;
6280 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6281}
6282
6283static int
6284mark_key(st_data_t key, st_data_t value, st_data_t data)
6285{
6286 rb_objspace_t *objspace = (rb_objspace_t *)data;
6287 gc_mark_and_pin(objspace, (VALUE)key);
6288 return ST_CONTINUE;
6289}
6290
6291static void
6292mark_set(rb_objspace_t *objspace, st_table *tbl)
6293{
6294 if (!tbl) return;
6295 st_foreach(tbl, mark_key, (st_data_t)objspace);
6296}
6297
6298static int
6299pin_value(st_data_t key, st_data_t value, st_data_t data)
6300{
6301 rb_objspace_t *objspace = (rb_objspace_t *)data;
6302 gc_mark_and_pin(objspace, (VALUE)value);
6303 return ST_CONTINUE;
6304}
6305
6306static void
6307mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
6308{
6309 if (!tbl) return;
6310 st_foreach(tbl, pin_value, (st_data_t)objspace);
6311}
6312
6313void
6314rb_mark_set(st_table *tbl)
6315{
6316 mark_set(&rb_objspace, tbl);
6317}
6318
6319static int
6320mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6321{
6322 rb_objspace_t *objspace = (rb_objspace_t *)data;
6323
6324 gc_mark(objspace, (VALUE)key);
6325 gc_mark(objspace, (VALUE)value);
6326 return ST_CONTINUE;
6327}
6328
6329static int
6330pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6331{
6332 rb_objspace_t *objspace = (rb_objspace_t *)data;
6333
6334 gc_mark_and_pin(objspace, (VALUE)key);
6335 gc_mark_and_pin(objspace, (VALUE)value);
6336 return ST_CONTINUE;
6337}
6338
6339static int
6340pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6341{
6342 rb_objspace_t *objspace = (rb_objspace_t *)data;
6343
6344 gc_mark_and_pin(objspace, (VALUE)key);
6345 gc_mark(objspace, (VALUE)value);
6346 return ST_CONTINUE;
6347}
6348
6349static void
6350mark_hash(rb_objspace_t *objspace, VALUE hash)
6351{
6352 if (rb_hash_compare_by_id_p(hash)) {
6353 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6354 }
6355 else {
6356 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6357 }
6358
6359 if (RHASH_AR_TABLE_P(hash)) {
6360 if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
6361 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
6362 }
6363 }
6364 else {
6365 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
6366 }
6367 gc_mark(objspace, RHASH(hash)->ifnone);
6368}
6369
6370static void
6371mark_st(rb_objspace_t *objspace, st_table *tbl)
6372{
6373 if (!tbl) return;
6374 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6375}
6376
6377void
6378rb_mark_hash(st_table *tbl)
6379{
6380 mark_st(&rb_objspace, tbl);
6381}
6382
6383static void
6384mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
6385{
6386 const rb_method_definition_t *def = me->def;
6387
6388 gc_mark(objspace, me->owner);
6389 gc_mark(objspace, me->defined_class);
6390
6391 if (def) {
6392 switch (def->type) {
6393 case VM_METHOD_TYPE_ISEQ:
6394 if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
6395 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6396
6397 if (def->iseq_overload && me->defined_class) {
6398 // it can be a key of "overloaded_cme" table
6399 // so it should be pinned.
6400 gc_mark_and_pin(objspace, (VALUE)me);
6401 }
6402 break;
6403 case VM_METHOD_TYPE_ATTRSET:
6404 case VM_METHOD_TYPE_IVAR:
6405 gc_mark(objspace, def->body.attr.location);
6406 break;
6407 case VM_METHOD_TYPE_BMETHOD:
6408 gc_mark(objspace, def->body.bmethod.proc);
6409 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6410 break;
6411 case VM_METHOD_TYPE_ALIAS:
6412 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6413 return;
6414 case VM_METHOD_TYPE_REFINED:
6415 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6416 gc_mark(objspace, (VALUE)def->body.refined.owner);
6417 break;
6418 case VM_METHOD_TYPE_CFUNC:
6419 case VM_METHOD_TYPE_ZSUPER:
6420 case VM_METHOD_TYPE_MISSING:
6421 case VM_METHOD_TYPE_OPTIMIZED:
6422 case VM_METHOD_TYPE_UNDEF:
6423 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6424 break;
6425 }
6426 }
6427}
6428
6429static enum rb_id_table_iterator_result
6430mark_method_entry_i(VALUE me, void *data)
6431{
6432 rb_objspace_t *objspace = (rb_objspace_t *)data;
6433
6434 gc_mark(objspace, me);
6435 return ID_TABLE_CONTINUE;
6436}
6437
6438static void
6439mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6440{
6441 if (tbl) {
6442 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6443 }
6444}
6445
6446static enum rb_id_table_iterator_result
6447mark_const_entry_i(VALUE value, void *data)
6448{
6449 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
6450 rb_objspace_t *objspace = data;
6451
6452 gc_mark(objspace, ce->value);
6453 gc_mark(objspace, ce->file);
6454 return ID_TABLE_CONTINUE;
6455}
6456
6457static void
6458mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6459{
6460 if (!tbl) return;
6461 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6462}
6463
6464#if STACK_GROW_DIRECTION < 0
6465#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6466#elif STACK_GROW_DIRECTION > 0
6467#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6468#else
6469#define GET_STACK_BOUNDS(start, end, appendix) \
6470 ((STACK_END < STACK_START) ? \
6471 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6472#endif
6473
6474static void each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6475 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE));
6476
6477#ifndef __EMSCRIPTEN__
6478static void
6479mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6480{
6481 union {
6482 rb_jmp_buf j;
6483 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6484 } save_regs_gc_mark;
6485 VALUE *stack_start, *stack_end;
6486
6487 FLUSH_REGISTER_WINDOWS;
6488 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6489 /* This assumes that all registers are saved into the jmp_buf (and stack) */
6490 rb_setjmp(save_regs_gc_mark.j);
6491
6492 /* SET_STACK_END must be called in this function because
6493 * the stack frame of this function may contain
6494 * callee-saved registers, and they should be marked. */
6495 SET_STACK_END;
6496 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6497
6498 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6499
6500 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6501}
6502#else
6503
6504static VALUE *rb_emscripten_stack_range_tmp[2];
6505
6506static void
6507rb_emscripten_mark_locations(void *begin, void *end)
6508{
6509 rb_emscripten_stack_range_tmp[0] = begin;
6510 rb_emscripten_stack_range_tmp[1] = end;
6511}
6512
6513static void
6514mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6515{
6516 emscripten_scan_stack(rb_emscripten_mark_locations);
6517 each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);
6518
6519 emscripten_scan_registers(rb_emscripten_mark_locations);
6520 each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);
6521}
6522#endif
6523
6524static void
6525each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE))
6526{
6527 rb_objspace_t *objspace = &rb_objspace;
6528 VALUE *stack_start, *stack_end;
6529
6530 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6531 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6532}
6533
6534void
6535rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
6536{
6537 each_machine_stack_value(ec, gc_mark_maybe);
6538}
6539
6540static void
6541each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6542 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE))
6543{
6544
6545 gc_mark_locations(objspace, stack_start, stack_end, cb);
6546
6547#if defined(__mc68000__)
6548 gc_mark_locations(objspace,
6549 (VALUE*)((char*)stack_start + 2),
6550 (VALUE*)((char*)stack_end - 2), cb);
6551#endif
6552}
6553
6554void
6555rb_mark_tbl(st_table *tbl)
6556{
6557 mark_tbl(&rb_objspace, tbl);
6558}
6559
6560void
6561rb_mark_tbl_no_pin(st_table *tbl)
6562{
6563 mark_tbl_no_pin(&rb_objspace, tbl);
6564}
6565
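/* gc_mark_maybe: conservative marking for values that merely look like object
 * references (e.g. words found on the machine stack). Only pointers into the
 * heap are considered, and whatever gets marked is also pinned so compaction
 * will not move it out from under the stack. */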
6566static void
6567gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
6568{
6569 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6570
6571 if (is_pointer_to_heap(objspace, (void *)obj)) {
6572 void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
6573 asan_unpoison_object(obj, false);
6574
6575 /* Garbage can live on the stack, so do not mark or pin */
6576 switch (BUILTIN_TYPE(obj)) {
6577 case T_ZOMBIE:
6578 case T_NONE:
6579 break;
6580 default:
6581 gc_mark_and_pin(objspace, obj);
6582 break;
6583 }
6584
6585 if (ptr) {
6586 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
6587 asan_poison_object(obj);
6588 }
6589 }
6590}
6591
6592void
6593rb_gc_mark_maybe(VALUE obj)
6594{
6595 gc_mark_maybe(&rb_objspace, obj);
6596}
6597
6598static inline int
6599gc_mark_set(rb_objspace_t *objspace, VALUE obj)
6600{
6601 ASSERT_vm_locking();
6602 if (RVALUE_MARKED(obj)) return 0;
6603 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6604 return 1;
6605}
6606
6607static int
6608gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
6609{
6610 struct heap_page *page = GET_HEAP_PAGE(obj);
6611 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6612
6613 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6614 page->flags.has_uncollectible_shady_objects = TRUE;
6615 MARK_IN_BITMAP(uncollectible_bits, obj);
6616 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6617
6618#if RGENGC_PROFILE > 0
6619 objspace->profile.total_remembered_shady_object_count++;
6620#if RGENGC_PROFILE >= 2
6621 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
6622#endif
6623#endif
6624 return TRUE;
6625 }
6626 else {
6627 return FALSE;
6628 }
6629}
6630
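/* rgengc_check_relation: maintain the generational invariant while marking
 * from an old parent. WB-unprotected children are remembered as uncollectible;
 * marked young children are promoted to old (and re-greyed or remembered);
 * unmarked young children become promotion candidates. */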
6631static void
6632rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
6633{
6634 const VALUE old_parent = objspace->rgengc.parent_object;
6635
6636 if (old_parent) { /* parent object is old */
6637 if (RVALUE_WB_UNPROTECTED(obj)) {
6638 if (gc_remember_unprotected(objspace, obj)) {
6639 gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6640 }
6641 }
6642 else {
6643 if (!RVALUE_OLD_P(obj)) {
6644 if (RVALUE_MARKED(obj)) {
6645 /* An object pointed to from an OLD object should itself be OLD. */
6646 gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6647 RVALUE_AGE_SET_OLD(objspace, obj);
6648 if (is_incremental_marking(objspace)) {
6649 if (!RVALUE_MARKING(obj)) {
6650 gc_grey(objspace, obj);
6651 }
6652 }
6653 else {
6654 rgengc_remember(objspace, obj);
6655 }
6656 }
6657 else {
6658 gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6659 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
6660 }
6661 }
6662 }
6663 }
6664
6665 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
6666}
6667
6668static void
6669gc_grey(rb_objspace_t *objspace, VALUE obj)
6670{
6671#if RGENGC_CHECK_MODE
6672 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
6673 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
6674#endif
6675
6676#if GC_ENABLE_INCREMENTAL_MARK
6677 if (is_incremental_marking(objspace)) {
6678 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
6679 }
6680#endif
6681
6682 push_mark_stack(&objspace->mark_stack, obj);
6683}
6684
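/* gc_aging: bump the age of a WB-protected object when it is marked; during a
 * full mark an already-old object is recorded as uncollectible on its page.
 * Every object that reaches here is counted in objspace->marked_slots. */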
6685static void
6686gc_aging(rb_objspace_t *objspace, VALUE obj)
6687{
6688 struct heap_page *page = GET_HEAP_PAGE(obj);
6689
6690 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
6691 check_rvalue_consistency(obj);
6692
6693 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
6694 if (!RVALUE_OLD_P(obj)) {
6695 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
6696 RVALUE_AGE_INC(objspace, obj);
6697 }
6698 else if (is_full_marking(objspace)) {
6699 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
6700 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
6701 }
6702 }
6703 check_rvalue_consistency(obj);
6704
6705 objspace->marked_slots++;
6706}
6707
6708NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
6709static void reachable_objects_from_callback(VALUE obj);
6710
6711static void
6712gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
6713{
6714 if (LIKELY(during_gc)) {
6715 rgengc_check_relation(objspace, obj);
6716 if (!gc_mark_set(objspace, obj)) return; /* already marked */
6717
6718 if (0) { // for debug GC marking miss
6719 if (objspace->rgengc.parent_object) {
6720 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
6721 (void *)obj, obj_type_name(obj),
6722 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
6723 }
6724 else {
6725 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
6726 }
6727 }
6728
6729 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
6730 rp(obj);
6731 rb_bug("try to mark T_NONE object"); /* check here will help debugging */
6732 }
6733 gc_aging(objspace, obj);
6734 gc_grey(objspace, obj);
6735 }
6736 else {
6737 reachable_objects_from_callback(obj);
6738 }
6739}
6740
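/* gc_pin records the object in its page's pinned bitmap (only while a
 * compacting GC is running), so gc_mark_and_pin marks an object that must not
 * move, while gc_mark marks one that the compactor may relocate. */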
6741static inline void
6742gc_pin(rb_objspace_t *objspace, VALUE obj)
6743{
6744 GC_ASSERT(is_markable_object(objspace, obj));
6745 if (UNLIKELY(objspace->flags.during_compacting)) {
6746 if (LIKELY(during_gc)) {
6747 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
6748 }
6749 }
6750}
6751
6752static inline void
6753gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
6754{
6755 if (!is_markable_object(objspace, obj)) return;
6756 gc_pin(objspace, obj);
6757 gc_mark_ptr(objspace, obj);
6758}
6759
6760static inline void
6761gc_mark(rb_objspace_t *objspace, VALUE obj)
6762{
6763 if (!is_markable_object(objspace, obj)) return;
6764 gc_mark_ptr(objspace, obj);
6765}
6766
6767void
6768rb_gc_mark_movable(VALUE ptr)
6769{
6770 gc_mark(&rb_objspace, ptr);
6771}
6772
6773void
6774rb_gc_mark(VALUE ptr)
6775{
6776 gc_mark_and_pin(&rb_objspace, ptr);
6777}
6778
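/* Illustrative sketch (not from this file) of how a C extension's dmark
 * callback typically uses the two entry points above; the struct and field
 * names are hypothetical:
 *
 *   static void wrapper_mark(void *p) {
 *       struct wrapper *w = p;
 *       rb_gc_mark_movable(w->obj);        // may be relocated by compaction
 *       rb_gc_mark(w->pinned_obj);         // pinned, never moved
 *   }
 */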
6779/* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
6780 * It is intended only for the GC_END_MARK timing.
6781 */
6782
6783int
6784rb_objspace_marked_object_p(VALUE obj)
6785{
6786 return RVALUE_MARKED(obj) ? TRUE : FALSE;
6787}
6788
6789static inline void
6790gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
6791{
6792 if (RVALUE_OLD_P(obj)) {
6793 objspace->rgengc.parent_object = obj;
6794 }
6795 else {
6796 objspace->rgengc.parent_object = Qfalse;
6797 }
6798}
6799
6800static void
6801gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
6802{
6803 switch (imemo_type(obj)) {
6804 case imemo_env:
6805 {
6806 const rb_env_t *env = (const rb_env_t *)obj;
6807
6808 if (LIKELY(env->ep)) {
6809 // env->ep can still be NULL just after newobj(), hence this check.
6810 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
6811 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
6812 gc_mark_values(objspace, (long)env->env_size, env->env);
6813 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
6814 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
6815 gc_mark(objspace, (VALUE)env->iseq);
6816 }
6817 }
6818 return;
6819 case imemo_cref:
6820 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
6821 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
6822 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
6823 return;
6824 case imemo_svar:
6825 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
6826 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
6827 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
6828 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
6829 return;
6830 case imemo_throw_data:
6831 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
6832 return;
6833 case imemo_ifunc:
6834 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
6835 return;
6836 case imemo_memo:
6837 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
6838 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
6839 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
6840 return;
6841 case imemo_ment:
6842 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
6843 return;
6844 case imemo_iseq:
6845 rb_iseq_mark((rb_iseq_t *)obj);
6846 return;
6847 case imemo_tmpbuf:
6848 {
6849 const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
6850 do {
6851 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
6852 } while ((m = m->next) != NULL);
6853 }
6854 return;
6855 case imemo_ast:
6856 rb_ast_mark(&RANY(obj)->as.imemo.ast);
6857 return;
6858 case imemo_parser_strterm:
6859 rb_strterm_mark(obj);
6860 return;
6861 case imemo_callinfo:
6862 return;
6863 case imemo_callcache:
6864 {
6865 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
6866 // should not mark klass here
6867 gc_mark(objspace, (VALUE)vm_cc_cme(cc));
6868 }
6869 return;
6870 case imemo_constcache:
6871 {
6872 const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
6873 gc_mark(objspace, ice->value);
6874 }
6875 return;
6876#if VM_CHECK_MODE > 0
6877 default:
6878 VM_UNREACHABLE(gc_mark_imemo);
6879#endif
6880 }
6881}
6882
6883static void
6884gc_mark_children(rb_objspace_t *objspace, VALUE obj)
6885{
6886 register RVALUE *any = RANY(obj);
6887 gc_mark_set_parent(objspace, obj);
6888
6889 if (FL_TEST(obj, FL_EXIVAR)) {
6890 rb_mark_generic_ivar(obj);
6891 }
6892
6893 switch (BUILTIN_TYPE(obj)) {
6894 case T_FLOAT:
6895 case T_BIGNUM:
6896 case T_SYMBOL:
6897 /* Not immediates, but they have no references and no singleton
6898 * class */
6899 return;
6900
6901 case T_NIL:
6902 case T_FIXNUM:
6903 rb_bug("rb_gc_mark() called for broken object");
6904 break;
6905
6906 case T_NODE:
6907 UNEXPECTED_NODE(rb_gc_mark);
6908 break;
6909
6910 case T_IMEMO:
6911 gc_mark_imemo(objspace, obj);
6912 return;
6913
6914 default:
6915 break;
6916 }
6917
6918 gc_mark(objspace, any->as.basic.klass);
6919
6920 switch (BUILTIN_TYPE(obj)) {
6921 case T_CLASS:
6922 case T_MODULE:
6923 if (RCLASS_SUPER(obj)) {
6924 gc_mark(objspace, RCLASS_SUPER(obj));
6925 }
6926 if (!RCLASS_EXT(obj)) break;
6927
6928 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6929 cc_table_mark(objspace, obj);
6930 mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
6931 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
6932 break;
6933
6934 case T_ICLASS:
6935 if (RICLASS_OWNS_M_TBL_P(obj)) {
6936 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6937 }
6938 if (RCLASS_SUPER(obj)) {
6939 gc_mark(objspace, RCLASS_SUPER(obj));
6940 }
6941 if (!RCLASS_EXT(obj)) break;
6942 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
6943 cc_table_mark(objspace, obj);
6944 break;
6945
6946 case T_ARRAY:
6947 if (FL_TEST(obj, ELTS_SHARED)) {
6948 VALUE root = any->as.array.as.heap.aux.shared_root;
6949 gc_mark(objspace, root);
6950 }
6951 else {
6952 long i, len = RARRAY_LEN(obj);
6953 const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
6954 for (i=0; i < len; i++) {
6955 gc_mark(objspace, ptr[i]);
6956 }
6957
6958 if (LIKELY(during_gc)) {
6959 if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
6960 RARRAY_TRANSIENT_P(obj)) {
6961 rb_transient_heap_mark(obj, ptr);
6962 }
6963 }
6964 }
6965 break;
6966
6967 case T_HASH:
6968 mark_hash(objspace, obj);
6969 break;
6970
6971 case T_STRING:
6972 if (STR_SHARED_P(obj)) {
6973 gc_mark(objspace, any->as.string.as.heap.aux.shared);
6974 }
6975 break;
6976
6977 case T_DATA:
6978 {
6979 void *const ptr = DATA_PTR(obj);
6980 if (ptr) {
6981 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
6982 any->as.typeddata.type->function.dmark :
6983 any->as.data.dmark;
6984 if (mark_func) (*mark_func)(ptr);
6985 }
6986 }
6987 break;
6988
6989 case T_OBJECT:
6990 {
6991 const VALUE * const ptr = ROBJECT_IVPTR(obj);
6992
6993 uint32_t i, len = ROBJECT_NUMIV(obj);
6994 for (i = 0; i < len; i++) {
6995 gc_mark(objspace, ptr[i]);
6996 }
6997
6998 if (LIKELY(during_gc) &&
6999 ROBJ_TRANSIENT_P(obj)) {
7000 rb_transient_heap_mark(obj, ptr);
7001 }
7002 }
7003 break;
7004
7005 case T_FILE:
7006 if (any->as.file.fptr) {
7007 gc_mark(objspace, any->as.file.fptr->self);
7008 gc_mark(objspace, any->as.file.fptr->pathv);
7009 gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
7010 gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
7011 gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
7012 gc_mark(objspace, any->as.file.fptr->encs.ecopts);
7013 gc_mark(objspace, any->as.file.fptr->write_lock);
7014 }
7015 break;
7016
7017 case T_REGEXP:
7018 gc_mark(objspace, any->as.regexp.src);
7019 break;
7020
7021 case T_MATCH:
7022 gc_mark(objspace, any->as.match.regexp);
7023 if (any->as.match.str) {
7024 gc_mark(objspace, any->as.match.str);
7025 }
7026 break;
7027
7028 case T_RATIONAL:
7029 gc_mark(objspace, any->as.rational.num);
7030 gc_mark(objspace, any->as.rational.den);
7031 break;
7032
7033 case T_COMPLEX:
7034 gc_mark(objspace, any->as.complex.real);
7035 gc_mark(objspace, any->as.complex.imag);
7036 break;
7037
7038 case T_STRUCT:
7039 {
7040 long i;
7041 const long len = RSTRUCT_LEN(obj);
7042 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
7043
7044 for (i=0; i<len; i++) {
7045 gc_mark(objspace, ptr[i]);
7046 }
7047
7048 if (LIKELY(during_gc) &&
7049 RSTRUCT_TRANSIENT_P(obj)) {
7050 rb_transient_heap_mark(obj, ptr);
7051 }
7052 }
7053 break;
7054
7055 default:
7056#if GC_DEBUG
7057 rb_gcdebug_print_obj_condition((VALUE)obj);
7058#endif
7059 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
7060 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
7061 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
7062 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7063 BUILTIN_TYPE(obj), (void *)any,
7064 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7065 }
7066}
7067
7068/**
7069 * incremental: 0 -> not incremental (do all)
7070 * incremental: n -> mark at most `n' objects
7071 */
7072static inline int
7073gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7074{
7075 mark_stack_t *mstack = &objspace->mark_stack;
7076 VALUE obj;
7077#if GC_ENABLE_INCREMENTAL_MARK
7078 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7079 size_t popped_count = 0;
7080#endif
7081
7082 while (pop_mark_stack(mstack, &obj)) {
7083 if (obj == Qundef) continue; /* skip */
7084
7085 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7086 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7087 }
7088 gc_mark_children(objspace, obj);
7089
7090#if GC_ENABLE_INCREMENTAL_MARK
7091 if (incremental) {
7092 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7093 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7094 }
7095 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7096 popped_count++;
7097
7098 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7099 break;
7100 }
7101 }
7102 else {
7103 /* just ignore marking bits */
7104 }
7105#endif
7106 }
7107
7108 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7109
7110 if (is_mark_stack_empty(mstack)) {
7111 shrink_stack_chunk_cache(mstack);
7112 return TRUE;
7113 }
7114 else {
7115 return FALSE;
7116 }
7117}
7118
7119static int
7120gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7121{
7122 return gc_mark_stacked_objects(objspace, TRUE, count);
7123}
7124
7125static int
7126gc_mark_stacked_objects_all(rb_objspace_t *objspace)
7127{
7128 return gc_mark_stacked_objects(objspace, FALSE, 0);
7129}
7130
7131#if PRINT_ROOT_TICKS
7132#define MAX_TICKS 0x100
7133static tick_t mark_ticks[MAX_TICKS];
7134static const char *mark_ticks_categories[MAX_TICKS];
7135
7136static void
7137show_mark_ticks(void)
7138{
7139 int i;
7140 fprintf(stderr, "mark ticks result:\n");
7141 for (i=0; i<MAX_TICKS; i++) {
7142 const char *category = mark_ticks_categories[i];
7143 if (category) {
7144 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
7145 }
7146 else {
7147 break;
7148 }
7149 }
7150}
7151
7152#endif /* PRINT_ROOT_TICKS */
7153
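/* gc_mark_roots: mark the GC roots in named phases (VM structures, the
 * finalizer table, the machine context, protected global variables, end
 * procs, the global table and object ids); *categoryp reports which phase is
 * running, for profiling and the consistency checkers. */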
7154static void
7155gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7156{
7157 struct gc_list *list;
7158 rb_execution_context_t *ec = GET_EC();
7159 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7160
7161#if PRINT_ROOT_TICKS
7162 tick_t start_tick = tick();
7163 int tick_count = 0;
7164 const char *prev_category = 0;
7165
7166 if (mark_ticks_categories[0] == 0) {
7167 atexit(show_mark_ticks);
7168 }
7169#endif
7170
7171 if (categoryp) *categoryp = "xxx";
7172
7173 objspace->rgengc.parent_object = Qfalse;
7174
7175#if PRINT_ROOT_TICKS
7176#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7177 if (prev_category) { \
7178 tick_t t = tick(); \
7179 mark_ticks[tick_count] = t - start_tick; \
7180 mark_ticks_categories[tick_count] = prev_category; \
7181 tick_count++; \
7182 } \
7183 prev_category = category; \
7184 start_tick = tick(); \
7185} while (0)
7186#else /* PRINT_ROOT_TICKS */
7187#define MARK_CHECKPOINT_PRINT_TICK(category)
7188#endif
7189
7190#define MARK_CHECKPOINT(category) do { \
7191 if (categoryp) *categoryp = category; \
7192 MARK_CHECKPOINT_PRINT_TICK(category); \
7193} while (0)
7194
7195 MARK_CHECKPOINT("vm");
7196 SET_STACK_END;
7197 rb_vm_mark(vm);
7198 if (vm->self) gc_mark(objspace, vm->self);
7199
7200 MARK_CHECKPOINT("finalizers");
7201 mark_finalizer_tbl(objspace, finalizer_table);
7202
7203 MARK_CHECKPOINT("machine_context");
7204 mark_current_machine_context(objspace, ec);
7205
7206 /* mark protected global variables */
7207 MARK_CHECKPOINT("global_list");
7208 for (list = global_list; list; list = list->next) {
7209 gc_mark_maybe(objspace, *list->varptr);
7210 }
7211
7212 MARK_CHECKPOINT("end_proc");
7213 rb_mark_end_proc();
7214
7215 MARK_CHECKPOINT("global_tbl");
7216 rb_gc_mark_global_tbl();
7217
7218 MARK_CHECKPOINT("object_id");
7219 rb_gc_mark(objspace->next_object_id);
7220 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
7221
7222 if (stress_to_class) rb_gc_mark(stress_to_class);
7223
7224 MARK_CHECKPOINT("finish");
7225#undef MARK_CHECKPOINT
7226}
7227
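/* The RGENGC_CHECK_MODE >= 4 machinery below records, for every reachable
 * object, the list of roots and objects that referenced it (objspace_allrefs),
 * so a missed mark can be reported together with its referrers. */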
7228#if RGENGC_CHECK_MODE >= 4
7229
7230#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7231#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7232#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7233
7234struct reflist {
7235 VALUE *list;
7236 int pos;
7237 int size;
7238};
7239
7240static struct reflist *
7241reflist_create(VALUE obj)
7242{
7243 struct reflist *refs = xmalloc(sizeof(struct reflist));
7244 refs->size = 1;
7245 refs->list = ALLOC_N(VALUE, refs->size);
7246 refs->list[0] = obj;
7247 refs->pos = 1;
7248 return refs;
7249}
7250
7251static void
7252reflist_destruct(struct reflist *refs)
7253{
7254 xfree(refs->list);
7255 xfree(refs);
7256}
7257
7258static void
7259reflist_add(struct reflist *refs, VALUE obj)
7260{
7261 if (refs->pos == refs->size) {
7262 refs->size *= 2;
7263 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7264 }
7265
7266 refs->list[refs->pos++] = obj;
7267}
7268
7269static void
7270reflist_dump(struct reflist *refs)
7271{
7272 int i;
7273 for (i=0; i<refs->pos; i++) {
7274 VALUE obj = refs->list[i];
7275 if (IS_ROOTSIG(obj)) { /* root */
7276 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7277 }
7278 else {
7279 fprintf(stderr, "<%s>", obj_info(obj));
7280 }
7281 if (i+1 < refs->pos) fprintf(stderr, ", ");
7282 }
7283}
7284
7285static int
7286reflist_referred_from_machine_context(struct reflist *refs)
7287{
7288 int i;
7289 for (i=0; i<refs->pos; i++) {
7290 VALUE obj = refs->list[i];
7291 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7292 }
7293 return 0;
7294}
7295
7296struct allrefs {
7297 rb_objspace_t *objspace;
7298 /* a -> obj1
7299 * b -> obj1
7300 * c -> obj1
7301 * c -> obj2
7302 * d -> obj3
7303 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
7304 */
7305 struct st_table *references;
7306 const char *category;
7307 VALUE root_obj;
7308 mark_stack_t mark_stack;
7309};
7310
7311static int
7312allrefs_add(struct allrefs *data, VALUE obj)
7313{
7314 struct reflist *refs;
7315 st_data_t r;
7316
7317 if (st_lookup(data->references, obj, &r)) {
7318 refs = (struct reflist *)r;
7319 reflist_add(refs, data->root_obj);
7320 return 0;
7321 }
7322 else {
7323 refs = reflist_create(data->root_obj);
7324 st_insert(data->references, obj, (st_data_t)refs);
7325 return 1;
7326 }
7327}
7328
7329static void
7330allrefs_i(VALUE obj, void *ptr)
7331{
7332 struct allrefs *data = (struct allrefs *)ptr;
7333
7334 if (allrefs_add(data, obj)) {
7335 push_mark_stack(&data->mark_stack, obj);
7336 }
7337}
7338
7339static void
7340allrefs_roots_i(VALUE obj, void *ptr)
7341{
7342 struct allrefs *data = (struct allrefs *)ptr;
7343 if (strlen(data->category) == 0) rb_bug("!!!");
7344 data->root_obj = MAKE_ROOTSIG(data->category);
7345
7346 if (allrefs_add(data, obj)) {
7347 push_mark_stack(&data->mark_stack, obj);
7348 }
7349}
7350#define PUSH_MARK_FUNC_DATA(v) do { \
7351 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7352 GET_RACTOR()->mfd = (v);
7353
7354#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7355
7356static st_table *
7357objspace_allrefs(rb_objspace_t *objspace)
7358{
7359 struct allrefs data;
7360 struct gc_mark_func_data_struct mfd;
7361 VALUE obj;
7362 int prev_dont_gc = dont_gc_val();
7363 dont_gc_on();
7364
7365 data.objspace = objspace;
7366 data.references = st_init_numtable();
7367 init_mark_stack(&data.mark_stack);
7368
7369 mfd.mark_func = allrefs_roots_i;
7370 mfd.data = &data;
7371
7372 /* traverse root objects */
7373 PUSH_MARK_FUNC_DATA(&mfd);
7374 GET_RACTOR()->mfd = &mfd;
7375 gc_mark_roots(objspace, &data.category);
7376 POP_MARK_FUNC_DATA();
7377
7378 /* traverse rest objects reachable from root objects */
7379 while (pop_mark_stack(&data.mark_stack, &obj)) {
7380 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7381 }
7382 free_stack_chunks(&data.mark_stack);
7383
7384 dont_gc_set(prev_dont_gc);
7385 return data.references;
7386}
7387
7388static int
7389objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7390{
7391 struct reflist *refs = (struct reflist *)value;
7392 reflist_destruct(refs);
7393 return ST_CONTINUE;
7394}
7395
7396static void
7397objspace_allrefs_destruct(struct st_table *refs)
7398{
7399 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7400 st_free_table(refs);
7401}
7402
7403#if RGENGC_CHECK_MODE >= 5
7404static int
7405allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7406{
7407 VALUE obj = (VALUE)k;
7408 struct reflist *refs = (struct reflist *)v;
7409 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7410 reflist_dump(refs);
7411 fprintf(stderr, "\n");
7412 return ST_CONTINUE;
7413}
7414
7415static void
7416allrefs_dump(rb_objspace_t *objspace)
7417{
7418 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7419 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7420 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7421}
7422#endif
7423
7424static int
7425gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7426{
7427 VALUE obj = k;
7428 struct reflist *refs = (struct reflist *)v;
7429 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
7430
7431 /* object should be marked or oldgen */
7432 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7433 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7434 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7435 reflist_dump(refs);
7436
7437 if (reflist_referred_from_machine_context(refs)) {
7438 fprintf(stderr, " (marked from machine stack).\n");
7439 /* marks that come from the machine context can be false positives */
7440 }
7441 else {
7442 objspace->rgengc.error_count++;
7443 fprintf(stderr, "\n");
7444 }
7445 }
7446 return ST_CONTINUE;
7447}
7448
7449static void
7450gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7451{
7452 size_t saved_malloc_increase = objspace->malloc_params.increase;
7453#if RGENGC_ESTIMATE_OLDMALLOC
7454 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7455#endif
7456 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7457
7458 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7459
7460 if (checker_func) {
7461 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7462 }
7463
7464 if (objspace->rgengc.error_count > 0) {
7465#if RGENGC_CHECK_MODE >= 5
7466 allrefs_dump(objspace);
7467#endif
7468 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7469 }
7470
7471 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7472 objspace->rgengc.allrefs_table = 0;
7473
7474 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7475 objspace->malloc_params.increase = saved_malloc_increase;
7476#if RGENGC_ESTIMATE_OLDMALLOC
7477 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7478#endif
7479}
7480#endif /* RGENGC_CHECK_MODE >= 4 */
7481
7482struct verify_internal_consistency_struct {
7483 rb_objspace_t *objspace;
7484 int err_count;
7485 size_t live_object_count;
7486 size_t zombie_object_count;
7487
7488 VALUE parent;
7489 size_t old_object_count;
7490 size_t remembered_shady_count;
7491};
7492
7493static void
7494check_generation_i(const VALUE child, void *ptr)
7495{
7496 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7497 const VALUE parent = data->parent;
7498
7499 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7500
7501 if (!RVALUE_OLD_P(child)) {
7502 if (!RVALUE_REMEMBERED(parent) &&
7503 !RVALUE_REMEMBERED(child) &&
7504 !RVALUE_UNCOLLECTIBLE(child)) {
7505 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7506 data->err_count++;
7507 }
7508 }
7509}
7510
7511static void
7512check_color_i(const VALUE child, void *ptr)
7513{
7514 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7515 const VALUE parent = data->parent;
7516
7517 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7518 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7519 obj_info(parent), obj_info(child));
7520 data->err_count++;
7521 }
7522}
7523
7524static void
7525check_children_i(const VALUE child, void *ptr)
7526{
7527 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7528 if (check_rvalue_consistency_force(child, FALSE) != 0) {
7529 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
7530 obj_info(child), obj_info(data->parent));
7531 rb_print_backtrace(); /* C backtrace will help to debug */
7532
7533 data->err_count++;
7534 }
7535}
7536
7537static int
7538verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
7539 struct verify_internal_consistency_struct *data)
7540{
7541 VALUE obj;
7542 rb_objspace_t *objspace = data->objspace;
7543
7544 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
7545 void *poisoned = asan_poisoned_object_p(obj);
7546 asan_unpoison_object(obj, false);
7547
7548 if (is_live_object(objspace, obj)) {
7549 /* count objects */
7550 data->live_object_count++;
7551 data->parent = obj;
7552
7553 /* Normally, we don't expect T_MOVED objects to be in the heap.
7554 * But they can stay alive on the stack. */
7555 if (!gc_object_moved_p(objspace, obj)) {
7556 /* moved slots don't have children */
7557 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
7558 }
7559
7560 /* check health of children */
7561 if (RVALUE_OLD_P(obj)) data->old_object_count++;
7562 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
7563
7564 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
7565 /* objects reachable from an oldgen object should be old or (young and remembered) */
7566 data->parent = obj;
7567 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
7568 }
7569
7570 if (is_incremental_marking(objspace)) {
7571 if (RVALUE_BLACK_P(obj)) {
7572 /* reachable objects from black objects should be black or grey objects */
7573 data->parent = obj;
7574 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
7575 }
7576 }
7577 }
7578 else {
7579 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
7580 GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
7581 data->zombie_object_count++;
7582 }
7583 }
7584 if (poisoned) {
7585 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
7586 asan_poison_object(obj);
7587 }
7588 }
7589
7590 return 0;
7591}
7592
7593static int
7594gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
7595{
7596 int i;
7597 unsigned int has_remembered_shady = FALSE;
7598 unsigned int has_remembered_old = FALSE;
7599 int remembered_old_objects = 0;
7600 int free_objects = 0;
7601 int zombie_objects = 0;
7602 int stride = page->slot_size / sizeof(RVALUE);
7603
7604 for (i=0; i<page->total_slots; i+=stride) {
7605 VALUE val = (VALUE)&page->start[i];
7606 void *poisoned = asan_poisoned_object_p(val);
7607 asan_unpoison_object(val, false);
7608
7609 if (RBASIC(val) == 0) free_objects++;
7610 if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
7611 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
7612 has_remembered_shady = TRUE;
7613 }
7614 if (RVALUE_PAGE_MARKING(page, val)) {
7615 has_remembered_old = TRUE;
7616 remembered_old_objects++;
7617 }
7618
7619 if (poisoned) {
7620 GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
7621 asan_poison_object(val);
7622 }
7623 }
7624
7625 if (!is_incremental_marking(objspace) &&
7626 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
7627
7628 for (i=0; i<page->total_slots; i++) {
7629 VALUE val = (VALUE)&page->start[i];
7630 if (RVALUE_PAGE_MARKING(page, val)) {
7631 fprintf(stderr, "marking -> %s\n", obj_info(val));
7632 }
7633 }
7634 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
7635 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
7636 }
7637
7638 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
7639 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
7640 (void *)page, obj ? obj_info(obj) : "");
7641 }
7642
7643 if (0) {
7644 /* free_slots may not equal free_objects */
7645 if (page->free_slots != free_objects) {
7646 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
7647 }
7648 }
7649 if (page->final_slots != zombie_objects) {
7650 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
7651 }
7652
7653 return remembered_old_objects;
7654}
7655
7656static int
7657gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
7658{
7659 int remembered_old_objects = 0;
7660 struct heap_page *page = 0;
7661
7662 list_for_each(head, page, page_node) {
7663 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
7664 RVALUE *p = page->freelist;
7665 while (p) {
7666 VALUE vp = (VALUE)p;
7667 VALUE prev = vp;
7668 asan_unpoison_object(vp, false);
7669 if (BUILTIN_TYPE(vp) != T_NONE) {
7670 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
7671 }
7672 p = p->as.free.next;
7673 asan_poison_object(prev);
7674 }
7675 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
7676
7677 if (page->flags.has_remembered_objects == FALSE) {
7678 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
7679 }
7680 }
7681
7682 return remembered_old_objects;
7683}
7684
7685static int
7686gc_verify_heap_pages(rb_objspace_t *objspace)
7687{
7688 int remembered_old_objects = 0;
7689 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7690 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
7691 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
7692 }
7693 return remembered_old_objects;
7694}
7695
7696/*
7697 * call-seq:
7698 * GC.verify_internal_consistency -> nil
7699 *
7700 * Verify internal consistency.
7701 *
7702 * This method is implementation specific.
7703 * Currently, this method checks generational consistency
7704 * when RGenGC is supported.
7705 */
7706static VALUE
7707gc_verify_internal_consistency_m(VALUE dummy)
7708{
7709 gc_verify_internal_consistency(&rb_objspace);
7710 return Qnil;
7711}
7712
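/* gc_verify_internal_consistency_: walk every allocated page, re-check the
 * generational and incremental-marking invariants for each live object, and
 * cross-check the live/old/shady/zombie counts against objspace's counters. */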
7713static void
7714gc_verify_internal_consistency_(rb_objspace_t *objspace)
7715{
7716 struct verify_internal_consistency_struct data = {0};
7717
7718 data.objspace = objspace;
7719 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
7720
7721 /* check relations */
7722 for (size_t i = 0; i < heap_allocated_pages; i++) {
7723 struct heap_page *page = heap_pages_sorted[i];
7724 short slot_size = page->slot_size;
7725
7726 uintptr_t start = (uintptr_t)page->start;
7727 uintptr_t end = start + page->total_slots * slot_size;
7728
7729 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
7730 }
7731
7732 if (data.err_count != 0) {
7733#if RGENGC_CHECK_MODE >= 5
7734 objspace->rgengc.error_count = data.err_count;
7735 gc_marks_check(objspace, NULL, NULL);
7736 allrefs_dump(objspace);
7737#endif
7738 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
7739 }
7740
7741 /* check heap_page status */
7742 gc_verify_heap_pages(objspace);
7743
7744 /* check counters */
7745
7746 if (!is_lazy_sweeping(objspace) &&
7747 !finalizing &&
7748 ruby_single_main_ractor != NULL) {
7749 if (objspace_live_slots(objspace) != data.live_object_count) {
7750 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
7751 "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
7752 heap_pages_final_slots, objspace->profile.total_freed_objects);
7753 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7754 objspace_live_slots(objspace), data.live_object_count);
7755 }
7756 }
7757
7758 if (!is_marking(objspace)) {
7759 if (objspace->rgengc.old_objects != data.old_object_count) {
7760 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7761 objspace->rgengc.old_objects, data.old_object_count);
7762 }
7763 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
7764 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
7765 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
7766 }
7767 }
7768
7769 if (!finalizing) {
7770 size_t list_count = 0;
7771
7772 {
7773 VALUE z = heap_pages_deferred_final;
7774 while (z) {
7775 list_count++;
7776 z = RZOMBIE(z)->next;
7777 }
7778 }
7779
7780 if (heap_pages_final_slots != data.zombie_object_count ||
7781 heap_pages_final_slots != list_count) {
7782
7783 rb_bug("inconsistent finalizing object count:\n"
7784 " expect %"PRIuSIZE"\n"
7785 " but %"PRIuSIZE" zombies\n"
7786 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
7787 heap_pages_final_slots,
7788 data.zombie_object_count,
7789 list_count);
7790 }
7791 }
7792
7793 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
7794}
7795
7796static void
7797gc_verify_internal_consistency(rb_objspace_t *objspace)
7798{
7799 RB_VM_LOCK_ENTER();
7800 {
7801 rb_vm_barrier(); // stop other ractors
7802
7803 unsigned int prev_during_gc = during_gc;
7804 during_gc = FALSE; // stop gc here
7805 {
7806 gc_verify_internal_consistency_(objspace);
7807 }
7808 during_gc = prev_during_gc;
7809 }
7810 RB_VM_LOCK_LEAVE();
7811}
7812
7813void
7814rb_gc_verify_internal_consistency(void)
7815{
7816 gc_verify_internal_consistency(&rb_objspace);
7817}
7818
7819static VALUE
7820gc_verify_transient_heap_internal_consistency(VALUE dmy)
7821{
7822 rb_transient_heap_verify();
7823 return Qnil;
7824}
7825
7826/* marks */
7827
7828static void
7829gc_marks_start(rb_objspace_t *objspace, int full_mark)
7830{
7831 /* start marking */
7832 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
7833 gc_mode_transition(objspace, gc_mode_marking);
7834
7835 if (full_mark) {
7836#if GC_ENABLE_INCREMENTAL_MARK
7837 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1);
7838
7839 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
7840 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
7841 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
7842 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
7843#endif
7844 objspace->flags.during_minor_gc = FALSE;
7845 if (ruby_enable_autocompact) {
7846 objspace->flags.during_compacting |= TRUE;
7847 }
7848 objspace->profile.major_gc_count++;
7849 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
7850 objspace->rgengc.old_objects = 0;
7851 objspace->rgengc.last_major_gc = objspace->profile.count;
7852 objspace->marked_slots = 0;
7853
7854 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7855 rgengc_mark_and_rememberset_clear(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7856 }
7857 }
7858 else {
7859 objspace->flags.during_minor_gc = TRUE;
7860 objspace->marked_slots =
7861 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
7862 objspace->profile.minor_gc_count++;
7863
7864 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7865 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7866 }
7867 }
7868
7869 gc_mark_roots(objspace, NULL);
7870
7871 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
7872 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
7873}
7874
7875#if GC_ENABLE_INCREMENTAL_MARK
7876static inline void
7877gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
7878{
7879 if (bits) {
7880 do {
7881 if (bits & 1) {
7882 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
7883 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
7884 GC_ASSERT(RVALUE_MARKED((VALUE)p));
7885 gc_mark_children(objspace, (VALUE)p);
7886 }
7887 p += sizeof(RVALUE);
7888 bits >>= 1;
7889 } while (bits);
7890 }
7891}
7892
7893static void
7894gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
7895{
7896 struct heap_page *page = 0;
7897
7898 list_for_each(&heap->pages, page, page_node) {
7899 bits_t *mark_bits = page->mark_bits;
7900 bits_t *wbun_bits = page->wb_unprotected_bits;
7901 RVALUE *p = page->start;
7902 size_t j;
7903
7904 bits_t bits = mark_bits[0] & wbun_bits[0];
7905 bits >>= NUM_IN_PAGE(p);
7906 gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
7907 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
7908
7909 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
7910 bits_t bits = mark_bits[j] & wbun_bits[j];
7911
7912 gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
7913 p += BITS_BITLENGTH;
7914 }
7915 }
7916
7917 gc_mark_stacked_objects_all(objspace);
7918}
7919
7920static struct heap_page *
7921heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
7922{
7923 struct heap_page *page = heap->pooled_pages;
7924
7925 if (page) {
7926 heap->pooled_pages = page->free_next;
7927 heap_add_freepage(heap, page);
7928 }
7929
7930 return page;
7931}
7932#endif
7933
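/* gc_marks_finish() is called once the mark stack has been drained.  Under
 * incremental marking it keeps the phase alive while pooled pages remain,
 * re-marks the roots, and scans the children of marked wb-unprotected
 * objects before marking is really over.  It then sizes the upcoming sweep
 * (heap_pages_freeable_pages) and decides whether the next GC must be a
 * major one (GPR_FLAG_MAJOR_BY_NOFREE/SHADY/OLDGEN). */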
7934static int
7935gc_marks_finish(rb_objspace_t *objspace)
7936{
7937#if GC_ENABLE_INCREMENTAL_MARK
7938 /* finish incremental GC */
7939 if (is_incremental_marking(objspace)) {
7940 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7941 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
7942 if (heap->pooled_pages) {
7943 heap_move_pooled_pages_to_free_pages(heap);
7944 gc_report(1, objspace, "gc_marks_finish: pooled pages exist. retry.\n");
7945 return FALSE; /* continue marking phase */
7946 }
7947 }
7948
7949 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
7950 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
7951 mark_stack_size(&objspace->mark_stack));
7952 }
7953
7954 gc_mark_roots(objspace, 0);
7955
7956 if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
7957 gc_report(1, objspace, "gc_marks_finish: not empty (%"PRIdSIZE"). retry.\n",
7958 mark_stack_size(&objspace->mark_stack));
7959 return FALSE;
7960 }
7961
7962#if RGENGC_CHECK_MODE >= 2
7963 if (gc_verify_heap_pages(objspace) != 0) {
7964 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
7965 }
7966#endif
7967
7968 objspace->flags.during_incremental_marking = FALSE;
7969 /* check children of all marked wb-unprotected objects */
7970 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7971 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7972 }
7973 }
7974#endif /* GC_ENABLE_INCREMENTAL_MARK */
7975
7976#if RGENGC_CHECK_MODE >= 2
7977 gc_verify_internal_consistency(objspace);
7978#endif
7979
7980 if (is_full_marking(objspace)) {
7981 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
7982 const double r = gc_params.oldobject_limit_factor;
7983 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
7984 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
7985 }
7986
7987#if RGENGC_CHECK_MODE >= 4
7988 during_gc = FALSE;
7989 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
7990 during_gc = TRUE;
7991#endif
7992
7993 {
7994 /* decide full GC is needed or not */
7995 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
7996 size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
7997 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
7998 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
7999 int full_marking = is_full_marking(objspace);
8000 const int r_cnt = GET_VM()->ractor.cnt;
8001 const int r_mul = r_cnt > 8 ? 8 : r_cnt; // upto 8
8002
8003 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8004
8005 /* setup free-able page counts */
8006 if (max_free_slots < gc_params.heap_init_slots * r_mul) {
8007 max_free_slots = gc_params.heap_init_slots * r_mul;
8008 }
8009
8010 if (sweep_slots > max_free_slots) {
8011 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8012 }
8013 else {
8014 heap_pages_freeable_pages = 0;
8015 }
8016
8017 /* check free_min */
8018 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8019 min_free_slots = gc_params.heap_free_slots * r_mul;
8020 }
8021
8022 if (sweep_slots < min_free_slots) {
8023 if (!full_marking) {
8024 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8025 full_marking = TRUE;
8026 /* do not update last_major_gc, because full marking is not done. */
8027 /* goto increment; */
8028 }
8029 else {
8030 gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
8031 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8032 }
8033 }
8034
8035#if !USE_RVARGC
8036 if (full_marking) {
8037 /* increment: */
8038 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
8039 rb_size_pool_t *size_pool = &size_pools[0];
8040 size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));
8041
8042 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8043 }
8044#endif
8045 }
8046
8047 if (full_marking) {
8048 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
8049 const double r = gc_params.oldobject_limit_factor;
8050 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8051 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8052 }
8053
8054 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8055 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8056 }
8057 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8058 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8059 }
8060 if (RGENGC_FORCE_MAJOR_GC) {
8061 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8062 }
8063
8064 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8065 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8066 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8067 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8068 objspace->rgengc.need_major_gc ? "major" : "minor");
8069 }
8070
8071 rb_transient_heap_finish_marking();
8072 rb_ractor_finish_marking();
8073
8074 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
8075
8076 return TRUE;
8077}
8078
8079#if GC_ENABLE_INCREMENTAL_MARK
8080static void
8081gc_marks_step(rb_objspace_t *objspace, size_t slots)
8082{
8083 GC_ASSERT(is_marking(objspace));
8084
8085 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8086 if (gc_marks_finish(objspace)) {
8087 /* finish */
8088 gc_sweep(objspace);
8089 }
8090 }
8091 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
8092}
8093#endif
8094
8095static void
8096gc_marks_rest(rb_objspace_t *objspace)
8097{
8098 gc_report(1, objspace, "gc_marks_rest\n");
8099
8100#if GC_ENABLE_INCREMENTAL_MARK
8101 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8102 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8103 }
8104#endif
8105
8106 if (is_incremental_marking(objspace)) {
8107 do {
8108 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8109 } while (gc_marks_finish(objspace) == FALSE);
8110 }
8111 else {
8112 gc_mark_stacked_objects_all(objspace);
8113 gc_marks_finish(objspace);
8114 }
8115
8116 /* move to sweep */
8117 gc_sweep(objspace);
8118}
8119
8120static void
8121gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
8122{
8123 GC_ASSERT(dont_gc_val() == FALSE);
8124#if GC_ENABLE_INCREMENTAL_MARK
8125
8126 unsigned int lock_lev;
8127 gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);
8128
8129 int slots = 0;
8130 const char *from;
8131
8132 if (heap->pooled_pages) {
8133 while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
8134 struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
8135 slots += page->free_slots;
8136 }
8137 from = "pooled-pages";
8138 }
8139 else if (heap_increment(objspace, size_pool, heap)) {
8140 slots = heap->free_pages->free_slots;
8141 from = "incremented-pages";
8142 }
8143
8144 if (slots > 0) {
8145 gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n",
8146 slots, from);
8147 gc_marks_step(objspace, objspace->rincgc.step_slots);
8148 }
8149 else {
8150 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8151 mark_stack_size(&objspace->mark_stack));
8152 gc_marks_rest(objspace);
8153 }
8154
8155 gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
8156#endif
8157}
8158
8159static void
8160gc_marks(rb_objspace_t *objspace, int full_mark)
8161{
8162 gc_prof_mark_timer_start(objspace);
8163
8164 /* setup marking */
8165
8166 gc_marks_start(objspace, full_mark);
8167 if (!is_incremental_marking(objspace)) {
8168 gc_marks_rest(objspace);
8169 }
8170
8171#if RGENGC_PROFILE > 0
8172 if (gc_prof_record(objspace)) {
8173 gc_profile_record *record = gc_prof_record(objspace);
8174 record->old_objects = objspace->rgengc.old_objects;
8175 }
8176#endif
8177 gc_prof_mark_timer_stop(objspace);
8178}
8179
8180/* RGENGC */
8181
8182static void
8183gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8184{
8185 if (level <= RGENGC_DEBUG) {
8186 char buf[1024];
8187 FILE *out = stderr;
8188 va_list args;
8189 const char *status = " ";
8190
8191 if (during_gc) {
8192 status = is_full_marking(objspace) ? "+" : "-";
8193 }
8194 else {
8195 if (is_lazy_sweeping(objspace)) {
8196 status = "S";
8197 }
8198 if (is_incremental_marking(objspace)) {
8199 status = "M";
8200 }
8201 }
8202
8203 va_start(args, fmt);
8204 vsnprintf(buf, 1024, fmt, args);
8205 va_end(args);
8206
8207 fprintf(out, "%s|", status);
8208 fputs(buf, out);
8209 }
8210}
8211
8212/* bit operations */
8213
8214static int
8215rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
8216{
8217 return RVALUE_REMEMBERED(obj);
8218}
8219
8220static int
8221rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
8222{
8223 struct heap_page *page = GET_HEAP_PAGE(obj);
8224 bits_t *bits = &page->marking_bits[0];
8225
8226 GC_ASSERT(!is_incremental_marking(objspace));
8227
8228 if (MARKED_IN_BITMAP(bits, obj)) {
8229 return FALSE;
8230 }
8231 else {
8232 page->flags.has_remembered_objects = TRUE;
8233 MARK_IN_BITMAP(bits, obj);
8234 return TRUE;
8235 }
8236}
8237
8238/* wb, etc */
8239
8240/* return FALSE if already remembered */
8241static int
8242rgengc_remember(rb_objspace_t *objspace, VALUE obj)
8243{
8244 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8245 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
8246
8247 check_rvalue_consistency(obj);
8248
8249 if (RGENGC_CHECK_MODE) {
8250 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8251 }
8252
8253#if RGENGC_PROFILE > 0
8254 if (!rgengc_remembered(objspace, obj)) {
8255 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8256 objspace->profile.total_remembered_normal_object_count++;
8257#if RGENGC_PROFILE >= 2
8258 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8259#endif
8260 }
8261 }
8262#endif /* RGENGC_PROFILE > 0 */
8263
8264 return rgengc_remembersetbits_set(objspace, obj);
8265}
8266
8267static int
8268rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
8269{
8270 int result = rgengc_remembersetbits_get(objspace, obj);
8271 check_rvalue_consistency(obj);
8272 return result;
8273}
8274
8275static int
8276rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
8277{
8278 gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
8279 return rgengc_remembered_sweep(objspace, obj);
8280}
8281
8282#ifndef PROFILE_REMEMBERSET_MARK
8283#define PROFILE_REMEMBERSET_MARK 0
8284#endif
8285
8286static inline void
8287rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8288{
8289 if (bitset) {
8290 do {
8291 if (bitset & 1) {
8292 VALUE obj = (VALUE)p;
8293 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8294 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8295 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8296
8297 gc_mark_children(objspace, obj);
8298 }
8299 p += sizeof(RVALUE);
8300 bitset >>= 1;
8301 } while (bitset);
8302 }
8303}
8304
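/* Minor-GC entry point for the remembered set: walk every page that may hold
 * remembered or uncollectible shady objects, OR the marking bits with the
 * (uncollectible & wb-unprotected) bits, mark the children of each set slot,
 * and clear the per-page marking bits for the next cycle. */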
8305static void
8306rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
8307{
8308 size_t j;
8309 struct heap_page *page = 0;
8310#if PROFILE_REMEMBERSET_MARK
8311 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8312#endif
8313 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8314
8315 list_for_each(&heap->pages, page, page_node) {
8316 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
8317 RVALUE *p = page->start;
8318 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8319 bits_t *marking_bits = page->marking_bits;
8320 bits_t *uncollectible_bits = page->uncollectible_bits;
8321 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8322#if PROFILE_REMEMBERSET_MARK
8323 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
8324 else if (page->flags.has_remembered_objects) has_old++;
8325 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
8326#endif
8327 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8328 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8329 marking_bits[j] = 0;
8330 }
8331 page->flags.has_remembered_objects = FALSE;
8332
8333 bitset = bits[0];
8334 bitset >>= NUM_IN_PAGE(p);
8335 rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
8336 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
8337
8338 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8339 bitset = bits[j];
8340 rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
8341 p += BITS_BITLENGTH;
8342 }
8343 }
8344#if PROFILE_REMEMBERSET_MARK
8345 else {
8346 skip++;
8347 }
8348#endif
8349 }
8350
8351#if PROFILE_REMEMBERSET_MARK
8352 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
8353#endif
8354 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
8355}
8356
8357static void
8358rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
8359{
8360 struct heap_page *page = 0;
8361
8362 list_for_each(&heap->pages, page, page_node) {
8363 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8364 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8365 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8366 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8367 page->flags.has_uncollectible_shady_objects = FALSE;
8368 page->flags.has_remembered_objects = FALSE;
8369 }
8370}
8371
8372/* RGENGC: APIs */
8373
8374NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
8375
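/* Generational write barrier (non-incremental case): when an old object `a'
 * is written to reference a young object `b', remember `a' so that the next
 * minor GC rescans it.  The disabled #else branch below shows the alternative
 * strategy of promoting and remembering `b' instead. */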
8376static void
8377gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
8378{
8379 if (RGENGC_CHECK_MODE) {
8380 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
8381 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
8382 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
8383 }
8384
8385#if 1
8386 /* mark `a' and remember (default behavior) */
8387 if (!rgengc_remembered(objspace, a)) {
8388 RB_VM_LOCK_ENTER_NO_BARRIER();
8389 {
8390 rgengc_remember(objspace, a);
8391 }
8392 RB_VM_LOCK_LEAVE_NO_BARRIER();
8393 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
8394 }
8395#else
8396 /* mark `b' and remember */
8397 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
8398 if (RVALUE_WB_UNPROTECTED(b)) {
8399 gc_remember_unprotected(objspace, b);
8400 }
8401 else {
8402 RVALUE_AGE_SET_OLD(objspace, b);
8403 rgengc_remember(objspace, b);
8404 }
8405
8406 gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
8407#endif
8408
8409 check_rvalue_consistency(a);
8410 check_rvalue_consistency(b);
8411}
8412
8413#if GC_ENABLE_INCREMENTAL_MARK
8414static void
8415gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
8416{
8417 gc_mark_set_parent(objspace, parent);
8418 rgengc_check_relation(objspace, obj);
8419 if (gc_mark_set(objspace, obj) == FALSE) return;
8420 gc_aging(objspace, obj);
8421 gc_grey(objspace, obj);
8422}
8423
8424NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
8425
8426static void
8427gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
8428{
8429 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
8430
8431 if (RVALUE_BLACK_P(a)) {
8432 if (RVALUE_WHITE_P(b)) {
8433 if (!RVALUE_WB_UNPROTECTED(a)) {
8434 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
8435 gc_mark_from(objspace, b, a);
8436 }
8437 }
8438 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
8439 if (!RVALUE_WB_UNPROTECTED(b)) {
8440 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
8441 RVALUE_AGE_SET_OLD(objspace, b);
8442
8443 if (RVALUE_BLACK_P(b)) {
8444 gc_grey(objspace, b);
8445 }
8446 }
8447 else {
8448 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
8449 gc_remember_unprotected(objspace, b);
8450 }
8451 }
8452
8453 if (UNLIKELY(objspace->flags.during_compacting)) {
8454 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
8455 }
8456 }
8457}
8458#else
8459#define gc_writebarrier_incremental(a, b, objspace)
8460#endif
8461
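/* Public entry point of the write barrier.  Code normally reaches it through
 * the RB_OBJ_WRITE()/RB_OBJ_WRITTEN() macros rather than calling it directly;
 * an illustrative sketch (`parent', `ptr' and `child' are hypothetical names):
 *
 *     RB_OBJ_WRITE(parent, &ptr->field, child);  // store child, then barrier
 *
 * Outside of incremental marking the generational barrier remembers `a';
 * during incremental marking the slow path below takes the VM lock (without a
 * barrier) and greys or ages `b' as needed. */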
8462void
8463rb_gc_writebarrier(VALUE a, VALUE b)
8464{
8465 rb_objspace_t *objspace = &rb_objspace;
8466
8467 if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
8468 if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
8469
8470 retry:
8471 if (!is_incremental_marking(objspace)) {
8472 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
8473 // do nothing
8474 }
8475 else {
8476 gc_writebarrier_generational(a, b, objspace);
8477 }
8478 }
8479 else {
8480 bool retry = false;
8481 /* slow path */
8482 RB_VM_LOCK_ENTER_NO_BARRIER();
8483 {
8484 if (is_incremental_marking(objspace)) {
8485 gc_writebarrier_incremental(a, b, objspace);
8486 }
8487 else {
8488 retry = true;
8489 }
8490 }
8491 RB_VM_LOCK_LEAVE_NO_BARRIER();
8492
8493 if (retry) goto retry;
8494 }
8495 return;
8496}
8497
8498void
8499rb_gc_writebarrier_unprotect(VALUE obj)
8500{
8501 if (RVALUE_WB_UNPROTECTED(obj)) {
8502 return;
8503 }
8504 else {
8505 rb_objspace_t *objspace = &rb_objspace;
8506
8507 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
8508 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
8509
8510 if (RVALUE_OLD_P(obj)) {
8511 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
8512 RVALUE_DEMOTE(objspace, obj);
8513 gc_mark_set(objspace, obj);
8514 gc_remember_unprotected(objspace, obj);
8515
8516#if RGENGC_PROFILE
8517 objspace->profile.total_shade_operation_count++;
8518#if RGENGC_PROFILE >= 2
8519 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
8520#endif /* RGENGC_PROFILE >= 2 */
8521#endif /* RGENGC_PROFILE */
8522 }
8523 else {
8524 RVALUE_AGE_RESET(obj);
8525 }
8526
8527 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
8528 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
8529 }
8530}
8531
8532/*
8533 * remember `obj' if needed.
8534 */
8535MJIT_FUNC_EXPORTED void
8536rb_gc_writebarrier_remember(VALUE obj)
8537{
8538 rb_objspace_t *objspace = &rb_objspace;
8539
8540 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
8541
8542 if (is_incremental_marking(objspace)) {
8543 if (RVALUE_BLACK_P(obj)) {
8544 gc_grey(objspace, obj);
8545 }
8546 }
8547 else {
8548 if (RVALUE_OLD_P(obj)) {
8549 rgengc_remember(objspace, obj);
8550 }
8551 }
8552}
8553
8554static st_table *rgengc_unprotect_logging_table;
8555
8556static int
8557rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
8558{
8559 fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
8560 return ST_CONTINUE;
8561}
8562
8563static void
8564rgengc_unprotect_logging_exit_func(void)
8565{
8566 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
8567}
8568
8569void
8570rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
8571{
8572 VALUE obj = (VALUE)objptr;
8573
8574 if (rgengc_unprotect_logging_table == 0) {
8575 rgengc_unprotect_logging_table = st_init_strtable();
8576 atexit(rgengc_unprotect_logging_exit_func);
8577 }
8578
8579 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8580 char buff[0x100];
8581 st_data_t cnt = 1;
8582 char *ptr = buff;
8583
8584 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
8585
8586 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
8587 cnt++;
8588 }
8589 else {
8590 ptr = (strdup)(buff);
8591 if (!ptr) rb_memerror();
8592 }
8593 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
8594 }
8595}
8596
8597void
8598rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
8599{
8600 rb_objspace_t *objspace = &rb_objspace;
8601
8602 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
8603 if (!RVALUE_OLD_P(dest)) {
8604 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
8605 RVALUE_AGE_RESET_RAW(dest);
8606 }
8607 else {
8608 RVALUE_DEMOTE(objspace, dest);
8609 }
8610 }
8611
8612 check_rvalue_consistency(dest);
8613}
8614
8615/* RGENGC analysis information */
8616
8617VALUE
8618rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
8619{
8620 return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;
8621}
8622
8623VALUE
8624rb_obj_rgengc_promoted_p(VALUE obj)
8625{
8626 return RBOOL(OBJ_PROMOTED(obj));
8627}
8628
8629size_t
8630rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
8631{
8632 size_t n = 0;
8633 static ID ID_marked;
8634 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
8635
8636 if (!ID_marked) {
8637#define I(s) ID_##s = rb_intern(#s);
8638 I(marked);
8639 I(wb_protected);
8640 I(old);
8641 I(marking);
8642 I(uncollectible);
8643 I(pinned);
8644#undef I
8645 }
8646
8647 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
8648 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
8649 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
8650 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
8651 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
8652 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
8653 return n;
8654}
8655
8656/* GC */
8657
8658void
8659rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
8660{
8661 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
8662 rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];
8663
8664 struct heap_page *page = cache->using_page;
8665 RVALUE *freelist = cache->freelist;
8666 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
8667
8668 heap_page_freelist_append(page, freelist);
8669
8670 cache->using_page = NULL;
8671 cache->freelist = NULL;
8672 }
8673}
8674
8675void
8676rb_gc_force_recycle(VALUE obj)
8677{
8678 /* no-op */
8679}
8680
8681#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
8682#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
8683#endif
8684
8685void
8686rb_gc_register_mark_object(VALUE obj)
8687{
8688 if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
8689 return;
8690
8691 RB_VM_LOCK_ENTER();
8692 {
8693 VALUE ary_ary = GET_VM()->mark_object_ary;
8694 VALUE ary = rb_ary_last(0, 0, ary_ary);
8695
8696 if (NIL_P(ary) || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
8697 ary = rb_ary_tmp_new(MARK_OBJECT_ARY_BUCKET_SIZE);
8698 rb_ary_push(ary_ary, ary);
8699 }
8700
8701 rb_ary_push(ary, obj);
8702 }
8703 RB_VM_LOCK_LEAVE();
8704}
8705
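/* rb_gc_register_address() (and the rb_global_variable() wrapper further
 * below) registers the address of a C global/static VALUE as a GC root via
 * global_list; rb_gc_unregister_address() removes it again.  Illustrative
 * extension-style usage (the names are hypothetical):
 *
 *     static VALUE cached_str;
 *     void Init_my_ext(void) {
 *         cached_str = rb_str_new_cstr("cache");
 *         rb_gc_register_address(&cached_str);  // keep cached_str reachable
 *     }
 */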
8706void
8707rb_gc_register_address(VALUE *addr)
8708{
8709 rb_objspace_t *objspace = &rb_objspace;
8710 struct gc_list *tmp;
8711
8712 tmp = ALLOC(struct gc_list);
8713 tmp->next = global_list;
8714 tmp->varptr = addr;
8715 global_list = tmp;
8716}
8717
8718void
8719rb_gc_unregister_address(VALUE *addr)
8720{
8721 rb_objspace_t *objspace = &rb_objspace;
8722 struct gc_list *tmp = global_list;
8723
8724 if (tmp->varptr == addr) {
8725 global_list = tmp->next;
8726 xfree(tmp);
8727 return;
8728 }
8729 while (tmp->next) {
8730 if (tmp->next->varptr == addr) {
8731 struct gc_list *t = tmp->next;
8732
8733 tmp->next = tmp->next->next;
8734 xfree(t);
8735 break;
8736 }
8737 tmp = tmp->next;
8738 }
8739}
8740
8741void
8742rb_global_variable(VALUE *var)
8743{
8744 rb_gc_register_address(var);
8745}
8746
8747#define GC_NOTIFY 0
8748
8749enum {
8750 gc_stress_no_major,
8751 gc_stress_no_immediate_sweep,
8752 gc_stress_full_mark_after_malloc,
8753 gc_stress_max
8754};
8755
8756#define gc_stress_full_mark_after_malloc_p() \
8757 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
8758
8759static void
8760heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
8761{
8762 if (!heap->free_pages) {
8763 if (!heap_increment(objspace, size_pool, heap)) {
8764 size_pool_allocatable_pages_set(objspace, size_pool, 1);
8765 heap_increment(objspace, size_pool, heap);
8766 }
8767 }
8768}
8769
8770static int
8771ready_to_gc(rb_objspace_t *objspace)
8772{
8773 if (dont_gc_val() || during_gc || ruby_disable_gc) {
8774 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8775 rb_size_pool_t *size_pool = &size_pools[i];
8776 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8777 }
8778 return FALSE;
8779 }
8780 else {
8781 return TRUE;
8782 }
8783}
8784
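/* After each GC, adjust the malloc-driven trigger: if malloc_increase
 * exceeded malloc_limit, grow the limit by malloc_limit_growth_factor
 * (capped at malloc_limit_max); otherwise let it decay slowly (the 0.98
 * factor below).  With RGENGC_ESTIMATE_OLDMALLOC, a minor GC also checks
 * whether oldmalloc_increase passed its limit and, if so, requests a major
 * GC (GPR_FLAG_MAJOR_BY_OLDMALLOC). */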
8785static void
8786gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
8787{
8788 gc_prof_set_malloc_info(objspace);
8789 {
8790 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
8791 size_t old_limit = malloc_limit;
8792
8793 if (inc > malloc_limit) {
8794 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
8795 if (malloc_limit > gc_params.malloc_limit_max) {
8796 malloc_limit = gc_params.malloc_limit_max;
8797 }
8798 }
8799 else {
8800 malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
8801 if (malloc_limit < gc_params.malloc_limit_min) {
8802 malloc_limit = gc_params.malloc_limit_min;
8803 }
8804 }
8805
8806 if (0) {
8807 if (old_limit != malloc_limit) {
8808 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
8809 rb_gc_count(), old_limit, malloc_limit);
8810 }
8811 else {
8812 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
8813 rb_gc_count(), malloc_limit);
8814 }
8815 }
8816 }
8817
8818 /* reset oldmalloc info */
8819#if RGENGC_ESTIMATE_OLDMALLOC
8820 if (!full_mark) {
8821 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
8822 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
8823 objspace->rgengc.oldmalloc_increase_limit =
8824 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
8825
8826 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
8827 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
8828 }
8829 }
8830
8831 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
8832 rb_gc_count(),
8833 objspace->rgengc.need_major_gc,
8834 objspace->rgengc.oldmalloc_increase,
8835 objspace->rgengc.oldmalloc_increase_limit,
8836 gc_params.oldmalloc_limit_max);
8837 }
8838 else {
8839 /* major GC */
8840 objspace->rgengc.oldmalloc_increase = 0;
8841
8842 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
8843 objspace->rgengc.oldmalloc_increase_limit =
8844 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
8845 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
8846 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
8847 }
8848 }
8849 }
8850#endif
8851}
8852
8853static int
8854garbage_collect(rb_objspace_t *objspace, unsigned int reason)
8855{
8856 int ret;
8857
8858 RB_VM_LOCK_ENTER();
8859 {
8860#if GC_PROFILE_MORE_DETAIL
8861 objspace->profile.prepare_time = getrusage_time();
8862#endif
8863
8864 gc_rest(objspace);
8865
8866#if GC_PROFILE_MORE_DETAIL
8867 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
8868#endif
8869
8870 ret = gc_start(objspace, reason);
8871 }
8872 RB_VM_LOCK_LEAVE();
8873
8874 return ret;
8875}
8876
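/* gc_start() runs the marking half of a GC cycle: it decides between a major
 * and a minor collection (GC stress flags, need_major_gc, or the caller's
 * reason), chooses incremental marking and lazy/immediate sweep, fires
 * RUBY_INTERNAL_EVENT_GC_START, and then calls gc_marks().  Sweeping is
 * kicked off at the end of marking (gc_marks_rest()/gc_marks_step()) and may
 * continue lazily afterwards. */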
8877static int
8878gc_start(rb_objspace_t *objspace, unsigned int reason)
8879{
8880 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
8881#if GC_ENABLE_INCREMENTAL_MARK
8882 unsigned int immediate_mark = reason & GPR_FLAG_IMMEDIATE_MARK;
8883#endif
8884
8885 /* reason may be clobbered later, so set immediate_sweep here */
8886 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
8887
8888 /* Explicitly enable compaction (GC.compact) */
8889 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
8890
8891 if (!heap_allocated_pages) return FALSE; /* heap is not ready */
8892 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
8893
8894 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
8895 GC_ASSERT(!is_lazy_sweeping(objspace));
8896 GC_ASSERT(!is_incremental_marking(objspace));
8897
8898 unsigned int lock_lev;
8899 gc_enter(objspace, gc_enter_event_start, &lock_lev);
8900
8901#if RGENGC_CHECK_MODE >= 2
8902 gc_verify_internal_consistency(objspace);
8903#endif
8904
8905 if (ruby_gc_stressful) {
8906 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
8907
8908 if ((flag & (1<<gc_stress_no_major)) == 0) {
8909 do_full_mark = TRUE;
8910 }
8911
8912 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
8913 }
8914 else {
8915 if (objspace->rgengc.need_major_gc) {
8916 reason |= objspace->rgengc.need_major_gc;
8917 do_full_mark = TRUE;
8918 }
8919 else if (RGENGC_FORCE_MAJOR_GC) {
8920 reason = GPR_FLAG_MAJOR_BY_FORCE;
8921 do_full_mark = TRUE;
8922 }
8923
8924 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
8925 }
8926
8927 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
8928 reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
8929 }
8930
8931#if GC_ENABLE_INCREMENTAL_MARK
8932 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
8933 objspace->flags.during_incremental_marking = FALSE;
8934 }
8935 else {
8936 objspace->flags.during_incremental_marking = do_full_mark;
8937 }
8938#endif
8939
8940 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
8941 objspace->flags.immediate_sweep = TRUE;
8942 }
8943
8944 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
8945
8946 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
8947 reason,
8948 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
8949
8950#if USE_DEBUG_COUNTER
8951 RB_DEBUG_COUNTER_INC(gc_count);
8952
8953 if (reason & GPR_FLAG_MAJOR_MASK) {
8954 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
8955 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
8956 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
8957 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
8958#if RGENGC_ESTIMATE_OLDMALLOC
8959 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
8960#endif
8961 }
8962 else {
8963 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
8964 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
8965 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
8966 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
8967 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
8968 }
8969#endif
8970
8971 objspace->profile.count++;
8972 objspace->profile.latest_gc_info = reason;
8973 objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
8974 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
8975 gc_prof_setup_new_record(objspace, reason);
8976 gc_reset_malloc_info(objspace, do_full_mark);
8977 rb_transient_heap_start_marking(do_full_mark);
8978
8979 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
8980 GC_ASSERT(during_gc);
8981
8982 gc_prof_timer_start(objspace);
8983 {
8984 gc_marks(objspace, do_full_mark);
8985 }
8986 gc_prof_timer_stop(objspace);
8987
8988 gc_exit(objspace, gc_enter_event_start, &lock_lev);
8989 return TRUE;
8990}
8991
8992static void
8993gc_rest(rb_objspace_t *objspace)
8994{
8995 int marking = is_incremental_marking(objspace);
8996 int sweeping = is_lazy_sweeping(objspace);
8997
8998 if (marking || sweeping) {
8999 unsigned int lock_lev;
9000 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9001
9002 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9003
9004 if (is_incremental_marking(objspace)) {
9005 gc_marks_rest(objspace);
9006 }
9007 if (is_lazy_sweeping(objspace)) {
9008 gc_sweep_rest(objspace);
9009 }
9010 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9011 }
9012}
9013
9014struct objspace_and_reason {
9015 rb_objspace_t *objspace;
9016 unsigned int reason;
9017};
9018
9019static void
9020gc_current_status_fill(rb_objspace_t *objspace, char *buff)
9021{
9022 int i = 0;
9023 if (is_marking(objspace)) {
9024 buff[i++] = 'M';
9025 if (is_full_marking(objspace)) buff[i++] = 'F';
9026#if GC_ENABLE_INCREMENTAL_MARK
9027 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9028#endif
9029 }
9030 else if (is_sweeping(objspace)) {
9031 buff[i++] = 'S';
9032 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9033 }
9034 else {
9035 buff[i++] = 'N';
9036 }
9037 buff[i] = '\0';
9038}
9039
9040static const char *
9041gc_current_status(rb_objspace_t *objspace)
9042{
9043 static char buff[0x10];
9044 gc_current_status_fill(objspace, buff);
9045 return buff;
9046}
9047
9048#if PRINT_ENTER_EXIT_TICK
9049
9050static tick_t last_exit_tick;
9051static tick_t enter_tick;
9052static int enter_count = 0;
9053static char last_gc_status[0x10];
9054
9055static inline void
9056gc_record(rb_objspace_t *objspace, int direction, const char *event)
9057{
9058 if (direction == 0) { /* enter */
9059 enter_count++;
9060 enter_tick = tick();
9061 gc_current_status_fill(objspace, last_gc_status);
9062 }
9063 else { /* exit */
9064 tick_t exit_tick = tick();
9065 char current_gc_status[0x10];
9066 gc_current_status_fill(objspace, current_gc_status);
9067#if 1
9068 /* [last mutator time] [gc time] [event] */
9069 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9070 enter_tick - last_exit_tick,
9071 exit_tick - enter_tick,
9072 event,
9073 last_gc_status, current_gc_status,
9074 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9075 last_exit_tick = exit_tick;
9076#else
9077 /* [enter_tick] [gc time] [event] */
9078 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9079 enter_tick,
9080 exit_tick - enter_tick,
9081 event,
9082 last_gc_status, current_gc_status,
9083 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9084#endif
9085 }
9086}
9087#else /* PRINT_ENTER_EXIT_TICK */
9088static inline void
9089gc_record(rb_objspace_t *objspace, int direction, const char *event)
9090{
9091 /* null */
9092}
9093#endif /* PRINT_ENTER_EXIT_TICK */
9094
9095static const char *
9096gc_enter_event_cstr(enum gc_enter_event event)
9097{
9098 switch (event) {
9099 case gc_enter_event_start: return "start";
9100 case gc_enter_event_mark_continue: return "mark_continue";
9101 case gc_enter_event_sweep_continue: return "sweep_continue";
9102 case gc_enter_event_rest: return "rest";
9103 case gc_enter_event_finalizer: return "finalizer";
9104 case gc_enter_event_rb_memerror: return "rb_memerror";
9105 }
9106 return NULL;
9107}
9108
9109static void
9110gc_enter_count(enum gc_enter_event event)
9111{
9112 switch (event) {
9113 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9114 case gc_enter_event_mark_continue: RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
9115 case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
9116 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9117 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9118 case gc_enter_event_rb_memerror: /* nothing */ break;
9119 }
9120}
9121
9122#ifndef MEASURE_GC
9123#define MEASURE_GC (objspace->flags.measure_gc)
9124#endif
9125
9126static bool
9127gc_enter_event_measure_p(rb_objspace_t *objspace, enum gc_enter_event event)
9128{
9129 if (!MEASURE_GC) return false;
9130
9131 switch (event) {
9132 case gc_enter_event_start:
9133 case gc_enter_event_mark_continue:
9134 case gc_enter_event_sweep_continue:
9135 case gc_enter_event_rest:
9136 return true;
9137
9138 default:
9139 // case gc_enter_event_finalizer:
9140 // case gc_enter_event_rb_memerror:
9141 return false;
9142 }
9143}
9144
9145static bool current_process_time(struct timespec *ts);
9146
9147static void
9148gc_enter_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9149{
9150 if (gc_enter_event_measure_p(objspace, event)) {
9151 if (!current_process_time(&objspace->profile.start_time)) {
9152 objspace->profile.start_time.tv_sec = 0;
9153 objspace->profile.start_time.tv_nsec = 0;
9154 }
9155 }
9156}
9157
9158static void
9159gc_exit_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9160{
9161 if (gc_enter_event_measure_p(objspace, event)) {
9162 struct timespec end_time;
9163
9164 if ((objspace->profile.start_time.tv_sec > 0 ||
9165 objspace->profile.start_time.tv_nsec > 0) &&
9166 current_process_time(&end_time)) {
9167
9168 if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
9169 return; // ignore
9170 }
9171 else {
9172 uint64_t ns =
9173 (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
9174 (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
9175 objspace->profile.total_time_ns += ns;
9176 }
9177 }
9178 }
9179}
9180
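/* gc_enter()/gc_exit() bracket every GC phase: they take the VM lock, raise
 * the ractor barrier for phases that need all ractors stopped (start,
 * mark_continue, and rest while marking), toggle during_gc, notify MJIT,
 * record timing when GC measurement is enabled, and fire the
 * RUBY_INTERNAL_EVENT_GC_ENTER/EXIT hooks. */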
9181static inline void
9182gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9183{
9184 RB_VM_LOCK_ENTER_LEV(lock_lev);
9185
9186 gc_enter_clock(objspace, event);
9187
9188 switch (event) {
9189 case gc_enter_event_rest:
9190 if (!is_marking(objspace)) break;
9191 // fall through
9192 case gc_enter_event_start:
9193 case gc_enter_event_mark_continue:
9194 // stop other ractors
9195 rb_vm_barrier();
9196 break;
9197 default:
9198 break;
9199 }
9200
9201 gc_enter_count(event);
9202 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9203 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9204
9205 mjit_gc_start_hook();
9206
9207 during_gc = TRUE;
9208 RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
9209 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9210 gc_record(objspace, 0, gc_enter_event_cstr(event));
9211 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
9212}
9213
9214static inline void
9215gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9216{
9217 GC_ASSERT(during_gc != 0);
9218
9219 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
9220 gc_record(objspace, 1, gc_enter_event_cstr(event));
9221 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9222 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9223 during_gc = FALSE;
9224
9225 mjit_gc_exit_hook();
9226 gc_exit_clock(objspace, event);
9227 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9228}
9229
9230static void *
9231gc_with_gvl(void *ptr)
9232{
9233 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
9234 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9235}
9236
9237static int
9238garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9239{
9240 if (dont_gc_val()) return TRUE;
9241 if (ruby_thread_has_gvl_p()) {
9242 return garbage_collect(objspace, reason);
9243 }
9244 else {
9245 if (ruby_native_thread_p()) {
9246 struct objspace_and_reason oar;
9247 oar.objspace = objspace;
9248 oar.reason = reason;
9249 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
9250 }
9251 else {
9252 /* no ruby thread */
9253 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9254 exit(EXIT_FAILURE);
9255 }
9256 }
9257}
9258
9259static VALUE
9260gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
9261{
9262 rb_objspace_t *objspace = &rb_objspace;
9263 unsigned int reason = (GPR_FLAG_FULL_MARK |
9264 GPR_FLAG_IMMEDIATE_MARK |
9265 GPR_FLAG_IMMEDIATE_SWEEP |
9266 GPR_FLAG_METHOD);
9267
9268 /* For now, compact implies full mark / sweep, so ignore other flags */
9269 if (RTEST(compact)) {
9270 /* Except on MinGW and Windows, we need mmap-based aligned allocation so that
9271 * mprotect can implement the read barrier; otherwise compaction must be disabled. */
9272#if !defined(__MINGW32__) && !defined(_WIN32)
9273 if (!USE_MMAP_ALIGNED_ALLOC) {
9274 rb_raise(rb_eNotImpError, "Compaction isn't available on this platform");
9275 }
9276#endif
9277
9278 reason |= GPR_FLAG_COMPACT;
9279 }
9280 else {
9281 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9282 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9283 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9284 }
9285
9286 garbage_collect(objspace, reason);
9287 gc_finalize_deferred(objspace);
9288
9289 return Qnil;
9290}
9291
9292static int
9293gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
9294{
9295 GC_ASSERT(!SPECIAL_CONST_P(obj));
9296
9297 switch (BUILTIN_TYPE(obj)) {
9298 case T_NONE:
9299 case T_NIL:
9300 case T_MOVED:
9301 case T_ZOMBIE:
9302 return FALSE;
9303 case T_SYMBOL:
9304 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
9305 return FALSE;
9306 }
9307 /* fall through */
9308 case T_STRING:
9309 case T_OBJECT:
9310 case T_FLOAT:
9311 case T_IMEMO:
9312 case T_ARRAY:
9313 case T_BIGNUM:
9314 case T_ICLASS:
9315 case T_MODULE:
9316 case T_REGEXP:
9317 case T_DATA:
9318 case T_MATCH:
9319 case T_STRUCT:
9320 case T_HASH:
9321 case T_FILE:
9322 case T_COMPLEX:
9323 case T_RATIONAL:
9324 case T_NODE:
9325 case T_CLASS:
9326 if (FL_TEST(obj, FL_FINALIZE)) {
9327 /* The finalizer table is a numtable. It looks up objects by address.
9328 * We can't mark the keys in the finalizer table because that would
9329 * prevent the objects from being collected. This check prevents
9330 * objects that are keys in the finalizer table from being moved
9331 * without directly pinning them. */
9332 if (st_is_member(finalizer_table, obj)) {
9333 return FALSE;
9334 }
9335 }
9336 GC_ASSERT(RVALUE_MARKED(obj));
9337 GC_ASSERT(!RVALUE_PINNED(obj));
9338
9339 return TRUE;
9340
9341 default:
9342 rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
9343 break;
9344 }
9345
9346 return FALSE;
9347}
9348
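/* gc_move() relocates a single object during compaction: it snapshots the
 * source slot's mark/wb-unprotected/uncollectible/marking bits, clears them,
 * memcpy()s the payload into the free slot, replays the bits at the new
 * address, and turns the old slot into a T_MOVED forwarding cell whose
 * destination is later resolved by rb_gc_location(). */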
9349static VALUE
9350gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size)
9351{
9352 int marked;
9353 int wb_unprotected;
9354 int uncollectible;
9355 int marking;
9356 RVALUE *dest = (RVALUE *)free;
9357 RVALUE *src = (RVALUE *)scan;
9358
9359 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
9360
9361 GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
9362 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
9363
9364 /* Save off bits for current object. */
9365 marked = rb_objspace_marked_object_p((VALUE)src);
9366 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
9367 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
9368 marking = RVALUE_MARKING((VALUE)src);
9369
9370 /* Clear bits for eventual T_MOVED */
9371 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
9372 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
9373 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
9374 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
9375
9376 if (FL_TEST((VALUE)src, FL_EXIVAR)) {
9377 /* Same deal as below. Generic ivars are held in st tables.
9378 * Resizing the table could cause a GC to happen and we can't allow it */
9379 VALUE already_disabled = rb_gc_disable_no_rest();
9380 rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
9381 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9382 }
9383
9384 st_data_t srcid = (st_data_t)src, id;
9385
9386 /* If the source object's object_id has been seen, we need to update
9387 * the object to object id mapping. */
9388 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
9389 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
9390 /* inserting in the st table can cause the GC to run. We need to
9391 * prevent re-entry in to the GC since `gc_move` is running in the GC,
9392 * so temporarily disable the GC around the st table mutation */
9393 VALUE already_disabled = rb_gc_disable_no_rest();
9394 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
9395 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
9396 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9397 }
9398
9399 /* Move the object */
9400 memcpy(dest, src, slot_size);
9401 memset(src, 0, slot_size);
9402
9403 /* Set bits for object in new location */
9404 if (marking) {
9405 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9406 }
9407 else {
9408 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9409 }
9410
9411 if (marked) {
9412 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9413 }
9414 else {
9415 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9416 }
9417
9418 if (wb_unprotected) {
9419 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
9420 }
9421 else {
9422 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
9423 }
9424
9425 if (uncollectible) {
9426 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
9427 }
9428 else {
9429 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
9430 }
9431
9432 /* Assign forwarding address */
9433 src->as.moved.flags = T_MOVED;
9434 src->as.moved.dummy = Qundef;
9435 src->as.moved.destination = (VALUE)dest;
9436 GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);
9437
9438 return (VALUE)src;
9439}
9440
9441static int
9442compare_free_slots(const void *left, const void *right, void *dummy)
9443{
9444 struct heap_page *left_page;
9445 struct heap_page *right_page;
9446
9447 left_page = *(struct heap_page * const *)left;
9448 right_page = *(struct heap_page * const *)right;
9449
9450 return left_page->free_slots - right_page->free_slots;
9451}
9452
9453static void
9454gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
9455{
9456 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
9457 rb_size_pool_t *size_pool = &size_pools[j];
9458
9459 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
9460 size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
9461 struct heap_page *page = 0, **page_list = malloc(size);
9462 size_t i = 0;
9463
9464 list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
9465 page_list[i++] = page;
9466 GC_ASSERT(page);
9467 }
9468
9469 GC_ASSERT((size_t)i == total_pages);
9470
9471 /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
9472 * head of the list, so empty pages will end up at the start of the heap */
9473 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
9474
9475 /* Reset the eden heap */
9476 list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
9477
9478 for (i = 0; i < total_pages; i++) {
9479 list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
9480 if (page_list[i]->free_slots != 0) {
9481 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
9482 }
9483 }
9484
9485 free(page_list);
9486 }
9487}
9488
9489static void
9490gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
9491{
9492 long i, len;
9493
9494 if (FL_TEST(v, ELTS_SHARED))
9495 return;
9496
9497 len = RARRAY_LEN(v);
9498 if (len > 0) {
9499 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
9500 for (i = 0; i < len; i++) {
9501 UPDATE_IF_MOVED(objspace, ptr[i]);
9502 }
9503 }
9504}
9505
9506static void
9507gc_ref_update_object(rb_objspace_t * objspace, VALUE v)
9508{
9509 VALUE *ptr = ROBJECT_IVPTR(v);
9510
9511 uint32_t i, len = ROBJECT_NUMIV(v);
9512 for (i = 0; i < len; i++) {
9513 UPDATE_IF_MOVED(objspace, ptr[i]);
9514 }
9515}
9516
9517static int
9518hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
9519{
9520 rb_objspace_t *objspace = (rb_objspace_t *)argp;
9521
9522 if (gc_object_moved_p(objspace, (VALUE)*key)) {
9523 *key = rb_gc_location((VALUE)*key);
9524 }
9525
9526 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9527 *value = rb_gc_location((VALUE)*value);
9528 }
9529
9530 return ST_CONTINUE;
9531}
9532
9533static int
9534hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
9535{
9536 rb_objspace_t *objspace;
9537
9538 objspace = (rb_objspace_t *)argp;
9539
9540 if (gc_object_moved_p(objspace, (VALUE)key)) {
9541 return ST_REPLACE;
9542 }
9543
9544 if (gc_object_moved_p(objspace, (VALUE)value)) {
9545 return ST_REPLACE;
9546 }
9547 return ST_CONTINUE;
9548}
9549
9550static int
9551hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
9552{
9553 rb_objspace_t *objspace = (rb_objspace_t *)argp;
9554
9555 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9556 *value = rb_gc_location((VALUE)*value);
9557 }
9558
9559 return ST_CONTINUE;
9560}
9561
9562static int
9563hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
9564{
9565 rb_objspace_t *objspace;
9566
9567 objspace = (rb_objspace_t *)argp;
9568
9569 if (gc_object_moved_p(objspace, (VALUE)value)) {
9570 return ST_REPLACE;
9571 }
9572 return ST_CONTINUE;
9573}
9574
9575static void
9576gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
9577{
9578 if (!tbl || tbl->num_entries == 0) return;
9579
9580 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
9581 rb_raise(rb_eRuntimeError, "hash modified during iteration");
9582 }
9583}
9584
9585static void
9586gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
9587{
9588 if (!tbl || tbl->num_entries == 0) return;
9589
9590 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
9591 rb_raise(rb_eRuntimeError, "hash modified during iteration");
9592 }
9593}
9594
9595/* Update MOVED references in an st_table */
9596void
9597rb_gc_update_tbl_refs(st_table *ptr)
9598{
9599 rb_objspace_t *objspace = &rb_objspace;
9600 gc_update_table_refs(objspace, ptr);
9601}
9602
9603static void
9604gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
9605{
9606 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
9607}
9608
9609static void
9610gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
9611{
9612 rb_method_definition_t *def = me->def;
9613
9614 UPDATE_IF_MOVED(objspace, me->owner);
9615 UPDATE_IF_MOVED(objspace, me->defined_class);
9616
9617 if (def) {
9618 switch (def->type) {
9619 case VM_METHOD_TYPE_ISEQ:
9620 if (def->body.iseq.iseqptr) {
9621 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
9622 }
9623 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
9624 break;
9625 case VM_METHOD_TYPE_ATTRSET:
9626 case VM_METHOD_TYPE_IVAR:
9627 UPDATE_IF_MOVED(objspace, def->body.attr.location);
9628 break;
9629 case VM_METHOD_TYPE_BMETHOD:
9630 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
9631 break;
9632 case VM_METHOD_TYPE_ALIAS:
9633 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
9634 return;
9635 case VM_METHOD_TYPE_REFINED:
9636 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
9637 UPDATE_IF_MOVED(objspace, def->body.refined.owner);
9638 break;
9639 case VM_METHOD_TYPE_CFUNC:
9640 case VM_METHOD_TYPE_ZSUPER:
9641 case VM_METHOD_TYPE_MISSING:
9642 case VM_METHOD_TYPE_OPTIMIZED:
9643 case VM_METHOD_TYPE_UNDEF:
9644 case VM_METHOD_TYPE_NOTIMPLEMENTED:
9645 break;
9646 }
9647 }
9648}
9649
9650static void
9651gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
9652{
9653 long i;
9654
9655 for (i=0; i<n; i++) {
9656 UPDATE_IF_MOVED(objspace, values[i]);
9657 }
9658}
9659
9660static void
9661gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
9662{
9663 switch (imemo_type(obj)) {
9664 case imemo_env:
9665 {
9666 rb_env_t *env = (rb_env_t *)obj;
9667 if (LIKELY(env->ep)) {
9668 // env->ep can be NULL here just after newobj().
9669 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
9670 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
9671 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
9672 }
9673 }
9674 break;
9675 case imemo_cref:
9676 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
9677 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
9678 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
9679 break;
9680 case imemo_svar:
9681 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
9682 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
9683 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
9684 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
9685 break;
9686 case imemo_throw_data:
9687 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
9688 break;
9689 case imemo_ifunc:
9690 break;
9691 case imemo_memo:
9692 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
9693 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
9694 break;
9695 case imemo_ment:
9696 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
9697 break;
9698 case imemo_iseq:
9699 rb_iseq_update_references((rb_iseq_t *)obj);
9700 break;
9701 case imemo_ast:
9702 rb_ast_update_references((rb_ast_t *)obj);
9703 break;
9704 case imemo_callcache:
9705 {
9706 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
9707 if (cc->klass) {
9708 UPDATE_IF_MOVED(objspace, cc->klass);
9709 if (!is_live_object(objspace, cc->klass)) {
9710 *((VALUE *)(&cc->klass)) = (VALUE)0;
9711 }
9712 }
9713
9714 if (cc->cme_) {
9715 TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
9716 if (!is_live_object(objspace, (VALUE)cc->cme_)) {
9717 *((struct rb_callable_method_entry_struct **)(&cc->cme_)) = (struct rb_callable_method_entry_struct *)0;
9718 }
9719 }
9720 }
9721 break;
9722 case imemo_constcache:
9723 {
9724 const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
9725 UPDATE_IF_MOVED(objspace, ice->value);
9726 }
9727 break;
9728 case imemo_parser_strterm:
9729 case imemo_tmpbuf:
9730 case imemo_callinfo:
9731 break;
9732 default:
9733 rb_bug("not reachable %d", imemo_type(obj));
9734 break;
9735 }
9736}
9737
9738static enum rb_id_table_iterator_result
9739check_id_table_move(ID id, VALUE value, void *data)
9740{
9741 rb_objspace_t *objspace = (rb_objspace_t *)data;
9742
9743 if (gc_object_moved_p(objspace, (VALUE)value)) {
9744 return ID_TABLE_REPLACE;
9745 }
9746
9747 return ID_TABLE_CONTINUE;
9748}
9749
9750/* Returns the new location of an object, if it moved. Otherwise returns
9751 * the existing location. */
9752VALUE
9753rb_gc_location(VALUE value)
9754{
9755
9756 VALUE destination;
9757
9758 if (!SPECIAL_CONST_P(value)) {
9759 void *poisoned = asan_poisoned_object_p(value);
9760 asan_unpoison_object(value, false);
9761
9762 if (BUILTIN_TYPE(value) == T_MOVED) {
9763 destination = (VALUE)RMOVED(value)->destination;
9764 GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
9765 }
9766 else {
9767 destination = value;
9768 }
9769
9770 /* Re-poison slot if it's not the one we want */
9771 if (poisoned) {
9772 GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
9773 asan_poison_object(value);
9774 }
9775 }
9776 else {
9777 destination = value;
9778 }
9779
9780 return destination;
9781}
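/*
 * Editorial sketch (not part of the original source): C extensions that keep
 * VALUEs inside T_DATA payloads typically use rb_gc_location() from their
 * dcompact callback to chase moved objects.  The names below are hypothetical
 * and only illustrate the calling pattern.
 */
#if 0
struct my_ext_data { VALUE cache; };            /* hypothetical payload */

static void
my_ext_dcompact(void *ptr)
{
    struct my_ext_data *data = ptr;
    /* rb_gc_location() returns the new address for moved objects and the
     * argument unchanged for everything else, so it is safe to call
     * unconditionally. */
    data->cache = rb_gc_location(data->cache);
}
#endif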
9782
9783static enum rb_id_table_iterator_result
9784update_id_table(ID *key, VALUE * value, void *data, int existing)
9785{
9786 rb_objspace_t *objspace = (rb_objspace_t *)data;
9787
9788 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9789 *value = rb_gc_location((VALUE)*value);
9790 }
9791
9792 return ID_TABLE_CONTINUE;
9793}
9794
9795static void
9796update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
9797{
9798 if (tbl) {
9799 rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
9800 }
9801}
9802
9803static enum rb_id_table_iterator_result
9804update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
9805{
9806 rb_objspace_t *objspace = (rb_objspace_t *)data;
9807 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
9808 VM_ASSERT(vm_ccs_p(ccs));
9809
9810 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
9811 ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
9812 }
9813
9814 for (int i=0; i<ccs->len; i++) {
9815 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
9816 ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
9817 }
9818 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
9819 ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
9820 }
9821 }
9822
9823 // do not replace
9824 return ID_TABLE_CONTINUE;
9825}
9826
9827static void
9828update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
9829{
9830 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
9831 if (tbl) {
9832 rb_id_table_foreach_with_replace(tbl, update_cc_tbl_i, 0, objspace);
9833 }
9834}
9835
9836static enum rb_id_table_iterator_result
9837update_cvc_tbl_i(ID id, VALUE cvc_entry, void *data)
9838{
9839 struct rb_cvar_class_tbl_entry *entry;
9840
9841 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
9842
9843 entry->class_value = rb_gc_location(entry->class_value);
9844
9845 return ID_TABLE_CONTINUE;
9846}
9847
9848static void
9849update_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
9850{
9851 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
9852 if (tbl) {
9853 rb_id_table_foreach_with_replace(tbl, update_cvc_tbl_i, 0, objspace);
9854 }
9855}
9856
9857static enum rb_id_table_iterator_result
9858update_const_table(VALUE value, void *data)
9859{
9860 rb_const_entry_t *ce = (rb_const_entry_t *)value;
9861 rb_objspace_t * objspace = (rb_objspace_t *)data;
9862
9863 if (gc_object_moved_p(objspace, ce->value)) {
9864 ce->value = rb_gc_location(ce->value);
9865 }
9866
9867 if (gc_object_moved_p(objspace, ce->file)) {
9868 ce->file = rb_gc_location(ce->file);
9869 }
9870
9871 return ID_TABLE_CONTINUE;
9872}
9873
9874static void
9875update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
9876{
9877 if (!tbl) return;
9878 rb_id_table_foreach_values(tbl, update_const_table, objspace);
9879}
9880
9881static void
9882update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
9883{
9884 while (entry) {
9885 UPDATE_IF_MOVED(objspace, entry->klass);
9886 entry = entry->next;
9887 }
9888}
9889
9890static int
9891update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
9892{
9893 rb_objspace_t *objspace = (rb_objspace_t *)arg;
9894 struct rb_iv_index_tbl_entry *ent = (struct rb_iv_index_tbl_entry *)value;
9895 UPDATE_IF_MOVED(objspace, ent->class_value);
9896 return ST_CONTINUE;
9897}
9898
9899static void
9900update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
9901{
9902 UPDATE_IF_MOVED(objspace, ext->origin_);
9903 UPDATE_IF_MOVED(objspace, ext->refined_class);
9904 update_subclass_entries(objspace, ext->subclasses);
9905
9906 // ext->iv_index_tbl
9907 if (ext->iv_index_tbl) {
9908 st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
9909 }
9910}
9911
9912static void
9913gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
9914{
9915 RVALUE *any = RANY(obj);
9916
9917 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
9918
9919 switch (BUILTIN_TYPE(obj)) {
9920 case T_CLASS:
9921 case T_MODULE:
9922 if (RCLASS_SUPER((VALUE)obj)) {
9923 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
9924 }
9925 if (!RCLASS_EXT(obj)) break;
9926 update_m_tbl(objspace, RCLASS_M_TBL(obj));
9927 update_cc_tbl(objspace, obj);
9928 update_cvc_tbl(objspace, obj);
9929
9930 gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
9931
9932 update_class_ext(objspace, RCLASS_EXT(obj));
9933 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
9934 break;
9935
9936 case T_ICLASS:
9937 if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
9938 !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
9939 update_m_tbl(objspace, RCLASS_M_TBL(obj));
9940 }
9941 if (RCLASS_SUPER((VALUE)obj)) {
9942 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
9943 }
9944 if (!RCLASS_EXT(obj)) break;
9945 if (RCLASS_IV_TBL(obj)) {
9946 gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
9947 }
9948 update_class_ext(objspace, RCLASS_EXT(obj));
9949 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
9950 update_cc_tbl(objspace, obj);
9951 break;
9952
9953 case T_IMEMO:
9954 gc_ref_update_imemo(objspace, obj);
9955 return;
9956
9957 case T_NIL:
9958 case T_FIXNUM:
9959 case T_NODE:
9960 case T_MOVED:
9961 case T_NONE:
9962 /* These can't move */
9963 return;
9964
9965 case T_ARRAY:
9966 if (FL_TEST(obj, ELTS_SHARED)) {
9967 UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);
9968 }
9969 else {
9970 gc_ref_update_array(objspace, obj);
9971 }
9972 break;
9973
9974 case T_HASH:
9975 gc_ref_update_hash(objspace, obj);
9976 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
9977 break;
9978
9979 case T_STRING:
9980 if (STR_SHARED_P(obj)) {
9981#if USE_RVARGC
9982 VALUE orig_shared = any->as.string.as.heap.aux.shared;
9983#endif
9984 UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
9985#if USE_RVARGC
9986 VALUE shared = any->as.string.as.heap.aux.shared;
9987 if (STR_EMBED_P(shared)) {
9988 size_t offset = (size_t)any->as.string.as.heap.ptr - (size_t)RSTRING(orig_shared)->as.embed.ary;
9989 GC_ASSERT(any->as.string.as.heap.ptr >= RSTRING(orig_shared)->as.embed.ary);
9990 GC_ASSERT(offset <= (size_t)RSTRING(shared)->as.embed.len);
9991 any->as.string.as.heap.ptr = RSTRING(shared)->as.embed.ary + offset;
9992 }
9993#endif
9994 }
9995 break;
9996
9997 case T_DATA:
9998 /* Call the compaction callback, if it exists */
9999 {
10000 void *const ptr = DATA_PTR(obj);
10001 if (ptr) {
10002 if (RTYPEDDATA_P(obj)) {
10003 RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
10004 if (compact_func) (*compact_func)(ptr);
10005 }
10006 }
10007 }
10008 break;
10009
10010 case T_OBJECT:
10011 gc_ref_update_object(objspace, obj);
10012 break;
10013
10014 case T_FILE:
10015 if (any->as.file.fptr) {
10016 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10017 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10018 UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
10019 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
10020 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
10021 UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
10022 UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
10023 }
10024 break;
10025 case T_REGEXP:
10026 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10027 break;
10028
10029 case T_SYMBOL:
10030 if (DYNAMIC_SYM_P((VALUE)any)) {
10031 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10032 }
10033 break;
10034
10035 case T_FLOAT:
10036 case T_BIGNUM:
10037 break;
10038
10039 case T_MATCH:
10040 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10041
10042 if (any->as.match.str) {
10043 UPDATE_IF_MOVED(objspace, any->as.match.str);
10044 }
10045 break;
10046
10047 case T_RATIONAL:
10048 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10049 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10050 break;
10051
10052 case T_COMPLEX:
10053 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10054 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10055
10056 break;
10057
10058 case T_STRUCT:
10059 {
10060 long i, len = RSTRUCT_LEN(obj);
10061 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10062
10063 for (i = 0; i < len; i++) {
10064 UPDATE_IF_MOVED(objspace, ptr[i]);
10065 }
10066 }
10067 break;
10068 default:
10069#if GC_DEBUG
10070 rb_gcdebug_print_obj_condition((VALUE)obj);
10071 rb_obj_info_dump(obj);
10072 rb_bug("unreachable");
10073#endif
10074 break;
10075
10076 }
10077
10078 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10079
10080 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10081}
10082
10083static int
10084gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace, struct heap_page *page)
10085{
10086 VALUE v = (VALUE)vstart;
10087 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
10088 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
10089 page->flags.has_uncollectible_shady_objects = FALSE;
10090 page->flags.has_remembered_objects = FALSE;
10091
10092 /* For each object on the page */
10093 for (; v != (VALUE)vend; v += stride) {
10094 void *poisoned = asan_poisoned_object_p(v);
10095 asan_unpoison_object(v, false);
10096
10097 switch (BUILTIN_TYPE(v)) {
10098 case T_NONE:
10099 case T_MOVED:
10100 case T_ZOMBIE:
10101 break;
10102 default:
10103 if (RVALUE_WB_UNPROTECTED(v)) {
10104 page->flags.has_uncollectible_shady_objects = TRUE;
10105 }
10106 if (RVALUE_PAGE_MARKING(page, v)) {
10107 page->flags.has_remembered_objects = TRUE;
10108 }
10109 if (page->flags.before_sweep) {
10110 if (RVALUE_MARKED(v)) {
10111 gc_update_object_references(objspace, v);
10112 }
10113 }
10114 else {
10115 gc_update_object_references(objspace, v);
10116 }
10117 }
10118
10119 if (poisoned) {
10120 asan_poison_object(v);
10121 }
10122 }
10123
10124 return 0;
10125}
10126
10127extern rb_symbols_t ruby_global_symbols;
10128#define global_symbols ruby_global_symbols
10129
10130static void
10131gc_update_references(rb_objspace_t *objspace)
10132{
10133 rb_execution_context_t *ec = GET_EC();
10134 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10135
10136 struct heap_page *page = NULL;
10137
10138 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10139 bool should_set_mark_bits = TRUE;
10140 rb_size_pool_t *size_pool = &size_pools[i];
10141 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10142
10143 list_for_each(&heap->pages, page, page_node) {
10144 uintptr_t start = (uintptr_t)page->start;
10145 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10146
10147 gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10148 if (page == heap->sweeping_page) {
10149 should_set_mark_bits = FALSE;
10150 }
10151 if (should_set_mark_bits) {
10152 gc_setup_mark_bits(page);
10153 }
10154 }
10155 }
10156 rb_vm_update_references(vm);
10157 rb_transient_heap_update_references();
10158 rb_gc_update_global_tbl();
10159 global_symbols.ids = rb_gc_location(global_symbols.ids);
10160 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10161 gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
10162 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10163 gc_update_table_refs(objspace, global_symbols.str_sym);
10164 gc_update_table_refs(objspace, finalizer_table);
10165}
10166
10167static VALUE
10168gc_compact_stats(rb_execution_context_t *ec, VALUE self)
10169{
10170 size_t i;
10171 rb_objspace_t *objspace = &rb_objspace;
10172 VALUE h = rb_hash_new();
10173 VALUE considered = rb_hash_new();
10174 VALUE moved = rb_hash_new();
10175
10176 for (i=0; i<T_MASK; i++) {
10177 if (objspace->rcompactor.considered_count_table[i]) {
10178 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
10179 }
10180
10181 if (objspace->rcompactor.moved_count_table[i]) {
10182 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
10183 }
10184 }
10185
10186 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
10187 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
10188
10189 return h;
10190}
10191
10192static void
10193root_obj_check_moved_i(const char *category, VALUE obj, void *data)
10194{
10195 if (gc_object_moved_p(&rb_objspace, obj)) {
10196 rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
10197 }
10198}
10199
10200static void
10201reachable_object_check_moved_i(VALUE ref, void *data)
10202{
10203 VALUE parent = (VALUE)data;
10204 if (gc_object_moved_p(&rb_objspace, ref)) {
10205 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
10206 }
10207}
10208
10209static int
10210heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
10211{
10212 VALUE v = (VALUE)vstart;
10213 for (; v != (VALUE)vend; v += stride) {
10214 if (gc_object_moved_p(&rb_objspace, v)) {
10215 /* Moved object still on the heap, something may have a reference. */
10216 }
10217 else {
10218 void *poisoned = asan_poisoned_object_p(v);
10219 asan_unpoison_object(v, false);
10220
10221 switch (BUILTIN_TYPE(v)) {
10222 case T_NONE:
10223 case T_ZOMBIE:
10224 break;
10225 default:
10226 if (!rb_objspace_garbage_object_p(v)) {
10227 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
10228 }
10229 }
10230
10231 if (poisoned) {
10232 GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
10233 asan_poison_object(v);
10234 }
10235 }
10236 }
10237
10238 return 0;
10239}
10240
10241static VALUE
10242gc_compact(rb_execution_context_t *ec, VALUE self)
10243{
10244 /* Run GC with compaction enabled */
10245 gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
10246
10247 return gc_compact_stats(ec, self);
10248}
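/*
 * Editorial note: this primitive backs GC.compact at the Ruby level, so the
 * stats hash built in gc_compact_stats() above is what a call such as
 *
 *     GC.compact  #=> {:considered=>{:T_OBJECT=>..., ...}, :moved=>{...}}
 *
 * ultimately returns (shape shown for illustration only).
 */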
10249
10250static VALUE
10251gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
10252{
10253 rb_objspace_t *objspace = &rb_objspace;
10254
10255 /* Clear the heap. */
10256 gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
10257
10258 RB_VM_LOCK_ENTER();
10259 {
10260 gc_rest(objspace);
10261
10262 if (RTEST(double_heap)) {
10263 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10264 rb_size_pool_t *size_pool = &size_pools[i];
10265 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10266 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
10267 }
10268 }
10269
10270 if (RTEST(toward_empty)) {
10271 gc_sort_heap_by_empty_slots(objspace);
10272 }
10273 }
10274 RB_VM_LOCK_LEAVE();
10275
10276 gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
10277
10278 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
10279 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
10280
10281 return gc_compact_stats(ec, self);
10282}
10283
10284VALUE
10285rb_gc_start(void)
10286{
10287 rb_gc();
10288 return Qnil;
10289}
10290
10291void
10293{
10294 rb_objspace_t *objspace = &rb_objspace;
10295 unsigned int reason = GPR_DEFAULT_REASON;
10296 garbage_collect(objspace, reason);
10297}
10298
10299int
10301{
10302 rb_objspace_t *objspace = &rb_objspace;
10303 return during_gc;
10304}
10305
10306#if RGENGC_PROFILE >= 2
10307
10308static const char *type_name(int type, VALUE obj);
10309
10310static void
10311gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
10312{
10313 VALUE result = rb_hash_new_with_size(T_MASK);
10314 int i;
10315 for (i=0; i<T_MASK; i++) {
10316 const char *type = type_name(i, 0);
10317 rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
10318 }
10319 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
10320}
10321#endif
10322
10323size_t
10325{
10326 return rb_objspace.profile.count;
10327}
10328
10329static VALUE
10330gc_count(rb_execution_context_t *ec, VALUE self)
10331{
10332 return SIZET2NUM(rb_gc_count());
10333}
10334
10335static VALUE
10336gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
10337{
10338 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
10339 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
10340#if RGENGC_ESTIMATE_OLDMALLOC
10341 static VALUE sym_oldmalloc;
10342#endif
10343 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
10344 static VALUE sym_none, sym_marking, sym_sweeping;
10345 VALUE hash = Qnil, key = Qnil;
10346 VALUE major_by;
10347 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
10348
10349 if (SYMBOL_P(hash_or_key)) {
10350 key = hash_or_key;
10351 }
10352 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
10353 hash = hash_or_key;
10354 }
10355 else {
10356 rb_raise(rb_eTypeError, "non-hash or symbol given");
10357 }
10358
10359 if (NIL_P(sym_major_by)) {
10360#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
10361 S(major_by);
10362 S(gc_by);
10363 S(immediate_sweep);
10364 S(have_finalizer);
10365 S(state);
10366
10367 S(stress);
10368 S(nofree);
10369 S(oldgen);
10370 S(shady);
10371 S(force);
10372#if RGENGC_ESTIMATE_OLDMALLOC
10373 S(oldmalloc);
10374#endif
10375 S(newobj);
10376 S(malloc);
10377 S(method);
10378 S(capi);
10379
10380 S(none);
10381 S(marking);
10382 S(sweeping);
10383#undef S
10384 }
10385
10386#define SET(name, attr) \
10387 if (key == sym_##name) \
10388 return (attr); \
10389 else if (hash != Qnil) \
10390 rb_hash_aset(hash, sym_##name, (attr));
10391
10392 major_by =
10393 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
10394 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
10395 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
10396 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
10397#if RGENGC_ESTIMATE_OLDMALLOC
10398 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
10399#endif
10400 Qnil;
10401 SET(major_by, major_by);
10402
10403 SET(gc_by,
10404 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
10405 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
10406 (flags & GPR_FLAG_METHOD) ? sym_method :
10407 (flags & GPR_FLAG_CAPI) ? sym_capi :
10408 (flags & GPR_FLAG_STRESS) ? sym_stress :
10409 Qnil
10410 );
10411
10412 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
10413 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
10414
10415 if (orig_flags == 0) {
10416 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
10417 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
10418 }
10419#undef SET
10420
10421 if (!NIL_P(key)) {/* matched key should return above */
10422 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
10423 }
10424
10425 return hash;
10426}
10427
10428VALUE
10429rb_gc_latest_gc_info(VALUE key)
10430{
10431 rb_objspace_t *objspace = &rb_objspace;
10432 return gc_info_decode(objspace, key, 0);
10433}
10434
10435static VALUE
10436gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
10437{
10438 rb_objspace_t *objspace = &rb_objspace;
10439
10440 if (NIL_P(arg)) {
10441 arg = rb_hash_new();
10442 }
10443 else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
10444 rb_raise(rb_eTypeError, "non-hash or symbol given");
10445 }
10446
10447 return gc_info_decode(objspace, arg, 0);
10448}
10449
10450enum gc_stat_sym {
10451 gc_stat_sym_count,
10452 gc_stat_sym_time,
10453 gc_stat_sym_heap_allocated_pages,
10454 gc_stat_sym_heap_sorted_length,
10455 gc_stat_sym_heap_allocatable_pages,
10456 gc_stat_sym_heap_available_slots,
10457 gc_stat_sym_heap_live_slots,
10458 gc_stat_sym_heap_free_slots,
10459 gc_stat_sym_heap_final_slots,
10460 gc_stat_sym_heap_marked_slots,
10461 gc_stat_sym_heap_eden_pages,
10462 gc_stat_sym_heap_tomb_pages,
10463 gc_stat_sym_total_allocated_pages,
10464 gc_stat_sym_total_freed_pages,
10465 gc_stat_sym_total_allocated_objects,
10466 gc_stat_sym_total_freed_objects,
10467 gc_stat_sym_malloc_increase_bytes,
10468 gc_stat_sym_malloc_increase_bytes_limit,
10469 gc_stat_sym_minor_gc_count,
10470 gc_stat_sym_major_gc_count,
10471 gc_stat_sym_compact_count,
10472 gc_stat_sym_read_barrier_faults,
10473 gc_stat_sym_total_moved_objects,
10474 gc_stat_sym_remembered_wb_unprotected_objects,
10475 gc_stat_sym_remembered_wb_unprotected_objects_limit,
10476 gc_stat_sym_old_objects,
10477 gc_stat_sym_old_objects_limit,
10478#if RGENGC_ESTIMATE_OLDMALLOC
10479 gc_stat_sym_oldmalloc_increase_bytes,
10480 gc_stat_sym_oldmalloc_increase_bytes_limit,
10481#endif
10482#if RGENGC_PROFILE
10483 gc_stat_sym_total_generated_normal_object_count,
10484 gc_stat_sym_total_generated_shady_object_count,
10485 gc_stat_sym_total_shade_operation_count,
10486 gc_stat_sym_total_promoted_count,
10487 gc_stat_sym_total_remembered_normal_object_count,
10488 gc_stat_sym_total_remembered_shady_object_count,
10489#endif
10490 gc_stat_sym_last
10491};
10492
10493static VALUE gc_stat_symbols[gc_stat_sym_last];
10494
10495static void
10496setup_gc_stat_symbols(void)
10497{
10498 if (gc_stat_symbols[0] == 0) {
10499#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
10500 S(count);
10501 S(time);
10502 S(heap_allocated_pages);
10503 S(heap_sorted_length);
10504 S(heap_allocatable_pages);
10505 S(heap_available_slots);
10506 S(heap_live_slots);
10507 S(heap_free_slots);
10508 S(heap_final_slots);
10509 S(heap_marked_slots);
10510 S(heap_eden_pages);
10511 S(heap_tomb_pages);
10512 S(total_allocated_pages);
10513 S(total_freed_pages);
10514 S(total_allocated_objects);
10515 S(total_freed_objects);
10516 S(malloc_increase_bytes);
10517 S(malloc_increase_bytes_limit);
10518 S(minor_gc_count);
10519 S(major_gc_count);
10520 S(compact_count);
10521 S(read_barrier_faults);
10522 S(total_moved_objects);
10523 S(remembered_wb_unprotected_objects);
10524 S(remembered_wb_unprotected_objects_limit);
10525 S(old_objects);
10526 S(old_objects_limit);
10527#if RGENGC_ESTIMATE_OLDMALLOC
10528 S(oldmalloc_increase_bytes);
10529 S(oldmalloc_increase_bytes_limit);
10530#endif
10531#if RGENGC_PROFILE
10532 S(total_generated_normal_object_count);
10533 S(total_generated_shady_object_count);
10534 S(total_shade_operation_count);
10535 S(total_promoted_count);
10536 S(total_remembered_normal_object_count);
10537 S(total_remembered_shady_object_count);
10538#endif /* RGENGC_PROFILE */
10539#undef S
10540 }
10541}
10542
10543static size_t
10544gc_stat_internal(VALUE hash_or_sym)
10545{
10546 rb_objspace_t *objspace = &rb_objspace;
10547 VALUE hash = Qnil, key = Qnil;
10548
10549 setup_gc_stat_symbols();
10550
10551 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
10552 hash = hash_or_sym;
10553 }
10554 else if (SYMBOL_P(hash_or_sym)) {
10555 key = hash_or_sym;
10556 }
10557 else {
10558 rb_raise(rb_eTypeError, "non-hash or symbol argument");
10559 }
10560
10561#define SET(name, attr) \
10562 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
10563 return attr; \
10564 else if (hash != Qnil) \
10565 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
10566
10567 SET(count, objspace->profile.count);
10568 SET(time, (size_t) (objspace->profile.total_time_ns / (1000 * 1000) /* ns -> ms */)); // TODO: UINT64T2NUM
10569
10570 /* implementation dependent counters */
10571 SET(heap_allocated_pages, heap_allocated_pages);
10572 SET(heap_sorted_length, heap_pages_sorted_length);
10573 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
10574 SET(heap_available_slots, objspace_available_slots(objspace));
10575 SET(heap_live_slots, objspace_live_slots(objspace));
10576 SET(heap_free_slots, objspace_free_slots(objspace));
10577 SET(heap_final_slots, heap_pages_final_slots);
10578 SET(heap_marked_slots, objspace->marked_slots);
10579 SET(heap_eden_pages, heap_eden_total_pages(objspace));
10580 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
10581 SET(total_allocated_pages, objspace->profile.total_allocated_pages);
10582 SET(total_freed_pages, objspace->profile.total_freed_pages);
10583 SET(total_allocated_objects, objspace->total_allocated_objects);
10584 SET(total_freed_objects, objspace->profile.total_freed_objects);
10585 SET(malloc_increase_bytes, malloc_increase);
10586 SET(malloc_increase_bytes_limit, malloc_limit);
10587 SET(minor_gc_count, objspace->profile.minor_gc_count);
10588 SET(major_gc_count, objspace->profile.major_gc_count);
10589 SET(compact_count, objspace->profile.compact_count);
10590 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
10591 SET(total_moved_objects, objspace->rcompactor.total_moved);
10592 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
10593 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
10594 SET(old_objects, objspace->rgengc.old_objects);
10595 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
10596#if RGENGC_ESTIMATE_OLDMALLOC
10597 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
10598 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
10599#endif
10600
10601#if RGENGC_PROFILE
10602 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
10603 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
10604 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
10605 SET(total_promoted_count, objspace->profile.total_promoted_count);
10606 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
10607 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
10608#endif /* RGENGC_PROFILE */
10609#undef SET
10610
10611 if (!NIL_P(key)) { /* matched key should return above */
10612 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
10613 }
10614
10615#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
10616 if (hash != Qnil) {
10617 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
10618 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
10619 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
10620 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
10621 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
10622 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
10623 }
10624#endif
10625
10626 return 0;
10627}
10628
10629static VALUE
10630gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
10631{
10632 if (NIL_P(arg)) {
10633 arg = rb_hash_new();
10634 }
10635 else if (SYMBOL_P(arg)) {
10636 size_t value = gc_stat_internal(arg);
10637 return SIZET2NUM(value);
10638 }
10639 else if (RB_TYPE_P(arg, T_HASH)) {
10640 // ok
10641 }
10642 else {
10643 rb_raise(rb_eTypeError, "non-hash or symbol given");
10644 }
10645
10646 gc_stat_internal(arg);
10647 return arg;
10648}
10649
10650size_t
10651rb_gc_stat(VALUE key)
10652{
10653 if (SYMBOL_P(key)) {
10654 size_t value = gc_stat_internal(key);
10655 return value;
10656 }
10657 else {
10658 gc_stat_internal(key);
10659 return 0;
10660 }
10661}
10662
10663static VALUE
10664gc_stress_get(rb_execution_context_t *ec, VALUE self)
10665{
10666 rb_objspace_t *objspace = &rb_objspace;
10667 return ruby_gc_stress_mode;
10668}
10669
10670static void
10671gc_stress_set(rb_objspace_t *objspace, VALUE flag)
10672{
10673 objspace->flags.gc_stressful = RTEST(flag);
10674 objspace->gc_stress_mode = flag;
10675}
10676
10677static VALUE
10678gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
10679{
10680 rb_objspace_t *objspace = &rb_objspace;
10681 gc_stress_set(objspace, flag);
10682 return flag;
10683}
10684
10685VALUE
10687{
10688 rb_objspace_t *objspace = &rb_objspace;
10689 return rb_objspace_gc_enable(objspace);
10690}
10691
10692VALUE
10693rb_objspace_gc_enable(rb_objspace_t *objspace)
10694{
10695 int old = dont_gc_val();
10696
10697 dont_gc_off();
10698 return RBOOL(old);
10699}
10700
10701static VALUE
10702gc_enable(rb_execution_context_t *ec, VALUE _)
10703{
10704 return rb_gc_enable();
10705}
10706
10707VALUE
10708rb_gc_disable_no_rest(void)
10709{
10710 rb_objspace_t *objspace = &rb_objspace;
10711 return gc_disable_no_rest(objspace);
10712}
10713
10714static VALUE
10715gc_disable_no_rest(rb_objspace_t *objspace)
10716{
10717 int old = dont_gc_val();
10718 dont_gc_on();
10719 return RBOOL(old);
10720}
10721
10722VALUE
10724{
10725 rb_objspace_t *objspace = &rb_objspace;
10726 return rb_objspace_gc_disable(objspace);
10727}
10728
10729VALUE
10730rb_objspace_gc_disable(rb_objspace_t *objspace)
10731{
10732 gc_rest(objspace);
10733 return gc_disable_no_rest(objspace);
10734}
10735
10736static VALUE
10737gc_disable(rb_execution_context_t *ec, VALUE _)
10738{
10739 return rb_gc_disable();
10740}
10741
10742static VALUE
10743gc_set_auto_compact(rb_execution_context_t *ec, VALUE _, VALUE v)
10744{
10745 /* Unless we are on MinGW/Windows or have mmap available, we cannot use
10746 * mprotect for the read barrier, so automatic compaction must be disabled. */
10747#if !defined(__MINGW32__) && !defined(_WIN32)
10748 if (!USE_MMAP_ALIGNED_ALLOC) {
10749 rb_raise(rb_eNotImpError, "Automatic compaction isn't available on this platform");
10750 }
10751#endif
10752
10753 ruby_enable_autocompact = RTEST(v);
10754 return v;
10755}
10756
10757static VALUE
10758gc_get_auto_compact(rb_execution_context_t *ec, VALUE _)
10759{
10760 return RBOOL(ruby_enable_autocompact);
10761}
10762
10763static int
10764get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
10765{
10766 const char *ptr = getenv(name);
10767 ssize_t val;
10768
10769 if (ptr != NULL && *ptr) {
10770 size_t unit = 0;
10771 char *end;
10772#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
10773 val = strtoll(ptr, &end, 0);
10774#else
10775 val = strtol(ptr, &end, 0);
10776#endif
10777 switch (*end) {
10778 case 'k': case 'K':
10779 unit = 1024;
10780 ++end;
10781 break;
10782 case 'm': case 'M':
10783 unit = 1024*1024;
10784 ++end;
10785 break;
10786 case 'g': case 'G':
10787 unit = 1024*1024*1024;
10788 ++end;
10789 break;
10790 }
10791 while (*end && isspace((unsigned char)*end)) end++;
10792 if (*end) {
10793 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
10794 return 0;
10795 }
10796 if (unit > 0) {
10797 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
10798 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
10799 return 0;
10800 }
10801 val *= unit;
10802 }
10803 if (val > 0 && (size_t)val > lower_bound) {
10804 if (RTEST(ruby_verbose)) {
10805 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
10806 }
10807 *default_value = (size_t)val;
10808 return 1;
10809 }
10810 else {
10811 if (RTEST(ruby_verbose)) {
10812 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
10813 name, val, *default_value, lower_bound);
10814 }
10815 return 0;
10816 }
10817 }
10818 return 0;
10819}
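/*
 * Editorial example: with RUBY_GC_MALLOC_LIMIT=16M in the environment,
 * get_envparam_size("RUBY_GC_MALLOC_LIMIT", &val, 0) parses "16M" as
 * 16 * 1024 * 1024 = 16777216 and stores it in *val.  Values not strictly
 * greater than lower_bound, overflowing values, and strings with trailing
 * garbage are ignored (with a warning when $VERBOSE is enabled).
 */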
10820
10821static int
10822get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
10823{
10824 const char *ptr = getenv(name);
10825 double val;
10826
10827 if (ptr != NULL && *ptr) {
10828 char *end;
10829 val = strtod(ptr, &end);
10830 if (!*ptr || *end) {
10831 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
10832 return 0;
10833 }
10834
10835 if (accept_zero && val == 0.0) {
10836 goto accept;
10837 }
10838 else if (val <= lower_bound) {
10839 if (RTEST(ruby_verbose)) {
10840 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
10841 name, val, *default_value, lower_bound);
10842 }
10843 }
10844 else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
10845 val > upper_bound) {
10846 if (RTEST(ruby_verbose)) {
10847 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
10848 name, val, *default_value, upper_bound);
10849 }
10850 }
10851 else {
10852 goto accept;
10853 }
10854 }
10855 return 0;
10856
10857 accept:
10858 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
10859 *default_value = val;
10860 return 1;
10861}
10862
10863static void
10864gc_set_initial_pages(void)
10865{
10866 size_t min_pages;
10867 rb_objspace_t *objspace = &rb_objspace;
10868
10869 gc_rest(objspace);
10870
10871 min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
10872
10873 size_t pages_per_class = (min_pages - heap_eden_total_pages(objspace)) / SIZE_POOL_COUNT;
10874
10875 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10876 rb_size_pool_t *size_pool = &size_pools[i];
10877
10878 heap_add_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool), pages_per_class);
10879 }
10880
10881 heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), min_pages - heap_eden_total_pages(objspace));
10882}
10883
10884/*
10885 * GC tuning environment variables
10886 *
10887 * * RUBY_GC_HEAP_INIT_SLOTS
10888 * - Initial allocation slots.
10889 * * RUBY_GC_HEAP_FREE_SLOTS
10890 * - Prepare at least this number of slots after GC.
10891 * - Allocate additional slots if there are not enough.
10892 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
10893 * - Allocate slots by this factor.
10894 * - (next slots number) = (current slots number) * (this factor)
10895 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
10896 * - Allocation rate is limited to this number of slots.
10897 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
10898 * - Allocate additional pages when the number of free slots is
10899 * lower than the value (total_slots * (this ratio)).
10900 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
10901 * - Allocate slots to satisfy this formula:
10902 * free_slots = total_slots * goal_ratio
10903 * - In other words, prepare (total_slots * goal_ratio) free slots.
10904 * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
10905 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
10906 * - Allow pages to be freed when the number of free slots is
10907 * greater than the value (total_slots * (this ratio)).
10908 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
10909 * - Do full GC when the number of old objects is more than R * N
10910 * where R is this factor and
10911 * N is the number of old objects just after last full GC.
10912 *
10913 * * obsolete
10914 * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
10915 * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
10916 *
10917 * * RUBY_GC_MALLOC_LIMIT
10918 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
10919 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
10920 *
10921 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
10922 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
10923 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
10924 */
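/*
 * Editorial worked example (illustration only): with
 * RUBY_GC_HEAP_GROWTH_FACTOR=1.8 and 10000 slots currently allocated, the
 * next target is roughly 10000 * 1.8 = 18000 slots, capped by
 * RUBY_GC_HEAP_GROWTH_MAX_SLOTS when that is non-zero.  With
 * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO=0.4 and 10000 total slots, the GC
 * instead tries to keep about 10000 * 0.4 = 4000 slots free after GC.
 */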
10925
10926void
10927ruby_gc_set_params(void)
10928{
10929 /* RUBY_GC_HEAP_FREE_SLOTS */
10930 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
10931 /* ok */
10932 }
10933
10934 /* RUBY_GC_HEAP_INIT_SLOTS */
10935 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
10936 gc_set_initial_pages();
10937 }
10938
10939 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
10940 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
10941 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
10942 0.0, 1.0, FALSE);
10943 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
10944 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
10945 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
10946 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
10947 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
10948
10949 get_envparam_size ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
10950 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
10951 if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
10952 gc_params.malloc_limit_max = SIZE_MAX;
10953 }
10954 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
10955
10956#if RGENGC_ESTIMATE_OLDMALLOC
10957 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
10958 rb_objspace_t *objspace = &rb_objspace;
10959 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
10960 }
10961 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
10962 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
10963#endif
10964}
10965
10966static void
10967reachable_objects_from_callback(VALUE obj)
10968{
10969 rb_ractor_t *cr = GET_RACTOR();
10970 cr->mfd->mark_func(obj, cr->mfd->data);
10971}
10972
10973void
10974rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
10975{
10976 rb_objspace_t *objspace = &rb_objspace;
10977
10978 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
10979
10980 if (is_markable_object(objspace, obj)) {
10981 rb_ractor_t *cr = GET_RACTOR();
10982 struct gc_mark_func_data_struct mfd = {
10983 .mark_func = func,
10984 .data = data,
10985 }, *prev_mfd = cr->mfd;
10986
10987 cr->mfd = &mfd;
10988 gc_mark_children(objspace, obj);
10989 cr->mfd = prev_mfd;
10990 }
10991}
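/*
 * Editorial sketch: callers supply a function that is invoked once per object
 * directly reachable from `obj`.  The helper names below are hypothetical.
 */
#if 0
static void
count_edge(VALUE child, void *data)
{
    (*(size_t *)data)++;        /* count each outgoing reference */
}

static size_t
count_edges(VALUE obj)
{
    size_t n = 0;
    rb_objspace_reachable_objects_from(obj, count_edge, &n);
    return n;
}
#endif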
10992
10993struct root_objects_data {
10994 const char *category;
10995 void (*func)(const char *category, VALUE, void *);
10996 void *data;
10997};
10998
10999static void
11000root_objects_from(VALUE obj, void *ptr)
11001{
11002 const struct root_objects_data *data = (struct root_objects_data *)ptr;
11003 (*data->func)(data->category, obj, data->data);
11004}
11005
11006void
11007rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
11008{
11009 rb_objspace_t *objspace = &rb_objspace;
11010 objspace_reachable_objects_from_root(objspace, func, passing_data);
11011}
11012
11013static void
11014objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
11015{
11016 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
11017
11018 rb_ractor_t *cr = GET_RACTOR();
11019 struct root_objects_data data = {
11020 .func = func,
11021 .data = passing_data,
11022 };
11023 struct gc_mark_func_data_struct mfd = {
11024 .mark_func = root_objects_from,
11025 .data = &data,
11026 }, *prev_mfd = cr->mfd;
11027
11028 cr->mfd = &mfd;
11029 gc_mark_roots(objspace, &data.category);
11030 cr->mfd = prev_mfd;
11031}
11032
11033/*
11034 ------------------------ Extended allocator ------------------------
11035*/
11036
11038 VALUE exc;
11039 const char *fmt;
11040 va_list *ap;
11041};
11042
11043static void *
11044gc_vraise(void *ptr)
11045{
11046 struct gc_raise_tag *argv = ptr;
11047 rb_vraise(argv->exc, argv->fmt, *argv->ap);
11048 UNREACHABLE_RETURN(NULL);
11049}
11050
11051static void
11052gc_raise(VALUE exc, const char *fmt, ...)
11053{
11054 va_list ap;
11055 va_start(ap, fmt);
11056 struct gc_raise_tag argv = {
11057 exc, fmt, &ap,
11058 };
11059
11060 if (ruby_thread_has_gvl_p()) {
11061 gc_vraise(&argv);
11062 UNREACHABLE;
11063 }
11064 else if (ruby_native_thread_p()) {
11065 rb_thread_call_with_gvl(gc_vraise, &argv);
11066 UNREACHABLE;
11067 }
11068 else {
11069 /* Not in a ruby thread */
11070 fprintf(stderr, "%s", "[FATAL] ");
11071 vfprintf(stderr, fmt, ap);
11072 }
11073
11074 va_end(ap);
11075 abort();
11076}
11077
11078static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
11079
11080static void
11081negative_size_allocation_error(const char *msg)
11082{
11083 gc_raise(rb_eNoMemError, "%s", msg);
11084}
11085
11086static void *
11087ruby_memerror_body(void *dummy)
11088{
11089 rb_memerror();
11090 return 0;
11091}
11092
11093NORETURN(static void ruby_memerror(void));
11095static void
11096ruby_memerror(void)
11097{
11098 if (ruby_thread_has_gvl_p()) {
11099 rb_memerror();
11100 }
11101 else {
11102 if (ruby_native_thread_p()) {
11103 rb_thread_call_with_gvl(ruby_memerror_body, 0);
11104 }
11105 else {
11106 /* no ruby thread */
11107 fprintf(stderr, "[FATAL] failed to allocate memory\n");
11108 }
11109 }
11110 exit(EXIT_FAILURE);
11111}
11112
11113void
11114rb_memerror(void)
11115{
11116 rb_execution_context_t *ec = GET_EC();
11117 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
11118 VALUE exc;
11119
11120 if (0) {
11121 // Print the pid and sleep, so you can attach a debugger to see what went wrong:
11122 fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
11123 sleep(60);
11124 }
11125
11126 if (during_gc) {
11127 // TODO: OMG!! How to implement it?
11128 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
11129 }
11130
11131 exc = nomem_error;
11132 if (!exc ||
11133 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
11134 fprintf(stderr, "[FATAL] failed to allocate memory\n");
11135 exit(EXIT_FAILURE);
11136 }
11137 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
11138 rb_ec_raised_clear(ec);
11139 }
11140 else {
11141 rb_ec_raised_set(ec, RAISED_NOMEMORY);
11142 exc = ruby_vm_special_exception_copy(exc);
11143 }
11144 ec->errinfo = exc;
11145 EC_JUMP_TAG(ec, TAG_RAISE);
11146}
11147
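/*
 * Editorial note on the mmap path below: alignment is obtained by
 * over-allocating `alignment + size` bytes and trimming both ends.  For
 * example, with alignment = 0x4000 and mmap returning ...5000, the aligned
 * pointer becomes ...8000; the 0x3000-byte head and the 0x1000 bytes past
 * `aligned + size` are handed back with munmap.
 */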
11148void *
11149rb_aligned_malloc(size_t alignment, size_t size)
11150{
11151 void *res;
11152
11153#if defined __MINGW32__
11154 res = __mingw_aligned_malloc(size, alignment);
11155#elif defined _WIN32
11156 void *_aligned_malloc(size_t, size_t);
11157 res = _aligned_malloc(size, alignment);
11158#else
11159 if (USE_MMAP_ALIGNED_ALLOC) {
11160 GC_ASSERT(alignment % sysconf(_SC_PAGE_SIZE) == 0);
11161
11162 char *ptr = mmap(NULL, alignment + size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
11163 if (ptr == MAP_FAILED) {
11164 return NULL;
11165 }
11166
11167 char *aligned = ptr + alignment;
11168 aligned -= ((VALUE)aligned & (alignment - 1));
11169 GC_ASSERT(aligned > ptr);
11170 GC_ASSERT(aligned <= ptr + alignment);
11171
11172 size_t start_out_of_range_size = aligned - ptr;
11173 GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
11174 if (start_out_of_range_size > 0) {
11175 if (munmap(ptr, start_out_of_range_size)) {
11176 rb_bug("rb_aligned_malloc: munmap failed for start");
11177 }
11178 }
11179
11180 size_t end_out_of_range_size = alignment - start_out_of_range_size;
11181 GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
11182 if (end_out_of_range_size > 0) {
11183 if (munmap(aligned + size, end_out_of_range_size)) {
11184 rb_bug("rb_aligned_malloc: munmap failed for end");
11185 }
11186 }
11187
11188 res = (void *)aligned;
11189 }
11190 else {
11191# if defined(HAVE_POSIX_MEMALIGN)
11192 if (posix_memalign(&res, alignment, size) != 0) {
11193 return NULL;
11194 }
11195# elif defined(HAVE_MEMALIGN)
11196 res = memalign(alignment, size);
11197# else
11198 char* aligned;
11199 res = malloc(alignment + size + sizeof(void*));
11200 aligned = (char*)res + alignment + sizeof(void*);
11201 aligned -= ((VALUE)aligned & (alignment - 1));
11202 ((void**)aligned)[-1] = res;
11203 res = (void*)aligned;
11204# endif
11205 }
11206#endif
11207
11208 /* alignment must be a power of 2 */
11209 GC_ASSERT(((alignment - 1) & alignment) == 0);
11210 GC_ASSERT(alignment % sizeof(void*) == 0);
11211 return res;
11212}
11213
11214static void
11215rb_aligned_free(void *ptr, size_t size)
11216{
11217#if defined __MINGW32__
11218 __mingw_aligned_free(ptr);
11219#elif defined _WIN32
11220 _aligned_free(ptr);
11221#else
11222 if (USE_MMAP_ALIGNED_ALLOC) {
11223 GC_ASSERT(size % sysconf(_SC_PAGE_SIZE) == 0);
11224 if (munmap(ptr, size)) {
11225 rb_bug("rb_aligned_free: munmap failed");
11226 }
11227 }
11228 else {
11229# if defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
11230 free(ptr);
11231# else
11232 free(((void**)ptr)[-1]);
11233# endif
11234 }
11235#endif
11236}
11237
11238static inline size_t
11239objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
11240{
11241#ifdef HAVE_MALLOC_USABLE_SIZE
11242 return malloc_usable_size(ptr);
11243#else
11244 return hint;
11245#endif
11246}
11247
11248enum memop_type {
11249 MEMOP_TYPE_MALLOC = 0,
11250 MEMOP_TYPE_FREE,
11251 MEMOP_TYPE_REALLOC
11252};
11253
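/*
 * Editorial note: atomic_sub_nounderflow() subtracts `sub` from *var without
 * letting it wrap below zero; if another thread shrinks *var concurrently the
 * CAS fails and the loop retries with a re-clamped `sub`.
 */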
11254static inline void
11255atomic_sub_nounderflow(size_t *var, size_t sub)
11256{
11257 if (sub == 0) return;
11258
11259 while (1) {
11260 size_t val = *var;
11261 if (val < sub) sub = val;
11262 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
11263 }
11264}
11265
11266static void
11267objspace_malloc_gc_stress(rb_objspace_t *objspace)
11268{
11269 if (ruby_gc_stressful && ruby_native_thread_p()) {
11270 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
11271 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
11272
11273 if (gc_stress_full_mark_after_malloc_p()) {
11274 reason |= GPR_FLAG_FULL_MARK;
11275 }
11276 garbage_collect_with_gvl(objspace, reason);
11277 }
11278}
11279
11280static inline bool
11281objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
11282{
11283 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
11284 mem,
11285 type == MEMOP_TYPE_MALLOC ? "malloc" :
11286 type == MEMOP_TYPE_FREE ? "free " :
11287 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
11288 new_size, old_size);
11289 return false;
11290}
11291
11292static bool
11293objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
11294{
11295 if (new_size > old_size) {
11296 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
11297#if RGENGC_ESTIMATE_OLDMALLOC
11298 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
11299#endif
11300 }
11301 else {
11302 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
11303#if RGENGC_ESTIMATE_OLDMALLOC
11304 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
11305#endif
11306 }
11307
11308 if (type == MEMOP_TYPE_MALLOC) {
11309 retry:
11310 if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
11311 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
11312 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
11313 goto retry;
11314 }
11315 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
11316 }
11317 }
11318
11319#if MALLOC_ALLOCATED_SIZE
11320 if (new_size >= old_size) {
11321 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
11322 }
11323 else {
11324 size_t dec_size = old_size - new_size;
11325 size_t allocated_size = objspace->malloc_params.allocated_size;
11326
11327#if MALLOC_ALLOCATED_SIZE_CHECK
11328 if (allocated_size < dec_size) {
11329 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
11330 }
11331#endif
11332 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
11333 }
11334
11335 switch (type) {
11336 case MEMOP_TYPE_MALLOC:
11337 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
11338 break;
11339 case MEMOP_TYPE_FREE:
11340 {
11341 size_t allocations = objspace->malloc_params.allocations;
11342 if (allocations > 0) {
11343 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
11344 }
11345#if MALLOC_ALLOCATED_SIZE_CHECK
11346 else {
11347 GC_ASSERT(objspace->malloc_params.allocations > 0);
11348 }
11349#endif
11350 }
11351 break;
11352 case MEMOP_TYPE_REALLOC: /* ignore */ break;
11353 }
11354#endif
11355 return true;
11356}
11357
11358#define objspace_malloc_increase(...) \
11359 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
11360 !malloc_increase_done; \
11361 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
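/*
 * Editorial note: the macro above is a single-iteration for-loop in disguise.
 * The initializer runs objspace_malloc_increase_report() (which always
 * returns false), the statement attached by the caller then runs once, and
 * the "increment" runs objspace_malloc_increase_body(), which does the
 * accounting and returns true to end the loop.  A bare semicolon, as in
 * objspace_malloc_fixup(), is a valid (empty) attached statement.
 */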
11362
11363struct malloc_obj_info { /* 4 words */
11364 size_t size;
11365#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11366 size_t gen;
11367 const char *file;
11368 size_t line;
11369#endif
11370};
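/*
 * Editorial note: under CALC_EXACT_MALLOC_SIZE each xmalloc'ed block is
 * prefixed with a struct malloc_obj_info header.  The caller receives
 * `info + 1`, and objspace_xfree()/objspace_xrealloc() recover the header
 * with `(struct malloc_obj_info *)ptr - 1`.
 */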
11371
11372#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11373const char *ruby_malloc_info_file;
11374int ruby_malloc_info_line;
11375#endif
11376
11377static inline size_t
11378objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
11379{
11380 if (size == 0) size = 1;
11381
11382#if CALC_EXACT_MALLOC_SIZE
11383 size += sizeof(struct malloc_obj_info);
11384#endif
11385
11386 return size;
11387}
11388
11389static inline void *
11390objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
11391{
11392 size = objspace_malloc_size(objspace, mem, size);
11393 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
11394
11395#if CALC_EXACT_MALLOC_SIZE
11396 {
11397 struct malloc_obj_info *info = mem;
11398 info->size = size;
11399#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11400 info->gen = objspace->profile.count;
11401 info->file = ruby_malloc_info_file;
11402 info->line = info->file ? ruby_malloc_info_line : 0;
11403#endif
11404 mem = info + 1;
11405 }
11406#endif
11407
11408 return mem;
11409}
11410
11411#if defined(__GNUC__) && RUBY_DEBUG
11412#define RB_BUG_INSTEAD_OF_RB_MEMERROR
11413#endif
11414
11415#ifdef RB_BUG_INSTEAD_OF_RB_MEMERROR
11416#define TRY_WITH_GC(siz, expr) do { \
11417 const gc_profile_record_flag gpr = \
11418 GPR_FLAG_FULL_MARK | \
11419 GPR_FLAG_IMMEDIATE_MARK | \
11420 GPR_FLAG_IMMEDIATE_SWEEP | \
11421 GPR_FLAG_MALLOC; \
11422 objspace_malloc_gc_stress(objspace); \
11423 \
11424 if (LIKELY((expr))) { \
11425 /* Success on 1st try */ \
11426 } \
11427 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
11428 /* @shyouhei thinks this doesn't happen */ \
11429 rb_bug("TRY_WITH_GC: could not GC"); \
11430 } \
11431 else if ((expr)) { \
11432 /* Success on 2nd try */ \
11433 } \
11434 else { \
11435 rb_bug("TRY_WITH_GC: could not allocate:" \
11436 "%"PRIdSIZE" bytes for %s", \
11437 siz, # expr); \
11438 } \
11439 } while (0)
11440#else
11441#define TRY_WITH_GC(siz, alloc) do { \
11442 objspace_malloc_gc_stress(objspace); \
11443 if (!(alloc) && \
11444 (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
11445 GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
11446 GPR_FLAG_MALLOC) || \
11447 !(alloc))) { \
11448 ruby_memerror(); \
11449 } \
11450 } while (0)
11451#endif
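/*
 * Editorial note: both TRY_WITH_GC variants follow the same retry pattern:
 * attempt the allocation, and on failure run a full immediate-mark/sweep GC
 * and try once more.  Only when the GC or the second attempt also fails do
 * they give up, via rb_bug() in the debug variant or ruby_memerror() in the
 * normal build.
 */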
11452
11453/* these shouldn't be called directly.
11454 * objspace_* functions do not check allocation size.
11455 */
11456static void *
11457objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
11458{
11459 void *mem;
11460
11461 size = objspace_malloc_prepare(objspace, size);
11462 TRY_WITH_GC(size, mem = malloc(size));
11463 RB_DEBUG_COUNTER_INC(heap_xmalloc);
11464 return objspace_malloc_fixup(objspace, mem, size);
11465}
11466
11467static inline size_t
11468xmalloc2_size(const size_t count, const size_t elsize)
11469{
11470 return size_mul_or_raise(count, elsize, rb_eArgError);
11471}
11472
11473static void *
11474objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
11475{
11476 void *mem;
11477
11478 if (!ptr) return objspace_xmalloc0(objspace, new_size);
11479
11480 /*
11481 * The behavior of realloc(ptr, 0) is implementation defined.
11482 * Therefore we don't use realloc(ptr, 0), for portability reasons.
11483 * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
11484 */
11485 if (new_size == 0) {
11486 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
11487 /*
11488 * - OpenBSD's malloc(3) man page says that when 0 is passed, it
11489 * returns a non-NULL pointer to an access-protected memory page.
11490 * The returned pointer cannot be read / written at all, but
11491 * still be a valid argument of free().
11492 *
11493 * https://man.openbsd.org/malloc.3
11494 *
11495 * - Linux's malloc(3) man page says that it _might_ return
11496 * a non-NULL pointer when its argument is 0. That return value
11497 * is safe (and is expected) to be passed to free().
11498 *
11499 * http://man7.org/linux/man-pages/man3/malloc.3.html
11500 *
11501 * - As I read the implementation, jemalloc's malloc() returns a fully
11502 * normal 16-byte memory region when its argument is 0.
11503 *
11504 * - As I read the implementation, musl libc's malloc() returns a fully
11505 * normal 32-byte memory region when its argument is 0.
11506 *
11507 * - Other malloc implementations can also return non-NULL.
11508 */
11509 objspace_xfree(objspace, ptr, old_size);
11510 return mem;
11511 }
11512 else {
11513 /*
11514 * It is dangerous to return NULL here, because that could lead to
11515 * RCE. Fall back to 1 byte instead of zero.
11516 *
11517 * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
11518 */
11519 new_size = 1;
11520 }
11521 }
11522
11523#if CALC_EXACT_MALLOC_SIZE
11524 {
11525 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
11526 new_size += sizeof(struct malloc_obj_info);
11527 ptr = info;
11528 old_size = info->size;
11529 }
11530#endif
11531
11532 old_size = objspace_malloc_size(objspace, ptr, old_size);
11533 TRY_WITH_GC(new_size, mem = realloc(ptr, new_size));
11534 new_size = objspace_malloc_size(objspace, mem, new_size);
11535
11536#if CALC_EXACT_MALLOC_SIZE
11537 {
11538 struct malloc_obj_info *info = mem;
11539 info->size = new_size;
11540 mem = info + 1;
11541 }
11542#endif
11543
11544 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
11545
11546 RB_DEBUG_COUNTER_INC(heap_xrealloc);
11547 return mem;
11548}
11549
11550#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
11551
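/* Statistics plumbing for the details mode: objspace_xfree() below bins every
 * freed region by GC generation age, by size class (16 << i bytes) and by
 * allocation-site file, and rb_malloc_info_show_results() -- registered as a
 * destructor -- dumps the three tables to stderr at process exit. */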
11552#define MALLOC_INFO_GEN_SIZE 100
11553#define MALLOC_INFO_SIZE_SIZE 10
11554static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
11555static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
11556static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
11557static st_table *malloc_info_file_table;
11558
11559static int
11560mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
11561{
11562 const char *file = (void *)key;
11563 const size_t *data = (void *)val;
11564
11565 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
11566
11567 return ST_CONTINUE;
11568}
11569
11570__attribute__((destructor))
11571void
11572rb_malloc_info_show_results(void)
11573{
11574 int i;
11575
11576 fprintf(stderr, "* malloc_info gen statistics\n");
11577 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
11578 if (i == MALLOC_INFO_GEN_SIZE-1) {
11579 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
11580 }
11581 else {
11582 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
11583 }
11584 }
11585
11586 fprintf(stderr, "* malloc_info size statistics\n");
11587 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
11588 int s = 16 << i;
11589 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
11590 }
11591 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
11592
11593 if (malloc_info_file_table) {
11594 fprintf(stderr, "* malloc_info file statistics\n");
11595 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
11596 }
11597}
11598#else
11599void
11600rb_malloc_info_show_results(void)
11601{
11602}
11603#endif
11604
11605static void
11606objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
11607{
11608 if (!ptr) {
11609 /*
11610 * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
11611         * its first version. We had better follow it.
11612 */
11613 return;
11614 }
11615#if CALC_EXACT_MALLOC_SIZE
11616 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
11617 ptr = info;
11618 old_size = info->size;
11619
11620#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11621 {
11622 int gen = (int)(objspace->profile.count - info->gen);
11623 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
11624 int i;
11625
11626 malloc_info_gen_cnt[gen_index]++;
11627 malloc_info_gen_size[gen_index] += info->size;
11628
11629 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
11630 size_t s = 16 << i;
11631 if (info->size <= s) {
11632 malloc_info_size[i]++;
11633 goto found;
11634 }
11635 }
11636 malloc_info_size[i]++;
11637 found:;
11638
11639 {
11640 st_data_t key = (st_data_t)info->file, d;
11641 size_t *data;
11642
11643 if (malloc_info_file_table == NULL) {
11644 malloc_info_file_table = st_init_numtable_with_size(1024);
11645 }
11646 if (st_lookup(malloc_info_file_table, key, &d)) {
11647 /* hit */
11648 data = (size_t *)d;
11649 }
11650 else {
11651 data = malloc(xmalloc2_size(2, sizeof(size_t)));
11652 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
11653 data[0] = data[1] = 0;
11654 st_insert(malloc_info_file_table, key, (st_data_t)data);
11655 }
11656 data[0] ++;
11657 data[1] += info->size;
11658 };
11659 if (0 && gen >= 2) { /* verbose output */
11660 if (info->file) {
11661 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
11662 info->size, gen, info->file, info->line);
11663 }
11664 else {
11665 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
11666 info->size, gen);
11667 }
11668 }
11669 }
11670#endif
11671#endif
11672 old_size = objspace_malloc_size(objspace, ptr, old_size);
11673
11674 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
11675 free(ptr);
11676 RB_DEBUG_COUNTER_INC(heap_xfree);
11677 }
11678}
11679
11680static void *
11681ruby_xmalloc0(size_t size)
11682{
11683 return objspace_xmalloc0(&rb_objspace, size);
11684}
11685
11686void *
11687ruby_xmalloc_body(size_t size)
11688{
11689 if ((ssize_t)size < 0) {
11690 negative_size_allocation_error("too large allocation size");
11691 }
11692 return ruby_xmalloc0(size);
11693}
11694
11695void
11696ruby_malloc_size_overflow(size_t count, size_t elsize)
11697{
11698 rb_raise(rb_eArgError,
11699 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
11700 count, elsize);
11701}
11702
11703void *
11704ruby_xmalloc2_body(size_t n, size_t size)
11705{
11706 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
11707}
11708
11709static void *
11710objspace_xcalloc(rb_objspace_t *objspace, size_t size)
11711{
11712 void *mem;
11713
11714 size = objspace_malloc_prepare(objspace, size);
11715 TRY_WITH_GC(size, mem = calloc1(size));
11716 return objspace_malloc_fixup(objspace, mem, size);
11717}
11718
11719void *
11720ruby_xcalloc_body(size_t n, size_t size)
11721{
11722 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
11723}
11724
11725#ifdef ruby_sized_xrealloc
11726#undef ruby_sized_xrealloc
11727#endif
11728void *
11729ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
11730{
11731 if ((ssize_t)new_size < 0) {
11732 negative_size_allocation_error("too large allocation size");
11733 }
11734
11735 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
11736}
11737
11738void *
11739ruby_xrealloc_body(void *ptr, size_t new_size)
11740{
11741 return ruby_sized_xrealloc(ptr, new_size, 0);
11742}
11743
11744#ifdef ruby_sized_xrealloc2
11745#undef ruby_sized_xrealloc2
11746#endif
11747void *
11748ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
11749{
11750 size_t len = xmalloc2_size(n, size);
11751 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
11752}
11753
11754void *
11755ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
11756{
11757 return ruby_sized_xrealloc2(ptr, n, size, 0);
11758}
11759
11760#ifdef ruby_sized_xfree
11761#undef ruby_sized_xfree
11762#endif
11763void
11764ruby_sized_xfree(void *x, size_t size)
11765{
11766 if (x) {
11767 objspace_xfree(&rb_objspace, x, size);
11768 }
11769}
11770
11771void
11772ruby_xfree(void *x)
11773{
11774 ruby_sized_xfree(x, 0);
11775}
11776
11777void *
11778rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
11779{
11780 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
11781 return ruby_xmalloc(w);
11782}
11783
11784void *
11785rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
11786{
11787 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
11788 return ruby_xrealloc((void *)p, w);
11789}
11790
11791void *
11792rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
11793{
11794 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
11795 return ruby_xmalloc(u);
11796}
11797
11798void *
11799rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
11800{
11801 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
11802 return ruby_xcalloc(u, 1);
11803}
11804
11805/* Mimic ruby_xmalloc, but does not need rb_objspace.
11806 * Should return a pointer suitable for ruby_xfree.
11807 */
11808void *
11809ruby_mimmalloc(size_t size)
11810{
11811 void *mem;
11812#if CALC_EXACT_MALLOC_SIZE
11813 size += sizeof(struct malloc_obj_info);
11814#endif
11815 mem = malloc(size);
11816#if CALC_EXACT_MALLOC_SIZE
11817 if (!mem) {
11818 return NULL;
11819 }
11820 else
11821 /* set 0 for consistency of allocated_size/allocations */
11822 {
11823 struct malloc_obj_info *info = mem;
11824 info->size = 0;
11825#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11826 info->gen = 0;
11827 info->file = NULL;
11828 info->line = 0;
11829#endif
11830 mem = info + 1;
11831 }
11832#endif
11833 return mem;
11834}
11835
11836void
11837ruby_mimfree(void *ptr)
11838{
11839#if CALC_EXACT_MALLOC_SIZE
11840 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
11841 ptr = info;
11842#endif
11843 free(ptr);
11844}
11845
11846void *
11847rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
11848{
11849 void *ptr;
11850 VALUE imemo;
11851 rb_imemo_tmpbuf_t *tmpbuf;
11852
11853    /* Keep the order: allocate an empty imemo first, then xmalloc, to
11854     * avoid a potential memory leak */
11855 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
11856 *store = imemo;
11857 ptr = ruby_xmalloc0(size);
11858 tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
11859 tmpbuf->ptr = ptr;
11860 tmpbuf->cnt = cnt;
11861 return ptr;
11862}
11863
11864void *
11865rb_alloc_tmp_buffer(volatile VALUE *store, long len)
11866{
11867 long cnt;
11868
11869 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
11870 rb_raise(rb_eArgError, "negative buffer size (or size too big)");
11871 }
11872
11873 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
11874}
11875
11876void
11877rb_free_tmp_buffer(volatile VALUE *store)
11878{
11879 rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
11880 if (s) {
11881 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
11882 s->cnt = 0;
11883 ruby_xfree(ptr);
11884 }
11885}
11886
11887#if MALLOC_ALLOCATED_SIZE
11888/*
11889 * call-seq:
11890 * GC.malloc_allocated_size -> Integer
11891 *
11892 * Returns the size of memory allocated by malloc().
11893 *
11894 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
11895 */
11896
11897static VALUE
11898gc_malloc_allocated_size(VALUE self)
11899{
11900 return UINT2NUM(rb_objspace.malloc_params.allocated_size);
11901}
11902
11903/*
11904 * call-seq:
11905 * GC.malloc_allocations -> Integer
11906 *
11907 * Returns the number of malloc() allocations.
11908 *
11909 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
11910 */
11911
11912static VALUE
11913gc_malloc_allocations(VALUE self)
11914{
11915 return UINT2NUM(rb_objspace.malloc_params.allocations);
11916}
11917#endif
11918
11919void
11920rb_gc_adjust_memory_usage(ssize_t diff)
11921{
11922 rb_objspace_t *objspace = &rb_objspace;
11923 if (diff > 0) {
11924 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
11925 }
11926 else if (diff < 0) {
11927 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
11928 }
11929}
11930
11931/*
11932 ------------------------------ WeakMap ------------------------------
11933*/
11934
11935struct weakmap {
11936 st_table *obj2wmap; /* obj -> [ref,...] */
11937 st_table *wmap2obj; /* ref -> obj */
11938 VALUE final;
11939};
11940
11941#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
11942
11943#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
11944static int
11945wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
11946{
11947 rb_objspace_t *objspace = (rb_objspace_t *)arg;
11948 VALUE obj = (VALUE)val;
11949 if (!is_live_object(objspace, obj)) return ST_DELETE;
11950 return ST_CONTINUE;
11951}
11952#endif
11953
11954static void
11955wmap_compact(void *ptr)
11956{
11957 struct weakmap *w = ptr;
11958 if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
11959 if (w->obj2wmap) rb_gc_update_tbl_refs(w->obj2wmap);
11960 w->final = rb_gc_location(w->final);
11961}
11962
11963static void
11964wmap_mark(void *ptr)
11965{
11966 struct weakmap *w = ptr;
11967#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
11968 if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
11969#endif
11970 rb_gc_mark_movable(w->final);
11971}
11972
11973static int
11974wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
11975{
11976 VALUE *ptr = (VALUE *)val;
11977 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
11978 return ST_CONTINUE;
11979}
11980
11981static void
11982wmap_free(void *ptr)
11983{
11984 struct weakmap *w = ptr;
11985 st_foreach(w->obj2wmap, wmap_free_map, 0);
11986 st_free_table(w->obj2wmap);
11987 st_free_table(w->wmap2obj);
11988}
11989
11990static int
11991wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
11992{
11993 VALUE *ptr = (VALUE *)val;
11994 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
11995 return ST_CONTINUE;
11996}
11997
11998static size_t
11999wmap_memsize(const void *ptr)
12000{
12001 size_t size;
12002 const struct weakmap *w = ptr;
12003 size = sizeof(*w);
12004 size += st_memsize(w->obj2wmap);
12005 size += st_memsize(w->wmap2obj);
12006 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
12007 return size;
12008}
12009
12010static const rb_data_type_t weakmap_type = {
12011 "weakmap",
12012 {
12013 wmap_mark,
12014 wmap_free,
12015 wmap_memsize,
12016 wmap_compact,
12017 },
12018 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
12019};
12020
12021static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));
12022
12023static VALUE
12024wmap_allocate(VALUE klass)
12025{
12026 struct weakmap *w;
12027 VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
12028 w->obj2wmap = rb_init_identtable();
12029 w->wmap2obj = rb_init_identtable();
12030 w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
12031 return obj;
12032}
12033
12034static int
12035wmap_live_p(rb_objspace_t *objspace, VALUE obj)
12036{
12037 if (SPECIAL_CONST_P(obj)) return TRUE;
12038 /* If is_pointer_to_heap returns false, the page could be in the tomb heap
12039 * or have already been freed. */
12040 if (!is_pointer_to_heap(objspace, (void *)obj)) return FALSE;
12041
12042 void *poisoned = asan_unpoison_object_temporary(obj);
12043
12044 enum ruby_value_type t = BUILTIN_TYPE(obj);
12045 int ret = (!(t == T_NONE || t >= T_FIXNUM || t == T_ICLASS) &&
12046 is_live_object(objspace, obj));
12047
12048 if (poisoned) {
12049 asan_poison_object(obj);
12050 }
12051
12052 return ret;
12053}
12054
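/* st_update() callback used by wmap_finalize() below.  The value kept in
 * obj2wmap is an array laid out as [count, key1, key2, ...]; this removes
 * every occurrence of the finalized key (arg), shrinks the array, and deletes
 * the table entry entirely once no keys remain. */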
12055static int
12056wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
12057{
12058 VALUE wmap, *ptr, size, i, j;
12059 if (!existing) return ST_STOP;
12060 wmap = (VALUE)arg, ptr = (VALUE *)*value;
12061 for (i = j = 1, size = ptr[0]; i <= size; ++i) {
12062 if (ptr[i] != wmap) {
12063 ptr[j++] = ptr[i];
12064 }
12065 }
12066 if (j == 1) {
12067 ruby_sized_xfree(ptr, i * sizeof(VALUE));
12068 return ST_DELETE;
12069 }
12070 if (j < i) {
12071 SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
12072 ptr[0] = j;
12073 *value = (st_data_t)ptr;
12074 }
12075 return ST_CONTINUE;
12076}
12077
12078/* :nodoc: */
12079static VALUE
12080wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
12081{
12082 st_data_t orig, wmap, data;
12083 VALUE obj, *rids, i, size;
12084 struct weakmap *w;
12085
12086 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12087 /* Get reference from object id. */
12088 if ((obj = id2ref_obj_tbl(&rb_objspace, objid)) == Qundef) {
12089 rb_bug("wmap_finalize: objid is not found.");
12090 }
12091
12092 /* obj is original referenced object and/or weak reference. */
12093 orig = (st_data_t)obj;
12094 if (st_delete(w->obj2wmap, &orig, &data)) {
12095 rids = (VALUE *)data;
12096 size = *rids++;
12097 for (i = 0; i < size; ++i) {
12098 wmap = (st_data_t)rids[i];
12099 st_delete(w->wmap2obj, &wmap, NULL);
12100 }
12101 ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
12102 }
12103
12104 wmap = (st_data_t)obj;
12105 if (st_delete(w->wmap2obj, &wmap, &orig)) {
12106 wmap = (st_data_t)obj;
12107 st_update(w->obj2wmap, orig, wmap_final_func, wmap);
12108 }
12109 return self;
12110}
12111
12112struct wmap_iter_arg {
12113    rb_objspace_t *objspace;
12114 VALUE value;
12115};
12116
12117static VALUE
12118wmap_inspect_append(rb_objspace_t *objspace, VALUE str, VALUE obj)
12119{
12120 if (SPECIAL_CONST_P(obj)) {
12121 return rb_str_append(str, rb_inspect(obj));
12122 }
12123 else if (wmap_live_p(objspace, obj)) {
12124 return rb_str_append(str, rb_any_to_s(obj));
12125 }
12126 else {
12127 return rb_str_catf(str, "#<collected:%p>", (void*)obj);
12128 }
12129}
12130
12131static int
12132wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
12133{
12134 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
12135 rb_objspace_t *objspace = argp->objspace;
12136 VALUE str = argp->value;
12137 VALUE k = (VALUE)key, v = (VALUE)val;
12138
12139 if (RSTRING_PTR(str)[0] == '#') {
12140 rb_str_cat2(str, ", ");
12141 }
12142 else {
12143 rb_str_cat2(str, ": ");
12144 RSTRING_PTR(str)[0] = '#';
12145 }
12146 wmap_inspect_append(objspace, str, k);
12147 rb_str_cat2(str, " => ");
12148 wmap_inspect_append(objspace, str, v);
12149
12150 return ST_CONTINUE;
12151}
12152
12153static VALUE
12154wmap_inspect(VALUE self)
12155{
12156 VALUE str;
12157 VALUE c = rb_class_name(CLASS_OF(self));
12158 struct weakmap *w;
12159 struct wmap_iter_arg args;
12160
12161 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12162 str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
12163 if (w->wmap2obj) {
12164 args.objspace = &rb_objspace;
12165 args.value = str;
12166 st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
12167 }
12168 RSTRING_PTR(str)[0] = '#';
12169 rb_str_cat2(str, ">");
12170 return str;
12171}
12172
12173static int
12174wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
12175{
12176 rb_objspace_t *objspace = (rb_objspace_t *)arg;
12177 VALUE obj = (VALUE)val;
12178 if (wmap_live_p(objspace, obj)) {
12179 rb_yield_values(2, (VALUE)key, obj);
12180 }
12181 return ST_CONTINUE;
12182}
12183
12184/* Iterates over keys and objects in a weakly referenced object */
12185static VALUE
12186wmap_each(VALUE self)
12187{
12188 struct weakmap *w;
12189 rb_objspace_t *objspace = &rb_objspace;
12190
12191 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12192 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
12193 return self;
12194}
12195
12196static int
12197wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
12198{
12199 rb_objspace_t *objspace = (rb_objspace_t *)arg;
12200 VALUE obj = (VALUE)val;
12201 if (wmap_live_p(objspace, obj)) {
12202 rb_yield((VALUE)key);
12203 }
12204 return ST_CONTINUE;
12205}
12206
12207/* Iterates over keys in a weakly referenced object */
12208static VALUE
12209wmap_each_key(VALUE self)
12210{
12211 struct weakmap *w;
12212 rb_objspace_t *objspace = &rb_objspace;
12213
12214 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12215 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
12216 return self;
12217}
12218
12219static int
12220wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
12221{
12222 rb_objspace_t *objspace = (rb_objspace_t *)arg;
12223 VALUE obj = (VALUE)val;
12224 if (wmap_live_p(objspace, obj)) {
12225 rb_yield(obj);
12226 }
12227 return ST_CONTINUE;
12228}
12229
12230/* Iterates over values in a weakly referenced object */
12231static VALUE
12232wmap_each_value(VALUE self)
12233{
12234 struct weakmap *w;
12235 rb_objspace_t *objspace = &rb_objspace;
12236
12237 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12238 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
12239 return self;
12240}
12241
12242static int
12243wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
12244{
12245 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
12246 rb_objspace_t *objspace = argp->objspace;
12247 VALUE ary = argp->value;
12248 VALUE obj = (VALUE)val;
12249 if (wmap_live_p(objspace, obj)) {
12250 rb_ary_push(ary, (VALUE)key);
12251 }
12252 return ST_CONTINUE;
12253}
12254
12255/* Returns an array of keys in a weakly referenced object */
12256static VALUE
12257wmap_keys(VALUE self)
12258{
12259 struct weakmap *w;
12260 struct wmap_iter_arg args;
12261
12262 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12263 args.objspace = &rb_objspace;
12264 args.value = rb_ary_new();
12265 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
12266 return args.value;
12267}
12268
12269static int
12270wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
12271{
12272 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
12273 rb_objspace_t *objspace = argp->objspace;
12274 VALUE ary = argp->value;
12275 VALUE obj = (VALUE)val;
12276 if (wmap_live_p(objspace, obj)) {
12277 rb_ary_push(ary, obj);
12278 }
12279 return ST_CONTINUE;
12280}
12281
12282/* Returns an array of values in a weakly referenced object */
12283static VALUE
12284wmap_values(VALUE self)
12285{
12286 struct weakmap *w;
12287 struct wmap_iter_arg args;
12288
12289 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12290 args.objspace = &rb_objspace;
12291 args.value = rb_ary_new();
12292 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
12293 return args.value;
12294}
12295
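/* st_update() callback for wmap_aset() below: appends the new key (arg) to the
 * value's back-reference array in obj2wmap ([count, key1, ...]), allocating or
 * growing the array as needed; *val is rewritten only when realloc() moved the
 * array. */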
12296static int
12297wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
12298{
12299 VALUE size, *ptr, *optr;
12300 if (existing) {
12301 size = (ptr = optr = (VALUE *)*val)[0];
12302 ++size;
12303 SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
12304 }
12305 else {
12306 optr = 0;
12307 size = 1;
12308 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
12309 }
12310 ptr[0] = size;
12311 ptr[size] = (VALUE)arg;
12312 if (ptr == optr) return ST_STOP;
12313 *val = (st_data_t)ptr;
12314 return ST_CONTINUE;
12315}
12316
12317/* Creates a weak reference from the given key to the given value */
12318static VALUE
12319wmap_aset(VALUE self, VALUE key, VALUE value)
12320{
12321 struct weakmap *w;
12322
12323 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12324 if (FL_ABLE(value)) {
12325 define_final0(value, w->final);
12326 }
12327 if (FL_ABLE(key)) {
12328 define_final0(key, w->final);
12329 }
12330
12331 st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
12332 st_insert(w->wmap2obj, (st_data_t)key, (st_data_t)value);
12333 return nonspecial_obj_id(value);
12334}
12335
12336/* Retrieves a weakly referenced object with the given key */
12337static VALUE
12338wmap_lookup(VALUE self, VALUE key)
12339{
12340 st_data_t data;
12341 VALUE obj;
12342 struct weakmap *w;
12343 rb_objspace_t *objspace = &rb_objspace;
12344
12345 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12346 if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
12347 obj = (VALUE)data;
12348 if (!wmap_live_p(objspace, obj)) return Qundef;
12349 return obj;
12350}
12351
12352/* Retrieves a weakly referenced object with the given key */
12353static VALUE
12354wmap_aref(VALUE self, VALUE key)
12355{
12356 VALUE obj = wmap_lookup(self, key);
12357 return obj != Qundef ? obj : Qnil;
12358}
12359
12360/* Returns +true+ if +key+ is registered */
12361static VALUE
12362wmap_has_key(VALUE self, VALUE key)
12363{
12364 return wmap_lookup(self, key) == Qundef ? Qfalse : Qtrue;
12365}
12366
12367/* Returns the number of referenced objects */
12368static VALUE
12369wmap_size(VALUE self)
12370{
12371 struct weakmap *w;
12372 st_index_t n;
12373
12374 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12375 n = w->wmap2obj->num_entries;
12376#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
12377 return ULONG2NUM(n);
12378#else
12379 return ULL2NUM(n);
12380#endif
12381}
12382
12383/*
12384 ------------------------------ GC profiler ------------------------------
12385*/
12386
12387#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
12388
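/* Best-effort per-process CPU time: prefers clock_gettime(CLOCK_PROCESS_CPUTIME_ID),
 * falls back to getrusage(RUSAGE_SELF) user time, then to GetProcessTimes() on
 * Windows, and returns false when none of these is available. */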
12389static bool
12390current_process_time(struct timespec *ts)
12391{
12392#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
12393 {
12394 static int try_clock_gettime = 1;
12395 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
12396 return true;
12397 }
12398 else {
12399 try_clock_gettime = 0;
12400 }
12401 }
12402#endif
12403
12404#ifdef RUSAGE_SELF
12405 {
12406 struct rusage usage;
12407 struct timeval time;
12408 if (getrusage(RUSAGE_SELF, &usage) == 0) {
12409 time = usage.ru_utime;
12410 ts->tv_sec = time.tv_sec;
12411 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
12412 return true;
12413 }
12414 }
12415#endif
12416
12417#ifdef _WIN32
12418 {
12419 FILETIME creation_time, exit_time, kernel_time, user_time;
12420 ULARGE_INTEGER ui;
12421
12422 if (GetProcessTimes(GetCurrentProcess(),
12423 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
12424 memcpy(&ui, &user_time, sizeof(FILETIME));
12425#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
12426 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
12427 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
12428 return true;
12429 }
12430 }
12431#endif
12432
12433 return false;
12434}
12435
12436static double
12437getrusage_time(void)
12438{
12439 struct timespec ts;
12440 if (current_process_time(&ts)) {
12441 return ts.tv_sec + ts.tv_nsec * 1e-9;
12442 }
12443 else {
12444 return 0.0;
12445 }
12446}
12447
12448
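/* Lazily allocates the profiling record array with plain malloc()/realloc()
 * (100 records initially, grown by 1000 at a time), zero-fills the record for
 * the upcoming GC, and stores the before-GC parameters: the invoke reason and,
 * when enabled, malloc/rusage snapshots. */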
12449static inline void
12450gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
12451{
12452 if (objspace->profile.run) {
12453 size_t index = objspace->profile.next_index;
12454 gc_profile_record *record;
12455
12456 /* create new record */
12457 objspace->profile.next_index++;
12458
12459 if (!objspace->profile.records) {
12460 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
12461 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
12462 }
12463 if (index >= objspace->profile.size) {
12464 void *ptr;
12465 objspace->profile.size += 1000;
12466 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
12467 if (!ptr) rb_memerror();
12468 objspace->profile.records = ptr;
12469 }
12470 if (!objspace->profile.records) {
12471 rb_bug("gc_profile malloc or realloc miss");
12472 }
12473 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
12474 MEMZERO(record, gc_profile_record, 1);
12475
12476 /* setup before-GC parameter */
12477 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
12478#if MALLOC_ALLOCATED_SIZE
12479 record->allocated_size = malloc_allocated_size;
12480#endif
12481#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
12482#ifdef RUSAGE_SELF
12483 {
12484 struct rusage usage;
12485 if (getrusage(RUSAGE_SELF, &usage) == 0) {
12486 record->maxrss = usage.ru_maxrss;
12487 record->minflt = usage.ru_minflt;
12488 record->majflt = usage.ru_majflt;
12489 }
12490 }
12491#endif
12492#endif
12493 }
12494}
12495
12496static inline void
12497gc_prof_timer_start(rb_objspace_t *objspace)
12498{
12499 if (gc_prof_enabled(objspace)) {
12500 gc_profile_record *record = gc_prof_record(objspace);
12501#if GC_PROFILE_MORE_DETAIL
12502 record->prepare_time = objspace->profile.prepare_time;
12503#endif
12504 record->gc_time = 0;
12505 record->gc_invoke_time = getrusage_time();
12506 }
12507}
12508
12509static double
12510elapsed_time_from(double time)
12511{
12512 double now = getrusage_time();
12513 if (now > time) {
12514 return now - time;
12515 }
12516 else {
12517 return 0;
12518 }
12519}
12520
12521static inline void
12522gc_prof_timer_stop(rb_objspace_t *objspace)
12523{
12524 if (gc_prof_enabled(objspace)) {
12525 gc_profile_record *record = gc_prof_record(objspace);
12526 record->gc_time = elapsed_time_from(record->gc_invoke_time);
12527 record->gc_invoke_time -= objspace->profile.invoke_time;
12528 }
12529}
12530
12531#define RUBY_DTRACE_GC_HOOK(name) \
12532 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
12533static inline void
12534gc_prof_mark_timer_start(rb_objspace_t *objspace)
12535{
12536 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
12537#if GC_PROFILE_MORE_DETAIL
12538 if (gc_prof_enabled(objspace)) {
12539 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
12540 }
12541#endif
12542}
12543
12544static inline void
12545gc_prof_mark_timer_stop(rb_objspace_t *objspace)
12546{
12547 RUBY_DTRACE_GC_HOOK(MARK_END);
12548#if GC_PROFILE_MORE_DETAIL
12549 if (gc_prof_enabled(objspace)) {
12550 gc_profile_record *record = gc_prof_record(objspace);
12551 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
12552 }
12553#endif
12554}
12555
12556static inline void
12557gc_prof_sweep_timer_start(rb_objspace_t *objspace)
12558{
12559 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
12560 if (gc_prof_enabled(objspace)) {
12561 gc_profile_record *record = gc_prof_record(objspace);
12562
12563 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
12564 objspace->profile.gc_sweep_start_time = getrusage_time();
12565 }
12566 }
12567}
12568
12569static inline void
12570gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
12571{
12572 RUBY_DTRACE_GC_HOOK(SWEEP_END);
12573
12574 if (gc_prof_enabled(objspace)) {
12575 double sweep_time;
12576 gc_profile_record *record = gc_prof_record(objspace);
12577
12578 if (record->gc_time > 0) {
12579 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
12580 /* need to accumulate GC time for lazy sweep after gc() */
12581 record->gc_time += sweep_time;
12582 }
12583 else if (GC_PROFILE_MORE_DETAIL) {
12584 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
12585 }
12586
12587#if GC_PROFILE_MORE_DETAIL
12588 record->gc_sweep_time += sweep_time;
12589 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
12590#endif
12591 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
12592 }
12593}
12594
12595static inline void
12596gc_prof_set_malloc_info(rb_objspace_t *objspace)
12597{
12598#if GC_PROFILE_MORE_DETAIL
12599 if (gc_prof_enabled(objspace)) {
12600 gc_profile_record *record = gc_prof_record(objspace);
12601 record->allocate_increase = malloc_increase;
12602 record->allocate_limit = malloc_limit;
12603 }
12604#endif
12605}
12606
12607static inline void
12608gc_prof_set_heap_info(rb_objspace_t *objspace)
12609{
12610 if (gc_prof_enabled(objspace)) {
12611 gc_profile_record *record = gc_prof_record(objspace);
12612 size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
12613 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
12614
12615#if GC_PROFILE_MORE_DETAIL
12616 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
12617 record->heap_live_objects = live;
12618 record->heap_free_objects = total - live;
12619#endif
12620
12621 record->heap_total_objects = total;
12622 record->heap_use_size = live * sizeof(RVALUE);
12623 record->heap_total_size = total * sizeof(RVALUE);
12624 }
12625}
12626
12627/*
12628 * call-seq:
12629 * GC::Profiler.clear -> nil
12630 *
12631 * Clears the GC profiler data.
12632 *
12633 */
12634
12635static VALUE
12636gc_profile_clear(VALUE _)
12637{
12638 rb_objspace_t *objspace = &rb_objspace;
12639 void *p = objspace->profile.records;
12640 objspace->profile.records = NULL;
12641 objspace->profile.size = 0;
12642 objspace->profile.next_index = 0;
12643 objspace->profile.current_record = 0;
12644 if (p) {
12645 free(p);
12646 }
12647 return Qnil;
12648}
12649
12650/*
12651 * call-seq:
12652 * GC::Profiler.raw_data -> [Hash, ...]
12653 *
12654 * Returns an Array of individual raw profile data Hashes ordered
12655 * from earliest to latest by +:GC_INVOKE_TIME+.
12656 *
12657 * For example:
12658 *
12659 * [
12660 * {
12661 * :GC_TIME=>1.3000000000000858e-05,
12662 * :GC_INVOKE_TIME=>0.010634999999999999,
12663 * :HEAP_USE_SIZE=>289640,
12664 * :HEAP_TOTAL_SIZE=>588960,
12665 * :HEAP_TOTAL_OBJECTS=>14724,
12666 * :GC_IS_MARKED=>false
12667 * },
12668 * # ...
12669 * ]
12670 *
12671 * The keys mean:
12672 *
12673 * +:GC_TIME+::
12674 * Time elapsed in seconds for this GC run
12675 * +:GC_INVOKE_TIME+::
12676 * Time elapsed in seconds from startup to when the GC was invoked
12677 * +:HEAP_USE_SIZE+::
12678 * Total bytes of heap used
12679 * +:HEAP_TOTAL_SIZE+::
12680 * Total size of heap in bytes
12681 * +:HEAP_TOTAL_OBJECTS+::
12682 * Total number of objects
12683 * +:GC_IS_MARKED+::
12684 * Returns +true+ if the GC is in mark phase
12685 *
12686 * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
12687 * to the following hash keys:
12688 *
12689 * +:GC_MARK_TIME+::
12690 * +:GC_SWEEP_TIME+::
12691 * +:ALLOCATE_INCREASE+::
12692 * +:ALLOCATE_LIMIT+::
12693 * +:HEAP_USE_PAGES+::
12694 * +:HEAP_LIVE_OBJECTS+::
12695 * +:HEAP_FREE_OBJECTS+::
12696 * +:HAVE_FINALIZE+::
12697 *
12698 */
12699
12700static VALUE
12701gc_profile_record_get(VALUE _)
12702{
12703 VALUE prof;
12704 VALUE gc_profile = rb_ary_new();
12705 size_t i;
12706 rb_objspace_t *objspace = (&rb_objspace);
12707
12708 if (!objspace->profile.run) {
12709 return Qnil;
12710 }
12711
12712 for (i =0; i < objspace->profile.next_index; i++) {
12713 gc_profile_record *record = &objspace->profile.records[i];
12714
12715 prof = rb_hash_new();
12716 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
12717 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
12718 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
12719 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
12720 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
12721 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
12722 rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
12723 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
12724#if GC_PROFILE_MORE_DETAIL
12725 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
12726 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
12727 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
12728 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
12729 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
12730 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
12731 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
12732
12733 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
12734 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
12735
12736 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
12737#endif
12738
12739#if RGENGC_PROFILE > 0
12740 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
12741 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
12742 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
12743#endif
12744 rb_ary_push(gc_profile, prof);
12745 }
12746
12747 return gc_profile;
12748}
12749
12750#if GC_PROFILE_MORE_DETAIL
12751#define MAJOR_REASON_MAX 0x10
12752
12753static char *
12754gc_profile_dump_major_reason(unsigned int flags, char *buff)
12755{
12756 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
12757 int i = 0;
12758
12759 if (reason == GPR_FLAG_NONE) {
12760 buff[0] = '-';
12761 buff[1] = 0;
12762 }
12763 else {
12764#define C(x, s) \
12765 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
12766 buff[i++] = #x[0]; \
12767 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
12768 buff[i] = 0; \
12769 }
12770 C(NOFREE, N);
12771 C(OLDGEN, O);
12772 C(SHADY, S);
12773#if RGENGC_ESTIMATE_OLDMALLOC
12774 C(OLDMALLOC, M);
12775#endif
12776#undef C
12777 }
12778 return buff;
12779}
12780#endif
12781
12782static void
12783gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
12784{
12785 rb_objspace_t *objspace = &rb_objspace;
12786 size_t count = objspace->profile.next_index;
12787#ifdef MAJOR_REASON_MAX
12788 char reason_str[MAJOR_REASON_MAX];
12789#endif
12790
12791 if (objspace->profile.run && count /* > 1 */) {
12792 size_t i;
12793 const gc_profile_record *record;
12794
12795 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
12796 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
12797
12798 for (i = 0; i < count; i++) {
12799 record = &objspace->profile.records[i];
12800 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
12801 i+1, record->gc_invoke_time, record->heap_use_size,
12802 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
12803 }
12804
12805#if GC_PROFILE_MORE_DETAIL
12806 const char *str = "\n\n" \
12807 "More detail.\n" \
12808            "Prepare Time = Previous GC's remaining sweep time\n"
12809 "Index Flags Allocate Inc. Allocate Limit"
12810#if CALC_EXACT_MALLOC_SIZE
12811 " Allocated Size"
12812#endif
12813 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
12814#if RGENGC_PROFILE
12815 " OldgenObj RemNormObj RemShadObj"
12816#endif
12817#if GC_PROFILE_DETAIL_MEMORY
12818 " MaxRSS(KB) MinorFLT MajorFLT"
12819#endif
12820 "\n";
12821 append(out, rb_str_new_cstr(str));
12822
12823 for (i = 0; i < count; i++) {
12824 record = &objspace->profile.records[i];
12825 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
12826#if CALC_EXACT_MALLOC_SIZE
12827 " %15"PRIuSIZE
12828#endif
12829 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
12830#if RGENGC_PROFILE
12831 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
12832#endif
12833#if GC_PROFILE_DETAIL_MEMORY
12834 "%11ld %8ld %8ld"
12835#endif
12836
12837 "\n",
12838 i+1,
12839 gc_profile_dump_major_reason(record->flags, reason_str),
12840 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
12841 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
12842 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
12843 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
12844 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
12845 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
12846 record->allocate_increase, record->allocate_limit,
12847#if CALC_EXACT_MALLOC_SIZE
12848 record->allocated_size,
12849#endif
12850 record->heap_use_pages,
12851 record->gc_mark_time*1000,
12852 record->gc_sweep_time*1000,
12853 record->prepare_time*1000,
12854
12855 record->heap_live_objects,
12856 record->heap_free_objects,
12857 record->removing_objects,
12858 record->empty_objects
12859#if RGENGC_PROFILE
12860 ,
12861 record->old_objects,
12862 record->remembered_normal_objects,
12863 record->remembered_shady_objects
12864#endif
12865#if GC_PROFILE_DETAIL_MEMORY
12866 ,
12867 record->maxrss / 1024,
12868 record->minflt,
12869 record->majflt
12870#endif
12871
12872 ));
12873 }
12874#endif
12875 }
12876}
12877
12878/*
12879 * call-seq:
12880 * GC::Profiler.result -> String
12881 *
12882 * Returns a profile data report such as:
12883 *
12884 * GC 1 invokes.
12885 * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
12886 * 1 0.012 159240 212940 10647 0.00000000000001530000
12887 */
12888
12889static VALUE
12890gc_profile_result(VALUE _)
12891{
12892 VALUE str = rb_str_buf_new(0);
12893 gc_profile_dump_on(str, rb_str_buf_append);
12894 return str;
12895}
12896
12897/*
12898 * call-seq:
12899 * GC::Profiler.report
12900 * GC::Profiler.report(io)
12901 *
12902 * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
12903 *
12904 */
12905
12906static VALUE
12907gc_profile_report(int argc, VALUE *argv, VALUE self)
12908{
12909 VALUE out;
12910
12911 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
12912 gc_profile_dump_on(out, rb_io_write);
12913
12914 return Qnil;
12915}
12916
12917/*
12918 * call-seq:
12919 * GC::Profiler.total_time -> float
12920 *
12921 * The total time used for garbage collection in seconds
12922 */
12923
12924static VALUE
12925gc_profile_total_time(VALUE self)
12926{
12927 double time = 0;
12928 rb_objspace_t *objspace = &rb_objspace;
12929
12930 if (objspace->profile.run && objspace->profile.next_index > 0) {
12931 size_t i;
12932 size_t count = objspace->profile.next_index;
12933
12934 for (i = 0; i < count; i++) {
12935 time += objspace->profile.records[i].gc_time;
12936 }
12937 }
12938 return DBL2NUM(time);
12939}
12940
12941/*
12942 * call-seq:
12943 * GC::Profiler.enabled? -> true or false
12944 *
12945 * The current status of GC profile mode.
12946 */
12947
12948static VALUE
12949gc_profile_enable_get(VALUE self)
12950{
12951 rb_objspace_t *objspace = &rb_objspace;
12952 return RBOOL(objspace->profile.run);
12953}
12954
12955/*
12956 * call-seq:
12957 * GC::Profiler.enable -> nil
12958 *
12959 * Starts the GC profiler.
12960 *
12961 */
12962
12963static VALUE
12964gc_profile_enable(VALUE _)
12965{
12966 rb_objspace_t *objspace = &rb_objspace;
12967 objspace->profile.run = TRUE;
12968 objspace->profile.current_record = 0;
12969 return Qnil;
12970}
12971
12972/*
12973 * call-seq:
12974 * GC::Profiler.disable -> nil
12975 *
12976 * Stops the GC profiler.
12977 *
12978 */
12979
12980static VALUE
12981gc_profile_disable(VALUE _)
12982{
12983 rb_objspace_t *objspace = &rb_objspace;
12984
12985 objspace->profile.run = FALSE;
12986 objspace->profile.current_record = 0;
12987 return Qnil;
12988}
12989
12990/*
12991 ------------------------------ DEBUG ------------------------------
12992*/
12993
12994static const char *
12995type_name(int type, VALUE obj)
12996{
12997 switch (type) {
12998#define TYPE_NAME(t) case (t): return #t;
12999 TYPE_NAME(T_NONE);
13000 TYPE_NAME(T_OBJECT);
13001 TYPE_NAME(T_CLASS);
13002 TYPE_NAME(T_MODULE);
13003 TYPE_NAME(T_FLOAT);
13004 TYPE_NAME(T_STRING);
13005 TYPE_NAME(T_REGEXP);
13006 TYPE_NAME(T_ARRAY);
13007 TYPE_NAME(T_HASH);
13008 TYPE_NAME(T_STRUCT);
13009 TYPE_NAME(T_BIGNUM);
13010 TYPE_NAME(T_FILE);
13011 TYPE_NAME(T_MATCH);
13012 TYPE_NAME(T_COMPLEX);
13013 TYPE_NAME(T_RATIONAL);
13014 TYPE_NAME(T_NIL);
13015 TYPE_NAME(T_TRUE);
13016 TYPE_NAME(T_FALSE);
13017 TYPE_NAME(T_SYMBOL);
13018 TYPE_NAME(T_FIXNUM);
13019 TYPE_NAME(T_UNDEF);
13020 TYPE_NAME(T_IMEMO);
13021 TYPE_NAME(T_ICLASS);
13022 TYPE_NAME(T_MOVED);
13023 TYPE_NAME(T_ZOMBIE);
13024 case T_DATA:
13025 if (obj && rb_objspace_data_type_name(obj)) {
13026 return rb_objspace_data_type_name(obj);
13027 }
13028 return "T_DATA";
13029#undef TYPE_NAME
13030 }
13031 return "unknown";
13032}
13033
13034static const char *
13035obj_type_name(VALUE obj)
13036{
13037 return type_name(TYPE(obj), obj);
13038}
13039
13040const char *
13041rb_method_type_name(rb_method_type_t type)
13042{
13043 switch (type) {
13044 case VM_METHOD_TYPE_ISEQ: return "iseq";
13045      case VM_METHOD_TYPE_ATTRSET: return "attrset";
13046 case VM_METHOD_TYPE_IVAR: return "ivar";
13047 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
13048 case VM_METHOD_TYPE_ALIAS: return "alias";
13049 case VM_METHOD_TYPE_REFINED: return "refined";
13050 case VM_METHOD_TYPE_CFUNC: return "cfunc";
13051 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
13052 case VM_METHOD_TYPE_MISSING: return "missing";
13053 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
13054 case VM_METHOD_TYPE_UNDEF: return "undef";
13055 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
13056 }
13057 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
13058}
13059
13060/* from array.c */
13061# define ARY_SHARED_P(ary) \
13062 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
13063 FL_TEST((ary),ELTS_SHARED)!=0)
13064# define ARY_EMBED_P(ary) \
13065 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
13066 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
13067
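/* Appends " <label>@<path>:<line>" for the given iseq to buff; does nothing
 * when the location is unavailable or the path object has been moved by GC. */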
13068static void
13069rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
13070{
13071 if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
13072 VALUE path = rb_iseq_path(iseq);
13073 VALUE n = iseq->body->location.first_lineno;
13074 snprintf(buff, buff_size, " %s@%s:%d",
13075 RSTRING_PTR(iseq->body->location.label),
13076 RSTRING_PTR(path),
13077 n ? FIX2INT(n) : 0 );
13078 }
13079}
13080
13081static int
13082str_len_no_raise(VALUE str)
13083{
13084 long len = RSTRING_LEN(str);
13085 if (len < 0) return 0;
13086 if (len > INT_MAX) return INT_MAX;
13087 return (int)len;
13088}
13089
13090const char *
13091rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
13092{
13093 int pos = 0;
13094 void *poisoned = asan_poisoned_object_p(obj);
13095 asan_unpoison_object(obj, false);
13096
13097#define BUFF_ARGS buff + pos, buff_size - pos
13098#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
13099 if (SPECIAL_CONST_P(obj)) {
13100 APPENDF((BUFF_ARGS, "%s", obj_type_name(obj)));
13101
13102 if (FIXNUM_P(obj)) {
13103 APPENDF((BUFF_ARGS, " %ld", FIX2LONG(obj)));
13104 }
13105 else if (SYMBOL_P(obj)) {
13106 APPENDF((BUFF_ARGS, " %s", rb_id2name(SYM2ID(obj))));
13107 }
13108 }
13109 else {
13110#define TF(c) ((c) != 0 ? "true" : "false")
13111#define C(c, s) ((c) != 0 ? (s) : " ")
13112 const int type = BUILTIN_TYPE(obj);
13113 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
13114
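        /* Flag letters in the dump below: L = uncollectible, M = marked,
         * P = pinned, R = marking, U = write-barrier unprotected, G = garbage;
         * the digit preceding them is the RGenGC age. */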
13115 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
13116 APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s%s] %s ",
13117 (void *)obj, age,
13118 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
13119 C(RVALUE_MARK_BITMAP(obj), "M"),
13120 C(RVALUE_PIN_BITMAP(obj), "P"),
13121 C(RVALUE_MARKING_BITMAP(obj), "R"),
13122 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
13123 C(rb_objspace_garbage_object_p(obj), "G"),
13124 obj_type_name(obj)));
13125 }
13126 else {
13127 /* fake */
13128 APPENDF((BUFF_ARGS, "%p [%dXXXX] %s",
13129 (void *)obj, age,
13130 obj_type_name(obj)));
13131 }
13132
13133 if (internal_object_p(obj)) {
13134 /* ignore */
13135 }
13136 else if (RBASIC(obj)->klass == 0) {
13137 APPENDF((BUFF_ARGS, "(temporary internal)"));
13138 }
13139 else {
13140 if (RTEST(RBASIC(obj)->klass)) {
13141 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
13142 if (!NIL_P(class_path)) {
13143 APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));
13144 }
13145 }
13146 }
13147
13148#if GC_DEBUG
13149 APPENDF((BUFF_ARGS, "@%s:%d", RANY(obj)->file, RANY(obj)->line));
13150#endif
13151
13152 switch (type) {
13153 case T_NODE:
13154 UNEXPECTED_NODE(rb_raw_obj_info);
13155 break;
13156 case T_ARRAY:
13157 if (FL_TEST(obj, ELTS_SHARED)) {
13158 APPENDF((BUFF_ARGS, "shared -> %s",
13159 rb_obj_info(RARRAY(obj)->as.heap.aux.shared_root)));
13160 }
13161 else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
13162 APPENDF((BUFF_ARGS, "[%s%s] len: %ld (embed)",
13163 C(ARY_EMBED_P(obj), "E"),
13164 C(ARY_SHARED_P(obj), "S"),
13165 RARRAY_LEN(obj)));
13166 }
13167 else {
13168 APPENDF((BUFF_ARGS, "[%s%s%s] len: %ld, capa:%ld ptr:%p",
13169 C(ARY_EMBED_P(obj), "E"),
13170 C(ARY_SHARED_P(obj), "S"),
13171 C(RARRAY_TRANSIENT_P(obj), "T"),
13172 RARRAY_LEN(obj),
13173 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
13174 (void *)RARRAY_CONST_PTR_TRANSIENT(obj)));
13175 }
13176 break;
13177 case T_STRING: {
13178 if (STR_SHARED_P(obj)) APPENDF((BUFF_ARGS, " [shared] "));
13179 APPENDF((BUFF_ARGS, "%.*s", str_len_no_raise(obj), RSTRING_PTR(obj)));
13180 break;
13181 }
13182 case T_SYMBOL: {
13183 VALUE fstr = RSYMBOL(obj)->fstr;
13184 ID id = RSYMBOL(obj)->id;
13185 if (RB_TYPE_P(fstr, T_STRING)) {
13186 APPENDF((BUFF_ARGS, ":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id));
13187 }
13188 else {
13189 APPENDF((BUFF_ARGS, "(%p) id:%d", (void *)fstr, (unsigned int)id));
13190 }
13191 break;
13192 }
13193 case T_MOVED: {
13194 APPENDF((BUFF_ARGS, "-> %p", (void*)rb_gc_location(obj)));
13195 break;
13196 }
13197 case T_HASH: {
13198 APPENDF((BUFF_ARGS, "[%c%c] %"PRIdSIZE,
13199 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
13200 RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
13201 RHASH_SIZE(obj)));
13202 break;
13203 }
13204 case T_CLASS:
13205 case T_MODULE:
13206 {
13207 VALUE class_path = rb_class_path_cached(obj);
13208 if (!NIL_P(class_path)) {
13209 APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
13210 }
13211 else {
13212                APPENDF((BUFF_ARGS, "(anon)"));
13213 }
13214 break;
13215 }
13216 case T_ICLASS:
13217 {
13218 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
13219 if (!NIL_P(class_path)) {
13220 APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));
13221 }
13222 break;
13223 }
13224 case T_OBJECT:
13225 {
13226 uint32_t len = ROBJECT_NUMIV(obj);
13227
13228 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
13229 APPENDF((BUFF_ARGS, "(embed) len:%d", len));
13230 }
13231 else {
13232 VALUE *ptr = ROBJECT_IVPTR(obj);
13233 APPENDF((BUFF_ARGS, "len:%d ptr:%p", len, (void *)ptr));
13234 }
13235 }
13236 break;
13237 case T_DATA: {
13238 const struct rb_block *block;
13239 const rb_iseq_t *iseq;
13240 if (rb_obj_is_proc(obj) &&
13241 (block = vm_proc_block(obj)) != NULL &&
13242 (vm_block_type(block) == block_type_iseq) &&
13243 (iseq = vm_block_iseq(block)) != NULL) {
13244 rb_raw_iseq_info(BUFF_ARGS, iseq);
13245 }
13246 else if (rb_ractor_p(obj)) {
13247 rb_ractor_t *r = (void *)DATA_PTR(obj);
13248 if (r) {
13249 APPENDF((BUFF_ARGS, "r:%d", r->pub.id));
13250 }
13251 }
13252 else {
13253 const char * const type_name = rb_objspace_data_type_name(obj);
13254 if (type_name) {
13255 APPENDF((BUFF_ARGS, "%s", type_name));
13256 }
13257 }
13258 break;
13259 }
13260 case T_IMEMO: {
13261 APPENDF((BUFF_ARGS, "<%s> ", rb_imemo_name(imemo_type(obj))));
13262
13263 switch (imemo_type(obj)) {
13264 case imemo_ment:
13265 {
13266 const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
13267
13268 APPENDF((BUFF_ARGS, ":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
13269 rb_id2name(me->called_id),
13270 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
13271 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
13272 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
13273 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
13274 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
13275 me->def ? rb_method_type_name(me->def->type) : "NULL",
13276 me->def ? me->def->alias_count : -1,
13277 (void *)me->owner, // obj_info(me->owner),
13278 (void *)me->defined_class)); //obj_info(me->defined_class)));
13279
13280 if (me->def) {
13281 switch (me->def->type) {
13282 case VM_METHOD_TYPE_ISEQ:
13283 APPENDF((BUFF_ARGS, " (iseq:%s)", obj_info((VALUE)me->def->body.iseq.iseqptr)));
13284 break;
13285 default:
13286 break;
13287 }
13288 }
13289
13290 break;
13291 }
13292 case imemo_iseq: {
13293 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
13294 rb_raw_iseq_info(BUFF_ARGS, iseq);
13295 break;
13296 }
13297 case imemo_callinfo:
13298 {
13299 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
13300 APPENDF((BUFF_ARGS, "(mid:%s, flag:%x argc:%d, kwarg:%s)",
13301 rb_id2name(vm_ci_mid(ci)),
13302 vm_ci_flag(ci),
13303 vm_ci_argc(ci),
13304 vm_ci_kwarg(ci) ? "available" : "NULL"));
13305 break;
13306 }
13307 case imemo_callcache:
13308 {
13309 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
13310 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
13311 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
13312
13313 APPENDF((BUFF_ARGS, "(klass:%s cme:%s%s (%p) call:%p",
13314 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
13315 cme ? rb_id2name(cme->called_id) : "<NULL>",
13316 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
13317 (void *)cme,
13318 (void *)vm_cc_call(cc)));
13319 break;
13320 }
13321 default:
13322 break;
13323 }
13324 }
13325 default:
13326 break;
13327 }
13328#undef TF
13329#undef C
13330 }
13331 end:
13332 if (poisoned) {
13333 asan_poison_object(obj);
13334 }
13335
13336 return buff;
13337#undef APPENDF
13338#undef BUFF_ARGS
13339}
13340
13341#if RGENGC_OBJ_INFO
13342#define OBJ_INFO_BUFFERS_NUM 10
13343#define OBJ_INFO_BUFFERS_SIZE 0x100
13344static int obj_info_buffers_index = 0;
13345static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
13346
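/* obj_info() rotates through OBJ_INFO_BUFFERS_NUM static buffers, so up to
 * that many recent results stay valid at once (e.g. when several obj_info()
 * calls appear in a single fprintf invocation). */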
13347static const char *
13348obj_info(VALUE obj)
13349{
13350 const int index = obj_info_buffers_index++;
13351 char *const buff = &obj_info_buffers[index][0];
13352
13353 if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
13354 obj_info_buffers_index = 0;
13355 }
13356
13357 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
13358}
13359#else
13360static const char *
13361obj_info(VALUE obj)
13362{
13363 return obj_type_name(obj);
13364}
13365#endif
13366
13367MJIT_FUNC_EXPORTED const char *
13368rb_obj_info(VALUE obj)
13369{
13370 return obj_info(obj);
13371}
13372
13373void
13374rb_obj_info_dump(VALUE obj)
13375{
13376 char buff[0x100];
13377 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
13378}
13379
13380MJIT_FUNC_EXPORTED void
13381rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
13382{
13383 char buff[0x100];
13384 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
13385}
13386
13387#if GC_DEBUG
13388
13389void
13390rb_gcdebug_print_obj_condition(VALUE obj)
13391{
13392 rb_objspace_t *objspace = &rb_objspace;
13393
13394 fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
13395
13396 if (BUILTIN_TYPE(obj) == T_MOVED) {
13397 fprintf(stderr, "moved?: true\n");
13398 }
13399 else {
13400 fprintf(stderr, "moved?: false\n");
13401 }
13402 if (is_pointer_to_heap(objspace, (void *)obj)) {
13403 fprintf(stderr, "pointer to heap?: true\n");
13404 }
13405 else {
13406 fprintf(stderr, "pointer to heap?: false\n");
13407 return;
13408 }
13409
13410 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
13411 fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
13412 fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
13413 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
13414 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
13415 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
13416
13417 if (is_lazy_sweeping(objspace)) {
13418 fprintf(stderr, "lazy sweeping?: true\n");
13419 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
13420 }
13421 else {
13422 fprintf(stderr, "lazy sweeping?: false\n");
13423 }
13424}
13425
13426static VALUE
13427gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
13428{
13429 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
13430 return Qnil;
13431}
13432
13433void
13434rb_gcdebug_sentinel(VALUE obj, const char *name)
13435{
13436 rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
13437}
13438
13439#endif /* GC_DEBUG */
13440
13441#if GC_DEBUG_STRESS_TO_CLASS
13442/*
13443 * call-seq:
13444 * GC.add_stress_to_class(class[, ...])
13445 *
13446 * Raises NoMemoryError when allocating an instance of the given classes.
13447 *
13448 */
13449static VALUE
13450rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
13451{
13452 rb_objspace_t *objspace = &rb_objspace;
13453
13454 if (!stress_to_class) {
13455 stress_to_class = rb_ary_tmp_new(argc);
13456 }
13457 rb_ary_cat(stress_to_class, argv, argc);
13458 return self;
13459}
13460
13461/*
13462 * call-seq:
13463 * GC.remove_stress_to_class(class[, ...])
13464 *
13465 * No longer raises NoMemoryError when allocating an instance of the
13466 * given classes.
13467 *
13468 */
13469static VALUE
13470rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
13471{
13472 rb_objspace_t *objspace = &rb_objspace;
13473 int i;
13474
13475 if (stress_to_class) {
13476 for (i = 0; i < argc; ++i) {
13477 rb_ary_delete_same(stress_to_class, argv[i]);
13478 }
13479 if (RARRAY_LEN(stress_to_class) == 0) {
13480 stress_to_class = 0;
13481 }
13482 }
13483 return Qnil;
13484}
13485#endif
13486
13487/*
13488 * Document-module: ObjectSpace
13489 *
13490 * The ObjectSpace module contains a number of routines
13491 * that interact with the garbage collection facility and allow you to
13492 * traverse all living objects with an iterator.
13493 *
13494 * ObjectSpace also provides support for object finalizers, procs that will be
13495 * called when a specific object is about to be destroyed by garbage
13496 * collection. See the documentation for
13497 * <code>ObjectSpace.define_finalizer</code> for important information on
13498 * how to use this method correctly.
13499 *
13500 * a = "A"
13501 * b = "B"
13502 *
13503 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
13504 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
13505 *
13506 * a = nil
13507 * b = nil
13508 *
13509 * _produces:_
13510 *
13511 * Finalizer two on 537763470
13512 * Finalizer one on 537763480
13513 */
13514
13515/*
13516 * Document-class: ObjectSpace::WeakMap
13517 *
13518 * An ObjectSpace::WeakMap object holds references to arbitrary objects
13519 * without preventing them from being garbage collected.
13520 *
13521 * This class is mostly used internally by WeakRef; please use
13522 * +lib/weakref.rb+ for the public interface.
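 *
 * A minimal usage sketch (assuming nothing else keeps the value alive):
 *
 *    map = ObjectSpace::WeakMap.new
 *    key = "k"
 *    map[key] = Object.new
 *    map[key]        # => the object, until the GC reclaims it
 *    map.key?(key)   # => false once the value has been collected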
13523 */
13524
13525/* Document-class: GC::Profiler
13526 *
13527 * The GC profiler provides access to information on GC runs including time,
13528 * length and object space size.
13529 *
13530 * Example:
13531 *
13532 * GC::Profiler.enable
13533 *
13534 * require 'rdoc/rdoc'
13535 *
13536 * GC::Profiler.report
13537 *
13538 * GC::Profiler.disable
13539 *
13540 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
13541 */
13542
13543#include "gc.rbinc"
13544
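/*
 * Init_GC wires the collector into the Ruby-visible API: the GC and
 * ObjectSpace modules, GC::Profiler, ObjectSpace::WeakMap, and the
 * GC::INTERNAL_CONSTANTS / GC::OPTS constants defined below.
 */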
13545void
13546Init_GC(void)
13547{
13548#undef rb_intern
13549 VALUE rb_mObjSpace;
13550 VALUE rb_mProfiler;
13551 VALUE gc_constants;
13552
13553 rb_mGC = rb_define_module("GC");
13554
13555 gc_constants = rb_hash_new();
13556 rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
13557 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
13558 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
13559 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
13560 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
13561 rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
13562 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
13563 OBJ_FREEZE(gc_constants);
13564 /* internal constants */
13565 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
13566
13567 rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
13568 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
13569 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
13570 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
13571 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
13572 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
13573 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
13574 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
13575 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
13576
13577 rb_mObjSpace = rb_define_module("ObjectSpace");
13578
13579 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
13580
13581 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
13582 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
13583
13584 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
13585
13586 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
13587
13588 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
13589 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
13590
13591 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
13592
13593 {
13594 VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
13595 rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
13596 rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
13597 rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
13598 rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
13599 rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
13600 rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
13601 rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
13602 rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
13603 rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
13604 rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
13605 rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
13606 rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
13607 rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
13608 rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
13609 rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
13610 rb_include_module(rb_cWeakMap, rb_mEnumerable);
13611 }
13612
13613 /* internal methods */
13614 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
13615 rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
13616#if MALLOC_ALLOCATED_SIZE
13617 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
13618 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
13619#endif
13620
13621#if GC_DEBUG_STRESS_TO_CLASS
13622 rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
13623 rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
13624#endif
13625
13626 {
13627 VALUE opts;
13628 /* GC build options */
13629 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
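        /* Record, as strings in GC::OPTS, the name of each compile-time
         * option below that is enabled (evaluates to non-zero). */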
13630#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
13631 OPT(GC_DEBUG);
13632 OPT(USE_RGENGC);
13633 OPT(RGENGC_DEBUG);
13634 OPT(RGENGC_CHECK_MODE);
13635 OPT(RGENGC_PROFILE);
13636 OPT(RGENGC_ESTIMATE_OLDMALLOC);
13637 OPT(GC_PROFILE_MORE_DETAIL);
13638 OPT(GC_ENABLE_LAZY_SWEEP);
13639 OPT(CALC_EXACT_MALLOC_SIZE);
13640 OPT(MALLOC_ALLOCATED_SIZE);
13641 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
13642 OPT(GC_PROFILE_DETAIL_MEMORY);
13643#undef OPT
13644 OBJ_FREEZE(opts);
13645 }
13646}
13647
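/*
 * The ruby_x* allocator names may be shadowed by macros; undefine them so the
 * exported functions below can be defined.  When USE_GC_MALLOC_OBJ_INFO_DETAILS
 * is enabled, each wrapper records its own __FILE__/__LINE__ as the malloc
 * info before delegating to the corresponding *_body implementation.
 */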
13648#ifdef ruby_xmalloc
13649#undef ruby_xmalloc
13650#endif
13651#ifdef ruby_xmalloc2
13652#undef ruby_xmalloc2
13653#endif
13654#ifdef ruby_xcalloc
13655#undef ruby_xcalloc
13656#endif
13657#ifdef ruby_xrealloc
13658#undef ruby_xrealloc
13659#endif
13660#ifdef ruby_xrealloc2
13661#undef ruby_xrealloc2
13662#endif
13663
13664void *
13665ruby_xmalloc(size_t size)
13666{
13667#if USE_GC_MALLOC_OBJ_INFO_DETAILS
13668 ruby_malloc_info_file = __FILE__;
13669 ruby_malloc_info_line = __LINE__;
13670#endif
13671 return ruby_xmalloc_body(size);
13672}
13673
13674void *
13675ruby_xmalloc2(size_t n, size_t size)
13676{
13677#if USE_GC_MALLOC_OBJ_INFO_DETAILS
13678 ruby_malloc_info_file = __FILE__;
13679 ruby_malloc_info_line = __LINE__;
13680#endif
13681 return ruby_xmalloc2_body(n, size);
13682}
13683
13684void *
13685ruby_xcalloc(size_t n, size_t size)
13686{
13687#if USE_GC_MALLOC_OBJ_INFO_DETAILS
13688 ruby_malloc_info_file = __FILE__;
13689 ruby_malloc_info_line = __LINE__;
13690#endif
13691 return ruby_xcalloc_body(n, size);
13692}
13693
13694void *
13695ruby_xrealloc(void *ptr, size_t new_size)
13696{
13697#if USE_GC_MALLOC_OBJ_INFO_DETAILS
13698 ruby_malloc_info_file = __FILE__;
13699 ruby_malloc_info_line = __LINE__;
13700#endif
13701 return ruby_xrealloc_body(ptr, new_size);
13702}
13703
13704void *
13705ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
13706{
13707#if USE_GC_MALLOC_OBJ_INFO_DETAILS
13708 ruby_malloc_info_file = __FILE__;
13709 ruby_malloc_info_line = __LINE__;
13710#endif
13711 return ruby_xrealloc2_body(ptr, n, new_size);
13712}