#include "ruby/internal/config.h"

#ifdef NEED_MADVISE_PROTOTYPE_USING_CADDR_T
#include <sys/types.h>
extern int madvise(caddr_t, size_t, int);
#endif

#include "eval_intern.h"
#include "internal/cont.h"
#include "internal/proc.h"
#include "internal/warnings.h"
#include "ractor_core.h"

static const int DEBUG = 0;

static long pagesize;

#define RB_PAGE_SIZE (pagesize)
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;
#ifdef RB_EXPERIMENTAL_FIBER_POOL
static VALUE rb_cFiberPool;
#endif
#define CAPTURE_JUST_VALID_VM_STACK 1

#ifdef COROUTINE_LIMITED_ADDRESS_SPACE
#define FIBER_POOL_ALLOCATION_FREE
#define FIBER_POOL_INITIAL_SIZE 8
#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 32
#else
#define FIBER_POOL_INITIAL_SIZE 32
#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 1024
#endif
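// Rationale: on address-space-constrained (32-bit) targets the pool starts
// small and releases whole allocations back to the system
// (FIBER_POOL_ALLOCATION_FREE); on 64-bit targets address space is cheap, so
// the pool starts at 32 stacks and a single expansion may map up to 1024.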
enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1
};

struct cont_saved_vm_stack {
    VALUE *ptr;
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t slen;  /* length of stack (head of ec->vm_stack) */
    size_t clen;  /* length of control frames (tail of ec->vm_stack) */
#endif
};

struct fiber_pool_stack {
    void * base;      // Base address of the allocated memory.
    void * current;   // The current stack pointer (direction-aware).
    size_t size;      // The size of the stack, excluding the guard page.
    size_t available; // Remaining capacity relative to current.
    struct fiber_pool * pool; // The pool this stack came from.
#ifdef FIBER_POOL_ALLOCATION_FREE
    struct fiber_pool_allocation * allocation;
#endif
};

struct fiber_pool_allocation {
    void * base;   // Base address of the mapped region.
    size_t size;   // Size of each stack, excluding its guard page.
    size_t stride; // size + RB_PAGE_SIZE.
    size_t count;  // Number of stacks in this allocation.
#ifdef FIBER_POOL_ALLOCATION_FREE
    size_t used;   // Number of stacks currently handed out.
#endif
    struct fiber_pool * pool;
#ifdef FIBER_POOL_ALLOCATION_FREE
    struct fiber_pool_allocation * previous;
#endif
    struct fiber_pool_allocation * next;
};

// Stored in the first page of a stack while it is not in use.
struct fiber_pool_vacancy {
    struct fiber_pool_stack stack;
#ifdef FIBER_POOL_ALLOCATION_FREE
    struct fiber_pool_vacancy * previous;
#endif
    struct fiber_pool_vacancy * next;
};

struct fiber_pool {
    struct fiber_pool_allocation * allocations; // All allocations, newest first.
    struct fiber_pool_vacancy * vacancies;      // Free list: O(1) acquisition.
    size_t size;           // Per-stack size, rounded up to whole pages.
    size_t count;          // Total stacks allocated by this pool.
    size_t initial_count;  // Lower bound for each expansion.
    int free_stacks;       // Whether to madvise-free released stacks.
    size_t used;           // Stacks currently in use.
    size_t vm_stack_size;  // Bytes of each stack reserved for the VM stack.
};

typedef struct rb_context_struct {
    enum context_type type;
    int argc;
    int kw_splat;
    VALUE self;
    VALUE value;

    struct cont_saved_vm_stack saved_vm_stack;

    struct {
        VALUE *stack;
        VALUE *stack_src;
        size_t stack_size;
    } machine;
    rb_execution_context_t saved_ec;
    rb_jmpbuf_t jmpbuf;
    rb_ensure_entry_t *ensure_array;
    struct mjit_cont *mjit_cont; // MJIT info about the continuation.
} rb_context_t;

enum fiber_status {
    FIBER_CREATED,
    FIBER_RESUMED,
    FIBER_SUSPENDED,
    FIBER_TERMINATED
};
#define FIBER_CREATED_P(fiber)    ((fiber)->status == FIBER_CREATED)
#define FIBER_RESUMED_P(fiber)    ((fiber)->status == FIBER_RESUMED)
#define FIBER_SUSPENDED_P(fiber)  ((fiber)->status == FIBER_SUSPENDED)
#define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
#define FIBER_RUNNABLE_P(fiber)   (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))
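// Fiber status state machine:
//
//   FIBER_CREATED --> FIBER_RESUMED <--> FIBER_SUSPENDED
//                          |
//                          v
//                   FIBER_TERMINATED
//
// Only created or suspended fibers can be switched to (FIBER_RUNNABLE_P).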
struct rb_fiber_struct {
    rb_context_t cont;
    VALUE first_proc;
    struct rb_fiber_struct *prev;
    struct rb_fiber_struct *resuming_fiber;

    BITFIELD(enum fiber_status, status, 2);
    /* Whether the fiber is allowed to implicitly yield. */
    unsigned int yielding : 1;
    unsigned int blocking : 1;

    struct coroutine_context context;
    struct fiber_pool_stack stack;
};

static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};

static ID fiber_initialize_keywords[2] = {0};

/*
 * FreeBSD require a first (i.e. addr) argument of mmap(2) is not NULL
 * if MAP_STACK is passed.
 * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
 */
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
#else
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
#endif

#define ERRNOMSG strerror(errno)
// Return the vacancy record stored at the top of the given stack region.
static struct fiber_pool_vacancy *
fiber_pool_vacancy_pointer(void * base, size_t size)
{
    STACK_GROW_DIR_DETECTION;

    return (struct fiber_pool_vacancy *)(
        (char*)base + STACK_DIR_UPPER(0, size - RB_PAGE_SIZE)
    );
}

// Reset the current stack pointer and available size of the given stack.
static inline void
fiber_pool_stack_reset(struct fiber_pool_stack * stack)
{
    STACK_GROW_DIR_DETECTION;

    stack->current = (char*)stack->base + STACK_DIR_UPPER(0, stack->size);
    stack->available = stack->size;
}

// A pointer to the base of the current unallocated stack.
static inline void *
fiber_pool_stack_base(struct fiber_pool_stack * stack)
{
    STACK_GROW_DIR_DETECTION;

    VM_ASSERT(stack->current);

    return STACK_DIR_UPPER(stack->current, (char*)stack->current - stack->available);
}
// Allocate some memory from the stack. Used to allocate the vm_stack inline with the machine stack.
static inline void *
fiber_pool_stack_alloca(struct fiber_pool_stack * stack, size_t offset)
{
    STACK_GROW_DIR_DETECTION;

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_alloca(%p): %"PRIuSIZE"/%"PRIuSIZE"\n", (void*)stack, offset, stack->available);
    VM_ASSERT(stack->available >= offset);

    // The pointer to the memory being allocated:
    void * pointer = STACK_DIR_UPPER(stack->current, (char*)stack->current - offset);

    // Move the stack pointer:
    stack->current = STACK_DIR_UPPER((char*)stack->current + offset, (char*)stack->current - offset);
    stack->available -= offset;

    return pointer;
}
// Reset the vacancy's stack and reserve the top page for the vacancy record itself.
static inline void
fiber_pool_vacancy_reset(struct fiber_pool_vacancy * vacancy)
{
    fiber_pool_stack_reset(&vacancy->stack);

    // Consume one page of the stack because it's used for the vacancy list:
    fiber_pool_stack_alloca(&vacancy->stack, RB_PAGE_SIZE);
}

static inline struct fiber_pool_vacancy *
fiber_pool_vacancy_push(struct fiber_pool_vacancy * vacancy, struct fiber_pool_vacancy * head)
{
    vacancy->next = head;

#ifdef FIBER_POOL_ALLOCATION_FREE
    if (head) {
        head->previous = vacancy;
        vacancy->previous = NULL;
    }
#endif

    return vacancy;
}

#ifdef FIBER_POOL_ALLOCATION_FREE
static void
fiber_pool_vacancy_remove(struct fiber_pool_vacancy * vacancy)
{
    if (vacancy->next) {
        vacancy->next->previous = vacancy->previous;
    }

    if (vacancy->previous) {
        vacancy->previous->next = vacancy->next;
    }
    else {
        // It's the head of the list:
        vacancy->stack.pool->vacancies = vacancy->next;
    }
}
static inline struct fiber_pool_vacancy *
fiber_pool_vacancy_pop(struct fiber_pool * pool)
{
    struct fiber_pool_vacancy * vacancy = pool->vacancies;

    if (vacancy) {
        fiber_pool_vacancy_remove(vacancy);
    }

    return vacancy;
}
#else
static inline struct fiber_pool_vacancy *
fiber_pool_vacancy_pop(struct fiber_pool * pool)
{
    struct fiber_pool_vacancy * vacancy = pool->vacancies;

    if (vacancy) {
        pool->vacancies = vacancy->next;
    }

    return vacancy;
}
#endif
// Initialize the vacant stack. The [base, size] allocation should not include the guard page.
static struct fiber_pool_vacancy *
fiber_pool_vacancy_initialize(struct fiber_pool * fiber_pool, struct fiber_pool_vacancy * vacancies, void * base, size_t size)
{
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, size);

    vacancy->stack.base = base;
    vacancy->stack.size = size;

    fiber_pool_vacancy_reset(vacancy);

    vacancy->stack.pool = fiber_pool;

    return fiber_pool_vacancy_push(vacancy, vacancies);
}
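// Note that the vacancy record lives inside the stack region it describes
// (in the page adjacent to the guard page, carved out by
// fiber_pool_vacancy_reset), so the free list needs no extra allocation.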
// Allocate a maximum of count stacks, size given by stride.
// If successful, count is updated to the number actually allocated;
// on failure, returns NULL.
static void *
fiber_pool_allocate_memory(size_t * count, size_t stride)
{
    // Try progressively smaller allocations, halving the count each time:
    while (*count > 1) {
#if defined(_WIN32)
        void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);

        if (!base) {
            *count = (*count) >> 1;
        }
        else {
            return base;
        }
#else
        void * base = mmap(NULL, (*count)*stride, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);

        if (base == MAP_FAILED) {
            // If the allocation fails, count = count / 2, and try again.
            *count = (*count) >> 1;
        }
        else {
#if defined(MADV_FREE_REUSE)
            // On macOS, MADV_FREE_REUSE keeps task_info accounting accurate;
            // it can transiently fail with EAGAIN, so retry until it sticks:
            while (madvise(base, (*count)*stride, MADV_FREE_REUSE) == -1 && errno == EAGAIN);
#endif
            return base;
        }
#endif
    }

    return NULL;
}
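// Each pool expansion maps `count` regions of `stride` bytes, where
// stride = size + RB_PAGE_SIZE: the extra page becomes an inaccessible guard
// page, so overflowing a fiber's machine stack faults instead of silently
// corrupting the neighbouring stack.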
// Given an existing fiber pool, expand it by the specified number of stacks.
static struct fiber_pool_allocation *
fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
{
    STACK_GROW_DIR_DETECTION;

    size_t size = fiber_pool->size;
    size_t stride = size + RB_PAGE_SIZE;

    // Allocate the memory required for the stacks:
    void * base = fiber_pool_allocate_memory(&count, stride);

    if (base == NULL) {
        rb_raise(rb_eFiberError, "can't alloc machine stack to fiber (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", count, size, ERRNOMSG);
    }

    struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;
    struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);

    // Initialize fiber pool allocation:
    allocation->base = base;
    allocation->size = size;
    allocation->stride = stride;
    allocation->count = count;
#ifdef FIBER_POOL_ALLOCATION_FREE
    allocation->used = 0;
#endif
    allocation->pool = fiber_pool;

    if (DEBUG) {
        fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",
                count, (void*)fiber_pool, fiber_pool->used, fiber_pool->count, size, fiber_pool->vm_stack_size);
    }

    // Initialize the vacancy list:
    for (size_t i = 0; i < count; i += 1) {
        void * base = (char*)allocation->base + (stride * i);
        void * page = (char*)base + STACK_DIR_UPPER(size, 0);

#if defined(_WIN32)
        DWORD old_protect;

        if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
            VirtualFree(allocation->base, 0, MEM_RELEASE);
            rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
        }
#else
        if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
            munmap(allocation->base, count*stride);
            rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
        }
#endif

        vacancies = fiber_pool_vacancy_initialize(
            fiber_pool, vacancies,
            (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
            size
        );

#ifdef FIBER_POOL_ALLOCATION_FREE
        vacancies->stack.allocation = allocation;
#endif
    }

    // Insert the allocation into the head of the pool:
    allocation->next = fiber_pool->allocations;

#ifdef FIBER_POOL_ALLOCATION_FREE
    if (allocation->next) {
        allocation->next->previous = allocation;
    }

    allocation->previous = NULL;
#endif

    fiber_pool->allocations = allocation;
    fiber_pool->vacancies = vacancies;
    fiber_pool->count += count;

    return allocation;
}
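// Every stack created by an expansion is pushed onto the pool's vacancy list
// up front, so subsequent calls to fiber_pool_stack_acquire are O(1) until
// the list runs dry and another expansion is needed.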
// Initialize the specified fiber pool with the given number of stacks.
// @param vm_stack_size The size of the vm stack to allocate.
static void
fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t count, size_t vm_stack_size)
{
    VM_ASSERT(vm_stack_size < size);

    fiber_pool->allocations = NULL;
    fiber_pool->vacancies = NULL;

    // Round the stack size up to a whole number of pages:
    fiber_pool->size = ((size / RB_PAGE_SIZE) + 1) * RB_PAGE_SIZE;

    fiber_pool->count = 0;
    fiber_pool->initial_count = count;
    fiber_pool->free_stacks = 1;
    fiber_pool->used = 0;

    fiber_pool->vm_stack_size = vm_stack_size;

    fiber_pool_expand(fiber_pool, count);
}
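// Worked example: with 4 KiB pages, a requested size of 128 KiB rounds up to
// (131072 / 4096 + 1) * 4096 = 135168 bytes (33 pages) per stack; the guard
// page added by fiber_pool_expand makes the stride 139264 bytes.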
#ifdef FIBER_POOL_ALLOCATION_FREE
// Free the specified allocation, removing its stacks from the vacancy list.
static void
fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
{
    STACK_GROW_DIR_DETECTION;

    VM_ASSERT(allocation->used == 0);

    if (DEBUG) fprintf(stderr, "fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE"\n", (void*)allocation, allocation->base, allocation->count);

    size_t i;
    for (i = 0; i < allocation->count; i += 1) {
        void * base = (char*)allocation->base + (allocation->stride * i) + STACK_DIR_UPPER(0, RB_PAGE_SIZE);

        struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, allocation->size);

        // Pop the vacant stack off the free list:
        fiber_pool_vacancy_remove(vacancy);
    }

#ifdef _WIN32
    VirtualFree(allocation->base, 0, MEM_RELEASE);
#else
    munmap(allocation->base, allocation->stride * allocation->count);
#endif

    if (allocation->previous) {
        allocation->previous->next = allocation->next;
    }
    else {
        // We are the head of the list:
        allocation->pool->allocations = allocation->next;
    }

    if (allocation->next) {
        allocation->next->previous = allocation->previous;
    }

    allocation->pool->count -= allocation->count;

    ruby_xfree(allocation);
}
#endif
// Acquire a stack from the given fiber pool, expanding it if necessary.
static struct fiber_pool_stack
fiber_pool_stack_acquire(struct fiber_pool * fiber_pool)
{
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pop(fiber_pool);

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);

    if (!vacancy) {
        const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
        const size_t minimum = fiber_pool->initial_count;

        size_t count = fiber_pool->count;
        if (count > maximum) count = maximum;
        if (count < minimum) count = minimum;

        fiber_pool_expand(fiber_pool, count);

        // The free list should now contain some stacks:
        VM_ASSERT(fiber_pool->vacancies);

        vacancy = fiber_pool_vacancy_pop(fiber_pool);
    }

    VM_ASSERT(vacancy);
    VM_ASSERT(vacancy->stack.base);

    // Take the top item from the free list:
    fiber_pool->used += 1;

#ifdef FIBER_POOL_ALLOCATION_FREE
    vacancy->stack.allocation->used += 1;
#endif

    fiber_pool_stack_reset(&vacancy->stack);

    return vacancy->stack;
}
// Advise the operating system that the stack's pages are no longer needed.
// This adds some overhead but lets the system reclaim memory under pressure.
static inline void
fiber_pool_stack_free(struct fiber_pool_stack * stack)
{
    void * base = fiber_pool_stack_base(stack);
    size_t size = stack->available;

    // If this is not true, the vacancy information will almost certainly be destroyed:
    VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"]\n", base, size, stack->base, stack->size);

#ifdef __wasi__
    // WebAssembly doesn't support madvise, so we just don't do anything.
#elif VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
    // This immediately discards the pages and the memory is reset to zero.
    madvise(base, size, MADV_DONTNEED);
#elif defined(MADV_FREE_REUSABLE)
    // Acknowledge the pages down to the task_info API; this may transiently
    // fail with EAGAIN, so re-iterate until it succeeds:
    while (madvise(base, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN);
#elif defined(MADV_FREE)
    madvise(base, size, MADV_FREE);
#elif defined(MADV_DONTNEED)
    madvise(base, size, MADV_DONTNEED);
#elif defined(POSIX_MADV_DONTNEED)
    posix_madvise(base, size, POSIX_MADV_DONTNEED);
#elif defined(_WIN32)
    VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
#endif
}
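// All of the branches above keep the mapping itself intact and only let the
// kernel reclaim physical pages: MADV_FREE_REUSABLE (macOS, keeps task_info
// accounting honest), MADV_FREE (recent Linux, lazy), MADV_DONTNEED (older
// Linux, immediate), posix_madvise as a portable fallback, and
// VirtualAlloc(MEM_RESET) on Windows.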
// Release and return a stack to the vacancy list.
static void
fiber_pool_stack_release(struct fiber_pool_stack * stack)
{
    struct fiber_pool * pool = stack->pool;
    struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);

    if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);

    // Copy the stack details into the vacancy area:
    vacancy->stack = *stack;

    // Reset the stack pointers and reserve space for the vacancy data:
    fiber_pool_vacancy_reset(vacancy);

    // Push the vacancy onto the vacancies list:
    pool->vacancies = fiber_pool_vacancy_push(vacancy, stack->pool->vacancies);
    pool->used -= 1;

#ifdef FIBER_POOL_ALLOCATION_FREE
    struct fiber_pool_allocation * allocation = stack->allocation;

    allocation->used -= 1;

    // Release address space and/or dirty memory:
    if (allocation->used == 0) {
        fiber_pool_allocation_free(allocation);
    }
    else if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);
    }
#else
    // This is entirely optional, but clears the dirty stack pages.
    if (stack->pool->free_stacks) {
        fiber_pool_stack_free(&vacancy->stack);
    }
#endif
}
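// Under FIBER_POOL_ALLOCATION_FREE, releasing the last used stack of an
// allocation unmaps the whole allocation (fiber_pool_allocation_free),
// returning address space to the system rather than just its physical pages.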
static inline void
ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
{
    rb_execution_context_t *ec = &fiber->cont.saved_ec;
    rb_ractor_set_current_ec(th->ractor, th->ec = ec);

    /*
     * timer-thread may set trap interrupt on previous th->ec at any time;
     * ensure we do not delay (or lose) the trap interrupt handling.
     */
    if (th->vm->ractor.main_thread == th &&
        rb_signal_buff_size() > 0) {
        RUBY_VM_SET_TRAP_INTERRUPT(ec);
    }

    VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
}

static inline void
fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
{
    ec_switch(th, fiber);
    VM_ASSERT(th->ec->fiber_ptr == fiber);
}

static COROUTINE
fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
{
    rb_fiber_t *fiber = to->argument;
    rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;

#ifdef COROUTINE_PTHREAD_CONTEXT
    ruby_thread_set_native(thread);
#endif

    fiber_restore_thread(thread, fiber);

    rb_fiber_start(fiber);

#ifndef COROUTINE_PTHREAD_CONTEXT
    VM_UNREACHABLE(fiber_entry);
#endif
}
// Initialize a fiber's coroutine machine stack and vm stack.
static VALUE *
fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)
{
    struct fiber_pool * fiber_pool = fiber->stack.pool;
    rb_execution_context_t *sec = &fiber->cont.saved_ec;
    void * vm_stack = NULL;

    VM_ASSERT(fiber_pool != NULL);

    fiber->stack = fiber_pool_stack_acquire(fiber_pool);
    vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);
    *vm_stack_size = fiber_pool->vm_stack_size;

    coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);

    // The stack for this execution context is the one we allocated:
    sec->machine.stack_start = fiber->stack.current;
    sec->machine.stack_maxsize = fiber->stack.available;

    fiber->context.argument = (void*)fiber;

    return vm_stack;
}

// Release the stack from the fiber, its execution context, and return it to the fiber pool.
static void
fiber_stack_release(rb_fiber_t * fiber)
{
    rb_execution_context_t *ec = &fiber->cont.saved_ec;

    if (DEBUG) fprintf(stderr, "fiber_stack_release: %p, stack.base=%p\n", (void*)fiber, fiber->stack.base);

    // Return the stack back to the fiber pool if it wasn't already:
    if (fiber->stack.base) {
        fiber_pool_stack_release(&fiber->stack);
        fiber->stack.base = NULL;
    }

    // The stack is no longer associated with this execution context:
    rb_ec_clear_vm_stack(ec);
}
static const char *
fiber_status_name(enum fiber_status s)
{
    switch (s) {
      case FIBER_CREATED: return "created";
      case FIBER_RESUMED: return "resumed";
      case FIBER_SUSPENDED: return "suspended";
      case FIBER_TERMINATED: return "terminated";
    }
    VM_UNREACHABLE(fiber_status_name);
    return NULL;
}

static void
fiber_verify(const rb_fiber_t *fiber)
{
#if VM_CHECK_MODE > 0
    VM_ASSERT(fiber->cont.saved_ec.fiber_ptr == fiber);

    switch (fiber->status) {
      case FIBER_RESUMED:
        VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
        break;
      case FIBER_SUSPENDED:
        VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
        break;
      case FIBER_CREATED:
      case FIBER_TERMINATED:
        break;
      default:
        VM_UNREACHABLE(fiber_verify);
    }
#endif
}

inline static void
fiber_status_set(rb_fiber_t *fiber, enum fiber_status s)
{
    VM_ASSERT(!FIBER_TERMINATED_P(fiber));
    VM_ASSERT(fiber->status != s);
    fiber_verify(fiber);
    fiber->status = s;
}
static rb_fiber_t *
fiber_ptr(VALUE obj)
{
    rb_fiber_t *fiber;
    TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, fiber);

    if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");

    return fiber;
}

NOINLINE(static VALUE cont_capture(volatile int *volatile stat));

#define THREAD_MUST_BE_RUNNING(th) do { \
        if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
    } while (0)

rb_thread_t*
rb_fiber_threadptr(const rb_fiber_t *fiber)
{
    return fiber->cont.saved_ec.thread_ptr;
}

static VALUE
cont_thread_value(const rb_context_t *cont)
{
    return cont->saved_ec.thread_ptr->self;
}
static void
cont_compact(void *ptr)
{
    rb_context_t *cont = ptr;

    if (cont->self) {
        cont->self = rb_gc_location(cont->self);
    }
    cont->value = rb_gc_location(cont->value);
    rb_execution_context_update(&cont->saved_ec);
}

static void
cont_mark(void *ptr)
{
    rb_context_t *cont = ptr;

    RUBY_MARK_ENTER("cont");
    if (cont->self) {
        rb_gc_mark_movable(cont->self);
    }
    rb_gc_mark_movable(cont->value);

    rb_execution_context_mark(&cont->saved_ec);
    rb_gc_mark(cont_thread_value(cont));

    if (cont->saved_vm_stack.ptr) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
        rb_gc_mark_locations(cont->saved_vm_stack.ptr,
                             cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
#else
        rb_gc_mark_locations(cont->saved_vm_stack.ptr,
                             cont->saved_vm_stack.ptr, cont->saved_ec.stack_size);
#endif
    }

    if (cont->machine.stack) {
        if (cont->type == CONTINUATION_CONTEXT) {
            /* cont */
            rb_gc_mark_locations(cont->machine.stack,
                                 cont->machine.stack + cont->machine.stack_size);
        }
        else {
            /* fiber */
            const rb_fiber_t *fiber = (rb_fiber_t*)cont;

            if (!FIBER_TERMINATED_P(fiber)) {
                rb_gc_mark_locations(cont->machine.stack,
                                     cont->machine.stack + cont->machine.stack_size);
            }
        }
    }

    RUBY_MARK_LEAVE("cont");
}
static int
fiber_is_root_p(const rb_fiber_t *fiber)
{
    return fiber == fiber->cont.saved_ec.thread_ptr->root_fiber;
}

static void
cont_free(void *ptr)
{
    rb_context_t *cont = ptr;

    RUBY_FREE_ENTER("cont");

    if (cont->type == CONTINUATION_CONTEXT) {
        ruby_xfree(cont->saved_ec.vm_stack);
        ruby_xfree(cont->ensure_array);
        RUBY_FREE_UNLESS_NULL(cont->machine.stack);
    }
    else {
        rb_fiber_t *fiber = (rb_fiber_t*)cont;
        coroutine_destroy(&fiber->context);
        fiber_stack_release(fiber);
    }

    RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);

    if (mjit_enabled) {
        VM_ASSERT(cont->mjit_cont != NULL);
        mjit_cont_free(cont->mjit_cont);
    }
    /* free rb_cont_t or rb_fiber_t */
    ruby_xfree(ptr);

    RUBY_FREE_LEAVE("cont");
}
static size_t
cont_memsize(const void *ptr)
{
    const rb_context_t *cont = ptr;
    size_t size = 0;

    size = sizeof(*cont);
    if (cont->saved_vm_stack.ptr) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
        size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
#else
        size_t n = cont->saved_ec.vm_stack_size;
#endif
        size += n * sizeof(*cont->saved_vm_stack.ptr);
    }

    if (cont->machine.stack) {
        size += cont->machine.stack_size * sizeof(*cont->machine.stack);
    }

    return size;
}
void
rb_fiber_update_self(rb_fiber_t *fiber)
{
    if (fiber->cont.self) {
        fiber->cont.self = rb_gc_location(fiber->cont.self);
    }
    else {
        rb_execution_context_update(&fiber->cont.saved_ec);
    }
}

void
rb_fiber_mark_self(const rb_fiber_t *fiber)
{
    if (fiber->cont.self) {
        rb_gc_mark_movable(fiber->cont.self);
    }
    else {
        rb_execution_context_mark(&fiber->cont.saved_ec);
    }
}

static void
fiber_compact(void *ptr)
{
    rb_fiber_t *fiber = ptr;
    fiber->first_proc = rb_gc_location(fiber->first_proc);

    if (fiber->prev) rb_fiber_update_self(fiber->prev);

    cont_compact(&fiber->cont);
    fiber_verify(fiber);
}

static void
fiber_mark(void *ptr)
{
    rb_fiber_t *fiber = ptr;
    RUBY_MARK_ENTER("cont");
    fiber_verify(fiber);
    rb_gc_mark_movable(fiber->first_proc);
    if (fiber->prev) rb_fiber_mark_self(fiber->prev);
    cont_mark(&fiber->cont);
    RUBY_MARK_LEAVE("cont");
}
static void
fiber_free(void *ptr)
{
    rb_fiber_t *fiber = ptr;
    RUBY_FREE_ENTER("fiber");

    if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", (void *)fiber, fiber->stack.base);

    if (fiber->cont.saved_ec.local_storage) {
        rb_id_table_free(fiber->cont.saved_ec.local_storage);
    }

    cont_free(&fiber->cont);
    RUBY_FREE_LEAVE("fiber");
}

static size_t
fiber_memsize(const void *ptr)
{
    const rb_fiber_t *fiber = ptr;
    size_t size = sizeof(*fiber);
    const rb_execution_context_t *saved_ec = &fiber->cont.saved_ec;
    const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);

    /*
     * vm.c::thread_memsize already counts th->ec->local_storage
     */
    if (saved_ec->local_storage && fiber != th->root_fiber) {
        size += rb_id_table_memsize(saved_ec->local_storage);
    }

    size += cont_memsize(&fiber->cont);
    return size;
}

VALUE
rb_obj_is_fiber(VALUE obj)
{
    return RBOOL(rb_typeddata_is_kind_of(obj, &fiber_data_type));
}
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;

    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);

    if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
        size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
        cont->machine.stack_src = th->ec->machine.stack_end;
    }
    else {
        size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
        cont->machine.stack_src = th->ec->machine.stack_start;
    }

    if (cont->machine.stack) {
        REALLOC_N(cont->machine.stack, VALUE, size);
    }
    else {
        cont->machine.stack = ALLOC_N(VALUE, size);
    }

    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
}

static const rb_data_type_t cont_data_type = {
    "continuation",
    {cont_mark, cont_free, cont_memsize, cont_compact},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
static inline void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
    rb_execution_context_t *sec = &cont->saved_ec;

    VM_ASSERT(th->status == THREAD_RUNNABLE);

    /* save thread context */
    *sec = *th->ec;

    /* saved_ec->machine.stack_end should be NULL */
    /* because it may happen GC afterward */
    sec->machine.stack_end = NULL;
}

static void
cont_init_mjit_cont(rb_context_t *cont)
{
    VM_ASSERT(cont->mjit_cont == NULL);
    if (mjit_enabled) {
        cont->mjit_cont = mjit_cont_new(&(cont->saved_ec));
    }
}

static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont_save_thread(cont, th);
    cont->saved_ec.thread_ptr = th;
    cont->saved_ec.local_storage = NULL;
    cont->saved_ec.local_storage_recursive_hash = Qnil;
    cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
    cont_init_mjit_cont(cont);
}
static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}

VALUE
rb_fiberptr_self(struct rb_fiber_struct *fiber)
{
    return fiber->cont.self;
}

unsigned int
rb_fiberptr_blocking(struct rb_fiber_struct *fiber)
{
    return fiber->blocking;
}

// This is called after fork, to create mjit_cont for the root fiber.
void
rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber)
{
    cont_init_mjit_cont(&fiber->cont);
}
#if 0
void
show_vm_stack(const rb_execution_context_t *ec)
{
    VALUE *p = ec->vm_stack;
    while (p < ec->cfp->sp) {
        fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
        rb_obj_info_dump(*p);
        p++;
    }
}

void
show_vm_pcs(const rb_control_frame_t *cfp, const rb_control_frame_t *end_of_cfp)
{
    int i=0;
    while (cfp != end_of_cfp) {
        int pc = 0;
        if (cfp->iseq) {
            pc = cfp->pc - cfp->iseq->body->iseq_encoded;
        }
        fprintf(stderr, "%2d pc: %d\n", i++, pc);
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
#endif
COMPILER_WARNING_PUSH
#ifdef __clang__
COMPILER_WARNING_IGNORED(-Wduplicate-decl-specifier)
#endif
static VALUE
cont_capture(volatile int *volatile stat)
{
    rb_context_t *volatile cont;
    rb_thread_t *th = GET_THREAD();
    volatile VALUE contval;
    const rb_execution_context_t *ec = th->ec;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th->ec);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
    cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
    cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
    MEMCPY(cont->saved_vm_stack.ptr,
           ec->vm_stack,
           VALUE, cont->saved_vm_stack.slen);
    MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
           (VALUE*)ec->cfp,
           VALUE,
           cont->saved_vm_stack.clen);
#else
    cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
    MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
#endif
    // At this point, `cont->saved_vm_stack.ptr` is a copy of the actual vm stack.
    rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
    VM_ASSERT(cont->saved_ec.cfp != NULL);
    cont_save_machine_stack(th, cont);
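    // With CAPTURE_JUST_VALID_VM_STACK, only the live parts of the VM stack
    // are saved: `slen` slots of value stack measured from the bottom, plus
    // `clen` slots of control frames measured from the top. The unused gap in
    // the middle is not copied at all.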
    /* backup ensure_list to array for search in another context */
    {
        rb_ensure_list_t *p;
        int size = 0;
        rb_ensure_entry_t *entry;
        for (p=th->ec->ensure_list; p; p=p->next)
            size++;
        entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t, size+1);
        for (p=th->ec->ensure_list; p; p=p->next) {
            if (!p->entry.marker)
                p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
            *entry++ = p->entry;
        }
        entry->marker = 0;
    }

    if (ruby_setjmp(cont->jmpbuf)) {
        VALUE value;

        VAR_INITIALIZED(cont);
        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value);
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        *stat = 0;
        return contval;
    }
}
COMPILER_WARNING_POP
static inline void
cont_restore_thread(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD();

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation */
        rb_execution_context_t *sec = &cont->saved_ec;
        rb_fiber_t *fiber = NULL;

        if (sec->fiber_ptr != NULL) {
            fiber = sec->fiber_ptr;
        }
        else if (th->root_fiber) {
            fiber = th->root_fiber;
        }

        if (fiber && th->ec != &fiber->cont.saved_ec) {
            ec_switch(th, fiber);
        }

        if (th->ec->trace_arg != sec->trace_arg) {
            rb_raise(rb_eRuntimeError, "can't call across trace_func");
        }

        /* copy vm stack */
#ifdef CAPTURE_JUST_VALID_VM_STACK
        MEMCPY(th->ec->vm_stack,
               cont->saved_vm_stack.ptr,
               VALUE, cont->saved_vm_stack.slen);
        MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
               cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
               VALUE, cont->saved_vm_stack.clen);
#else
        MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
#endif
        /* other members of ec */

        th->ec->cfp = sec->cfp;
        th->ec->raised_flag = sec->raised_flag;
        th->ec->tag = sec->tag;
        th->ec->root_lep = sec->root_lep;
        th->ec->root_svar = sec->root_svar;
        th->ec->ensure_list = sec->ensure_list;
        th->ec->errinfo = sec->errinfo;

        VM_ASSERT(th->ec->vm_stack != NULL);
    }
    else {
        /* fiber */
        fiber_restore_thread(th, (rb_fiber_t*)cont);
    }
}
static void
fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
{
    rb_thread_t *th = GET_THREAD();

    /* save old_fiber's machine stack, to allow efficient garbage collection */
    if (!FIBER_TERMINATED_P(old_fiber)) {
        STACK_GROW_DIR_DETECTION;
        SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
        if (STACK_DIR_UPPER(0, 1)) {
            old_fiber->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
            old_fiber->cont.machine.stack = th->ec->machine.stack_end;
        }
        else {
            old_fiber->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
            old_fiber->cont.machine.stack = th->ec->machine.stack_start;
        }
    }

    /* exchange machine stack_start between old_fiber and new_fiber */
    old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;

    /* old_fiber->machine.stack_end should be NULL */
    old_fiber->cont.saved_ec.machine.stack_end = NULL;

    /* swap machine context */
    struct coroutine_context * from = coroutine_transfer(&old_fiber->context, &new_fiber->context);

    if (from == NULL) {
        rb_syserr_fail(errno, "coroutine_transfer");
    }

    /* restore thread context */
    fiber_restore_thread(th, old_fiber);
}
NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));

static void
cont_restore_1(rb_context_t *cont)
{
    cont_restore_thread(cont);

    /* restore machine stack */
#if defined(_M_AMD64) && !defined(__MINGW64__)
    {
        /* workaround for x64 SEH */
        jmp_buf buf;
        setjmp(buf);
        _JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
        bp->Frame = ((_JUMP_BUFFER*)((void*)&buf))->Frame;
    }
#endif
    if (cont->machine.stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine.stack_src, cont->machine.stack,
               VALUE, cont->machine.stack_size);
    }

    ruby_longjmp(cont->jmpbuf, 1);
}
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));

static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine.stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine.stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                space[STACK_PAD_SIZE-1] = *sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
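// The padding dance above guarantees that the C stack region about to be
// overwritten by cont_restore_1's MEMCPY does not contain the currently
// executing frame: if it does, the function grows the stack past the saved
// region (via alloca or by recursing with a 1024-VALUE pad) and retries.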
static VALUE
rb_callcc(VALUE self)
{
    volatile int called;
    volatile VALUE val = cont_capture(&called);

    if (called) {
        return val;
    }
    else {
        return rb_yield(val);
    }
}

static VALUE
make_passing_arg(int argc, const VALUE *argv)
{
    switch (argc) {
      case -1:
        return argv[0];
      case 0:
        return Qnil;
      case 1:
        return argv[0];
      default:
        return rb_ary_new4(argc, argv);
    }
}
typedef VALUE e_proc(VALUE);

/* CAUTION!! : Currently, error in rollback_func is not supported. */
void
ruby_register_rollback_func_for_ensure(e_proc *ensure_func, e_proc *rollback_func)
{
    st_table **table_p = &GET_VM()->ensure_rollback_table;
    if (UNLIKELY(*table_p == NULL)) {
        *table_p = st_init_numtable();
    }
    st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
}

static inline e_proc *
lookup_rollback_func(e_proc *ensure_func)
{
    st_table *table = GET_VM()->ensure_rollback_table;
    st_data_t val;
    if (table && st_lookup(table, (st_data_t)ensure_func, &val))
        return (e_proc *) val;
    return (e_proc *) Qundef;
}
static inline void
rollback_ensure_stack(VALUE self, rb_ensure_list_t *current, rb_ensure_entry_t *target)
{
    rb_ensure_list_t *p;
    rb_ensure_entry_t *entry;
    size_t i, j;
    size_t cur_size;
    size_t target_size;
    size_t base_point;
    e_proc *func;

    cur_size = 0;
    for (p=current; p; p=p->next)
        cur_size++;
    target_size = 0;
    for (entry=target; entry->marker; entry++)
        target_size++;

    /* search the common stack point */
    p = current;
    base_point = cur_size;
    while (base_point) {
        if (target_size >= base_point &&
            p->entry.marker == target[target_size - base_point].marker)
            break;
        base_point--;
        p = p->next;
    }

    /* rollback function check */
    for (i=0; i < target_size - base_point; i++) {
        if (!lookup_rollback_func(target[i].e_proc)) {
            rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
        }
    }

    /* pop the ensure stack */
    while (cur_size > base_point) {
        /* escape from ensure block */
        (*current->entry.e_proc)(current->entry.data2);
        current = current->next;
        cur_size--;
    }

    /* push the ensure stack */
    for (j = 0; j < i; j++) {
        func = lookup_rollback_func(target[i - j - 1].e_proc);
        if ((VALUE)func != Qundef) {
            (*func)(target[i - j - 1].data2);
        }
    }
}
NORETURN(static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval));

static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont = cont_ptr(contval);
    rb_thread_t *th = GET_THREAD();

    if (cont_thread_value(cont) != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_ec.fiber_ptr) {
        if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
    }
    rollback_ensure_stack(contval, th->ec->ensure_list, cont->ensure_array);

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    cont_restore_0(cont, &contval);
    UNREACHABLE_RETURN(Qnil);
}
static const rb_data_type_t fiber_data_type = {
    "fiber",
    {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}

static rb_fiber_t*
fiber_t_alloc(VALUE fiber_value, unsigned int blocking)
{
    rb_fiber_t *fiber;
    rb_thread_t *th = GET_THREAD();

    if (DATA_PTR(fiber_value) != 0) {
        rb_raise(rb_eRuntimeError, "cannot initialize twice");
    }

    THREAD_MUST_BE_RUNNING(th);
    fiber = ZALLOC(rb_fiber_t);
    fiber->cont.self = fiber_value;
    fiber->cont.type = FIBER_CONTEXT;
    fiber->blocking = blocking;
    cont_init(&fiber->cont, th);

    fiber->cont.saved_ec.fiber_ptr = fiber;
    rb_ec_clear_vm_stack(&fiber->cont.saved_ec);

    fiber->prev = NULL;

    /* fiber->status == 0 == CREATED; no need to set it explicitly. */
    VM_ASSERT(FIBER_CREATED_P(fiber));

    DATA_PTR(fiber_value) = fiber;

    return fiber;
}
static VALUE
fiber_initialize(VALUE self, VALUE proc, struct fiber_pool * fiber_pool, unsigned int blocking)
{
    rb_fiber_t *fiber = fiber_t_alloc(self, blocking);

    fiber->first_proc = proc;
    fiber->stack.base = NULL;
    fiber->stack.pool = fiber_pool;

    return self;
}

static void
fiber_prepare_stack(rb_fiber_t *fiber)
{
    rb_context_t *cont = &fiber->cont;
    rb_execution_context_t *sec = &cont->saved_ec;

    size_t vm_stack_size = 0;
    VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);

    /* initialize cont */
    cont->saved_vm_stack.ptr = NULL;
    rb_ec_initialize_vm_stack(sec, vm_stack, vm_stack_size / sizeof(VALUE));

    sec->tag = NULL;
    sec->local_storage = NULL;
    sec->local_storage_recursive_hash = Qnil;
    sec->local_storage_recursive_hash_for_trace = Qnil;
}
static struct fiber_pool *
rb_fiber_pool_default(VALUE pool)
{
    return &shared_fiber_pool;
}

/* :nodoc: */
static VALUE
rb_fiber_initialize_kw(int argc, VALUE* argv, VALUE self, int kw_splat)
{
    VALUE pool = Qnil;
    VALUE blocking = Qfalse;

    if (kw_splat != RB_NO_KEYWORDS) {
        VALUE options = Qnil;
        VALUE arguments[2] = {Qundef};

        argc = rb_scan_args_kw(kw_splat, argc, argv, ":", &options);
        rb_get_kwargs(options, fiber_initialize_keywords, 0, 2, arguments);

        if (arguments[0] != Qundef) {
            blocking = arguments[0];
        }

        if (arguments[1] != Qundef) {
            pool = arguments[1];
        }
    }

    return fiber_initialize(self, rb_block_proc(), rb_fiber_pool_default(pool), RTEST(blocking));
}
/* :nodoc: */
static VALUE
rb_fiber_initialize(int argc, VALUE* argv, VALUE self)
{
    return rb_fiber_initialize_kw(argc, argv, self, rb_keyword_given_p());
}

VALUE
rb_fiber_new(rb_block_call_func_t func, VALUE obj)
{
    return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), rb_fiber_pool_default(Qnil), 1);
}
static VALUE
rb_fiber_s_schedule_kw(int argc, VALUE* argv, int kw_splat)
{
    rb_thread_t * th = GET_THREAD();
    VALUE scheduler = th->scheduler;
    VALUE fiber = Qnil;

    if (scheduler != Qnil) {
        fiber = rb_funcall_passing_block_kw(scheduler, rb_intern("fiber"), argc, argv, kw_splat);
    }
    else {
        rb_raise(rb_eRuntimeError, "No scheduler is available!");
    }

    return fiber;
}

static VALUE
rb_fiber_s_schedule(int argc, VALUE *argv, VALUE obj)
{
    return rb_fiber_s_schedule_kw(argc, argv, rb_keyword_given_p());
}

static VALUE
rb_fiber_s_scheduler(VALUE klass)
{
    return rb_fiber_scheduler_get();
}

static VALUE
rb_fiber_current_scheduler(VALUE klass)
{
    return rb_fiber_scheduler_current();
}

static VALUE
rb_fiber_set_scheduler(VALUE klass, VALUE scheduler)
{
    return rb_fiber_scheduler_set(scheduler);
}
NORETURN(static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE err));
static void
rb_fiber_start(rb_fiber_t *fiber)
{
    rb_thread_t * volatile th = fiber->cont.saved_ec.thread_ptr;
    rb_proc_t *proc;
    enum ruby_tag_type state;
    int need_interrupt = TRUE;
    VALUE err = Qfalse;

    VM_ASSERT(th->ec == GET_EC());
    VM_ASSERT(FIBER_RESUMED_P(fiber));

    if (fiber->blocking) {
        th->blocking += 1;
    }

    EC_PUSH_TAG(th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        rb_context_t *cont = &VAR_FROM_MEMORY(fiber)->cont;
        int argc;
        const VALUE *argv, args = cont->value;
        GetProcPtr(fiber->first_proc, proc);
        argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
        cont->value = Qnil;
        th->ec->errinfo = Qnil;
        th->ec->root_lep = rb_vm_proc_local_ep(fiber->first_proc);
        th->ec->root_svar = Qfalse;

        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
        cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, cont->kw_splat, VM_BLOCK_HANDLER_NONE);
    }
    EC_POP_TAG();

    if (state) {
        err = th->ec->errinfo;
        VM_ASSERT(FIBER_RESUMED_P(fiber));

        if (state == TAG_RAISE) {
            // noop...
        }
        else if (state == TAG_FATAL) {
            rb_threadptr_pending_interrupt_enque(th, err);
        }
        else {
            err = rb_vm_make_jump_tag_but_local_jump(state, err);
        }
        need_interrupt = TRUE;
    }

    rb_fiber_terminate(fiber, need_interrupt, err);
}
static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    VALUE fiber_value = fiber_alloc(rb_cFiber);
    rb_fiber_t *fiber = th->ec->fiber_ptr;

    VM_ASSERT(DATA_PTR(fiber_value) == NULL);
    VM_ASSERT(fiber->cont.type == FIBER_CONTEXT);
    VM_ASSERT(fiber->status == FIBER_RESUMED);

    th->root_fiber = fiber;
    DATA_PTR(fiber_value) = fiber;
    fiber->cont.self = fiber_value;

    coroutine_initialize_main(&fiber->context);

    return fiber;
}

void
rb_threadptr_root_fiber_setup(rb_thread_t *th)
{
    rb_fiber_t *fiber = ruby_mimmalloc(sizeof(rb_fiber_t));
    if (!fiber) {
        rb_bug("%s", strerror(errno));
    }
    MEMZERO(fiber, rb_fiber_t, 1);
    fiber->cont.type = FIBER_CONTEXT;
    fiber->cont.saved_ec.fiber_ptr = fiber;
    fiber->cont.saved_ec.thread_ptr = th;
    fiber->blocking = 1;
    fiber_status_set(fiber, FIBER_RESUMED);
    th->ec = &fiber->cont.saved_ec;

    rb_fiber_init_mjit_cont(fiber);
}
void
rb_threadptr_root_fiber_release(rb_thread_t *th)
{
    if (th->root_fiber) {
        /* ignore. A root fiber object will free th->ec */
    }
    else {
        rb_execution_context_t *ec = GET_EC();

        VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
        VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);

        if (th->ec == ec) {
            rb_ractor_set_current_ec(th->ractor, NULL);
        }
        fiber_free(th->ec->fiber_ptr);
        th->ec = NULL;
    }
}

void
rb_threadptr_root_fiber_terminate(rb_thread_t *th)
{
    rb_fiber_t *fiber = th->ec->fiber_ptr;

    fiber->status = FIBER_TERMINATED;

    // The vm_stack is `alloca`ed on the thread stack, so it's gone too:
    rb_ec_clear_vm_stack(th->ec);
}

static inline rb_fiber_t*
fiber_current(void)
{
    rb_execution_context_t *ec = GET_EC();
    if (ec->fiber_ptr->cont.self == 0) {
        root_fiber_alloc(rb_ec_thread_ptr(ec));
    }
    return ec->fiber_ptr;
}
static inline rb_fiber_t*
return_fiber(bool terminate)
{
    rb_fiber_t *fiber = fiber_current();
    rb_fiber_t *prev = fiber->prev;

    if (prev) {
        fiber->prev = NULL;
        prev->resuming_fiber = NULL;
        return prev;
    }
    else {
        if (!terminate) {
            rb_raise(rb_eFiberError, "attempt to yield on a not resumed fiber");
        }

        rb_thread_t *th = GET_THREAD();
        rb_fiber_t *root_fiber = th->root_fiber;

        VM_ASSERT(root_fiber != NULL);

        // search the resuming fiber
        for (fiber = root_fiber; fiber->resuming_fiber; fiber = fiber->resuming_fiber) {
        }

        return fiber;
    }
}

VALUE
rb_fiber_current(void)
{
    return fiber_current()->cont.self;
}
// Prepare to execute next_fiber on the given thread.
static inline void
fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
{
    rb_fiber_t *fiber;

    if (th->ec->fiber_ptr != NULL) {
        fiber = th->ec->fiber_ptr;
    }
    else {
        /* create the root fiber */
        fiber = root_fiber_alloc(th);
    }

    if (FIBER_CREATED_P(next_fiber)) {
        fiber_prepare_stack(next_fiber);
    }

    VM_ASSERT(FIBER_RESUMED_P(fiber) || FIBER_TERMINATED_P(fiber));
    VM_ASSERT(FIBER_RUNNABLE_P(next_fiber));

    if (FIBER_RESUMED_P(fiber)) fiber_status_set(fiber, FIBER_SUSPENDED);

    fiber_status_set(next_fiber, FIBER_RESUMED);
    fiber_setcontext(next_fiber, fiber);
}
static inline VALUE
fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat, rb_fiber_t *resuming_fiber, bool yielding)
{
    VALUE value;
    rb_context_t *cont = &fiber->cont;
    rb_thread_t *th = GET_THREAD();

    /* make sure the root_fiber object is available */
    if (th->root_fiber == NULL) root_fiber_alloc(th);

    if (th->ec->fiber_ptr == fiber) {
        /* ignore the switch: the destination fiber is the current fiber */
        return make_passing_arg(argc, argv);
    }

    if (cont_thread_value(cont) != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }

    if (FIBER_TERMINATED_P(fiber)) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");

        if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
            rb_exc_raise(value);
            VM_UNREACHABLE(fiber_switch);
        }
        else {
            /* th->ec->fiber_ptr is also dead => switch to the root fiber */
            /* (this means we're being called from rb_fiber_terminate, */
            /* and the terminated fiber's return_fiber() is already dead) */
            VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));

            cont = &th->root_fiber->cont;
            cont->argc = -1;
            cont->value = value;

            fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);

            VM_UNREACHABLE(fiber_switch);
        }
    }

    VM_ASSERT(FIBER_RUNNABLE_P(fiber));

    rb_fiber_t *current_fiber = fiber_current();

    VM_ASSERT(!current_fiber->resuming_fiber);

    if (resuming_fiber) {
        current_fiber->resuming_fiber = resuming_fiber;
        fiber->prev = fiber_current();
        fiber->yielding = 0;
    }

    VM_ASSERT(!current_fiber->yielding);
    if (yielding) {
        current_fiber->yielding = 1;
    }

    if (current_fiber->blocking) {
        th->blocking -= 1;
    }

    cont->argc = argc;
    cont->kw_splat = kw_splat;
    cont->value = make_passing_arg(argc, argv);

    fiber_store(fiber, th);

    // We cannot free the stack until the pthread is joined:
#ifndef COROUTINE_PTHREAD_CONTEXT
    if (resuming_fiber && FIBER_TERMINATED_P(fiber)) {
        fiber_stack_release(fiber);
    }
#endif

    if (fiber_current()->blocking) {
        th->blocking += 1;
    }

    RUBY_VM_CHECK_INTS(th->ec);

    EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);

    current_fiber = th->ec->fiber_ptr;
    value = current_fiber->cont.value;
    if (current_fiber->cont.argc == -1) rb_exc_raise(value);

    return value;
}
VALUE
rb_fiber_transfer(VALUE fiber_value, int argc, const VALUE *argv)
{
    return fiber_switch(fiber_ptr(fiber_value), argc, argv, RB_NO_KEYWORDS, NULL, false);
}
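/* Illustrative Ruby-level behaviour (example, not part of this file):
 *
 *   fiber = Fiber.new { |x| Fiber.yield(x + 1); :done }
 *   fiber.resume(1) # => 2      (suspends at Fiber.yield; caller saved in fiber->prev)
 *   fiber.resume    # => :done
 *
 * #resume reaches fiber_switch with resuming_fiber != NULL, so the callee can
 * implicitly return to its caller; #transfer passes resuming_fiber == NULL,
 * which is why a transferred-to fiber must be transferred back explicitly.
 */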
VALUE
rb_fiber_blocking_p(VALUE fiber)
{
    return RBOOL(fiber_ptr(fiber)->blocking != 0);
}

static VALUE
rb_fiber_s_blocking_p(VALUE klass)
{
    rb_thread_t *thread = GET_THREAD();
    unsigned blocking = thread->blocking;

    if (blocking == 0)
        return Qfalse;

    return INT2NUM(blocking);
}

void
rb_fiber_close(rb_fiber_t *fiber)
{
    fiber_status_set(fiber, FIBER_TERMINATED);
}
static void
rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt, VALUE error)
{
    VALUE value = fiber->cont.value;

    VM_ASSERT(FIBER_RESUMED_P(fiber));
    rb_fiber_close(fiber);

    fiber->cont.machine.stack = NULL;
    fiber->cont.machine.stack_size = 0;

    rb_fiber_t *next_fiber = return_fiber(true);

    if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);

    if (RTEST(error))
        fiber_switch(next_fiber, -1, &error, RB_NO_KEYWORDS, NULL, false);
    else
        fiber_switch(next_fiber, 1, &value, RB_NO_KEYWORDS, NULL, false);
    ruby_stop(0);
}
static VALUE
fiber_resume_kw(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat)
{
    rb_fiber_t *current_fiber = fiber_current();

    if (argc == -1 && FIBER_CREATED_P(fiber)) {
        rb_raise(rb_eFiberError, "cannot raise exception on unborn fiber");
    }
    else if (FIBER_TERMINATED_P(fiber)) {
        rb_raise(rb_eFiberError, "attempt to resume a terminated fiber");
    }
    else if (fiber == current_fiber) {
        rb_raise(rb_eFiberError, "attempt to resume the current fiber");
    }
    else if (fiber->prev != NULL) {
        rb_raise(rb_eFiberError, "attempt to resume a resumed fiber (double resume)");
    }
    else if (fiber->resuming_fiber) {
        rb_raise(rb_eFiberError, "attempt to resume a resuming fiber");
    }
    else if (fiber->prev == NULL &&
             (!fiber->yielding && fiber->status != FIBER_CREATED)) {
        rb_raise(rb_eFiberError, "attempt to resume a transferring fiber");
    }

    VALUE result = fiber_switch(fiber, argc, argv, kw_splat, fiber, false);

    return result;
}

VALUE
rb_fiber_resume_kw(VALUE self, int argc, const VALUE *argv, int kw_splat)
{
    return fiber_resume_kw(fiber_ptr(self), argc, argv, kw_splat);
}

VALUE
rb_fiber_resume(VALUE self, int argc, const VALUE *argv)
{
    return fiber_resume_kw(fiber_ptr(self), argc, argv, RB_NO_KEYWORDS);
}
VALUE
rb_fiber_yield_kw(int argc, const VALUE *argv, int kw_splat)
{
    return fiber_switch(return_fiber(false), argc, argv, kw_splat, NULL, true);
}

VALUE
rb_fiber_yield(int argc, const VALUE *argv)
{
    return fiber_switch(return_fiber(false), argc, argv, RB_NO_KEYWORDS, NULL, true);
}

void
rb_fiber_reset_root_local_storage(rb_thread_t *th)
{
    if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
        th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
    }
}

VALUE
rb_fiber_alive_p(VALUE fiber_value)
{
    return FIBER_TERMINATED_P(fiber_ptr(fiber_value)) ? Qfalse : Qtrue;
}
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fiber)
{
    return rb_fiber_resume_kw(fiber, argc, argv, rb_keyword_given_p());
}

static VALUE
rb_fiber_backtrace(int argc, VALUE *argv, VALUE fiber)
{
    return rb_vm_backtrace(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
}

static VALUE
rb_fiber_backtrace_locations(int argc, VALUE *argv, VALUE fiber)
{
    return rb_vm_backtrace_locations(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
}

static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE self)
{
    return rb_fiber_transfer_kw(self, argc, argv, rb_keyword_given_p());
}
static VALUE
fiber_transfer_kw(rb_fiber_t *fiber, int argc, const VALUE *argv, int kw_splat)
{
    if (fiber->resuming_fiber) {
        rb_raise(rb_eFiberError, "attempt to transfer to a resuming fiber");
    }

    if (fiber->yielding) {
        rb_raise(rb_eFiberError, "attempt to transfer to a yielding fiber");
    }

    return fiber_switch(fiber, argc, argv, kw_splat, NULL, false);
}

VALUE
rb_fiber_transfer_kw(VALUE self, int argc, const VALUE *argv, int kw_splat)
{
    return fiber_transfer_kw(fiber_ptr(self), argc, argv, kw_splat);
}

static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
    return rb_fiber_yield_kw(argc, argv, rb_keyword_given_p());
}
static VALUE
fiber_raise(rb_fiber_t *fiber, int argc, const VALUE *argv)
{
    VALUE exception = rb_make_exception(argc, argv);

    if (fiber->resuming_fiber) {
        rb_raise(rb_eFiberError, "attempt to raise a resuming fiber");
    }
    else if (FIBER_SUSPENDED_P(fiber) && !fiber->yielding) {
        return fiber_transfer_kw(fiber, -1, &exception, RB_NO_KEYWORDS);
    }
    else {
        return fiber_resume_kw(fiber, -1, &exception, RB_NO_KEYWORDS);
    }
}

VALUE
rb_fiber_raise(VALUE fiber, int argc, const VALUE *argv)
{
    return fiber_raise(fiber_ptr(fiber), argc, argv);
}

static VALUE
rb_fiber_m_raise(int argc, VALUE *argv, VALUE self)
{
    return rb_fiber_raise(self, argc, argv);
}

static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}
static VALUE
fiber_to_s(VALUE fiber_value)
{
    const rb_fiber_t *fiber = fiber_ptr(fiber_value);
    const rb_proc_t *proc;
    char status_info[0x20];

    if (fiber->resuming_fiber) {
        snprintf(status_info, 0x20, " (%s by resuming)", fiber_status_name(fiber->status));
    }
    else {
        snprintf(status_info, 0x20, " (%s)", fiber_status_name(fiber->status));
    }

    if (!rb_obj_is_proc(fiber->first_proc)) {
        VALUE str = rb_any_to_s(fiber_value);
        strlcat(status_info, ">", sizeof(status_info));
        rb_str_set_len(str, RSTRING_LEN(str)-1);
        rb_str_cat_cstr(str, status_info);
        return str;
    }

    GetProcPtr(fiber->first_proc, proc);
    return rb_block_to_s(fiber_value, &proc->block, status_info);
}
#ifdef HAVE_WORKING_FORK
void
rb_fiber_atfork(rb_thread_t *th)
{
    if (th->root_fiber) {
        if (&th->root_fiber->cont.saved_ec != th->ec) {
            th->root_fiber = th->ec->fiber_ptr;
        }
        th->root_fiber->prev = 0;
    }
}
#endif
#ifdef RB_EXPERIMENTAL_FIBER_POOL
static void
fiber_pool_free(void *ptr)
{
    struct fiber_pool * fiber_pool = ptr;
    RUBY_FREE_ENTER("fiber_pool");

    fiber_pool_free_allocations(fiber_pool->allocations);
    ruby_xfree(fiber_pool);

    RUBY_FREE_LEAVE("fiber_pool");
}

static size_t
fiber_pool_memsize(const void *ptr)
{
    const struct fiber_pool * fiber_pool = ptr;
    size_t size = sizeof(*fiber_pool);

    size += fiber_pool->count * fiber_pool->size;

    return size;
}

static const rb_data_type_t FiberPoolDataType = {
    "fiber_pool",
    {NULL, fiber_pool_free, fiber_pool_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
fiber_pool_alloc(VALUE klass)
{
    struct fiber_pool * fiber_pool;

    return TypedData_Make_Struct(klass, struct fiber_pool, &FiberPoolDataType, fiber_pool);
}
static VALUE
rb_fiber_pool_initialize(int argc, VALUE* argv, VALUE self)
{
    rb_thread_t *th = GET_THREAD();
    VALUE size = Qnil, count = Qnil, vm_stack_size = Qnil;
    struct fiber_pool * fiber_pool = NULL;

    // Maybe these should be keyword arguments.
    rb_scan_args(argc, argv, "03", &size, &count, &vm_stack_size);

    if (NIL_P(size)) {
        size = INT2NUM(th->vm->default_params.fiber_machine_stack_size);
    }

    if (NIL_P(vm_stack_size)) {
        vm_stack_size = INT2NUM(th->vm->default_params.fiber_vm_stack_size);
    }

    TypedData_Get_Struct(self, struct fiber_pool, &FiberPoolDataType, fiber_pool);

    fiber_pool_initialize(fiber_pool, NUM2SIZET(size), NUM2SIZET(count), NUM2SIZET(vm_stack_size));

    return self;
}
#endif
#if 0 /* for RDoc: Fiber::SchedulerInterface documentation stubs */
static VALUE rb_fiber_scheduler_interface_close(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_process_wait(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_io_wait(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_io_read(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_io_write(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_kernel_sleep(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_address_resolve(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_timeout_after(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_block(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_unblock(VALUE self) {}
static VALUE rb_fiber_scheduler_interface_fiber(VALUE self) {}
#endif
void
Init_Cont(void)
{
    rb_thread_t *th = GET_THREAD();
    size_t vm_stack_size = th->vm->default_params.fiber_vm_stack_size;
    size_t machine_stack_size = th->vm->default_params.fiber_machine_stack_size;
    size_t stack_size = machine_stack_size + vm_stack_size;

#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);

    fiber_pool_initialize(&shared_fiber_pool, stack_size, FIBER_POOL_INITIAL_SIZE, vm_stack_size);

    fiber_initialize_keywords[0] = rb_intern_const("blocking");
    fiber_initialize_keywords[1] = rb_intern_const("pool");

    const char *fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
    if (fiber_shared_fiber_pool_free_stacks) {
        shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);
    }
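    // For example, RUBY_SHARED_FIBER_POOL_FREE_STACKS=0 disables the madvise
    // calls in fiber_pool_stack_free, keeping released stacks dirty and
    // trading memory footprint for faster fiber churn.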
    rb_define_method(rb_cFiber, "backtrace_locations", rb_fiber_backtrace_locations, -1);

#if 0 /* for RDoc: Fiber::SchedulerInterface is a documentation-only class */
    rb_cFiberScheduler = rb_define_class_under(rb_cFiber, "SchedulerInterface", rb_cObject);
    rb_define_method(rb_cFiberScheduler, "close", rb_fiber_scheduler_interface_close, 0);
    rb_define_method(rb_cFiberScheduler, "process_wait", rb_fiber_scheduler_interface_process_wait, 0);
    rb_define_method(rb_cFiberScheduler, "io_wait", rb_fiber_scheduler_interface_io_wait, 0);
    rb_define_method(rb_cFiberScheduler, "io_read", rb_fiber_scheduler_interface_io_read, 0);
    rb_define_method(rb_cFiberScheduler, "io_write", rb_fiber_scheduler_interface_io_write, 0);
    rb_define_method(rb_cFiberScheduler, "kernel_sleep", rb_fiber_scheduler_interface_kernel_sleep, 0);
    rb_define_method(rb_cFiberScheduler, "address_resolve", rb_fiber_scheduler_interface_address_resolve, 0);
    rb_define_method(rb_cFiberScheduler, "timeout_after", rb_fiber_scheduler_interface_timeout_after, 0);
    rb_define_method(rb_cFiberScheduler, "block", rb_fiber_scheduler_interface_block, 0);
    rb_define_method(rb_cFiberScheduler, "unblock", rb_fiber_scheduler_interface_unblock, 0);
    rb_define_method(rb_cFiberScheduler, "fiber", rb_fiber_scheduler_interface_fiber, 0);
#endif
#ifdef RB_EXPERIMENTAL_FIBER_POOL
    rb_cFiberPool = rb_define_class_under(rb_cFiber, "Pool", rb_cObject);
    rb_define_alloc_func(rb_cFiberPool, fiber_pool_alloc);
    rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);
#endif

    rb_provide("fiber.so");
}

RUBY_SYMBOL_EXPORT_BEGIN

void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}

RUBY_SYMBOL_EXPORT_END