Ruby 3.1.3p185 (2022-11-24 revision 1a6b16756e0ba6b95ab71a441357ed5484e33498)
thread_win32.c
/* -*-c-*- */
/**********************************************************************

  thread_win32.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/
11
12#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
13
14#include <process.h>
15
16#define TIME_QUANTUM_USEC (10 * 1000)
17#define RB_CONDATTR_CLOCK_MONOTONIC 1 /* no effect */
18
19#undef Sleep
20
21#define native_thread_yield() Sleep(0)
22#define unregister_ubf_list(th)
23#define ubf_wakeup_all_threads() do {} while (0)
24#define ubf_threads_empty() (1)
25#define ubf_timer_disarm() do {} while (0)
26#define ubf_list_atfork() do {} while (0)
27
28static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
29
30static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);
31
33static void
34w32_error(const char *func)
35{
36 LPVOID lpMsgBuf;
37 DWORD err = GetLastError();
38 if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
39 FORMAT_MESSAGE_FROM_SYSTEM |
40 FORMAT_MESSAGE_IGNORE_INSERTS,
41 NULL,
42 err,
43 MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
44 (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
45 FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
46 FORMAT_MESSAGE_FROM_SYSTEM |
47 FORMAT_MESSAGE_IGNORE_INSERTS,
48 NULL,
49 err,
50 MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
51 (LPTSTR) & lpMsgBuf, 0, NULL);
52 rb_bug("%s: %s", func, (char*)lpMsgBuf);
54}
55
56static int
57w32_mutex_lock(HANDLE lock, bool try)
58{
59 DWORD result;
60 while (1) {
61 thread_debug("rb_native_mutex_lock: %p\n", lock);
62 result = w32_wait_events(&lock, 1, try ? 0 : INFINITE, 0);
63 switch (result) {
64 case WAIT_OBJECT_0:
65 /* get mutex object */
66 thread_debug("acquire mutex: %p\n", lock);
67 return 0;
68 case WAIT_OBJECT_0 + 1:
69 /* interrupt */
70 errno = EINTR;
71 thread_debug("acquire mutex interrupted: %p\n", lock);
72 return 0;
73 case WAIT_TIMEOUT:
74 thread_debug("timeout mutex: %p\n", lock);
75 return EBUSY;
76 case WAIT_ABANDONED:
77 rb_bug("win32_mutex_lock: WAIT_ABANDONED");
78 break;
79 default:
80 rb_bug("win32_mutex_lock: unknown result (%ld)", result);
81 break;
82 }
83 }
84 return 0;
85}
86
87static HANDLE
88w32_mutex_create(void)
89{
90 HANDLE lock = CreateMutex(NULL, FALSE, NULL);
91 if (lock == NULL) {
92 w32_error("rb_native_mutex_initialize");
93 }
94 return lock;
95}
96
#define GVL_DEBUG 0
98
99static void
100gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
101{
102 w32_mutex_lock(gvl->lock, false);
103 if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
104}
105
106static void
107gvl_release(rb_global_vm_lock_t *gvl)
108{
109 ReleaseMutex(gvl->lock);
110}
111
112static void
113gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
114{
115 gvl_release(gvl);
116 native_thread_yield();
117 gvl_acquire(gvl, th);
118}
119
120void
121rb_gvl_init(rb_global_vm_lock_t *gvl)
122{
123 if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
124 gvl->lock = w32_mutex_create();
125}
126
127static void
128gvl_destroy(rb_global_vm_lock_t *gvl)
129{
130 if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
131 CloseHandle(gvl->lock);
132}
133
135ruby_thread_from_native(void)
136{
137 return TlsGetValue(ruby_native_thread_key);
138}
139
140int
141ruby_thread_set_native(rb_thread_t *th)
142{
143 if (th && th->ec) {
144 rb_ractor_set_current_ec(th->ractor, th->ec);
145 }
146 return TlsSetValue(ruby_native_thread_key, th);
147}
148
149void
150Init_native_thread(rb_thread_t *th)
151{
152 if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
153 rb_bug("TlsAlloc() for ruby_current_ec_key fails");
154 }
155 if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
156 rb_bug("TlsAlloc() for ruby_native_thread_key fails");
157 }
158 ruby_thread_set_native(th);
159 DuplicateHandle(GetCurrentProcess(),
160 GetCurrentThread(),
161 GetCurrentProcess(),
162 &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);
163
164 th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
165
166 thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
167 th, GET_THREAD()->thread_id,
168 th->native_thread_data.interrupt_event);
169}
170
171static int
172w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
173{
174 HANDLE *targets = events;
175 HANDLE intr;
176 const int initcount = count;
177 DWORD ret;
178
179 thread_debug(" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
180 events, count, timeout, th);
181 if (th && (intr = th->native_thread_data.interrupt_event)) {
182 if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
183 targets = ALLOCA_N(HANDLE, count + 1);
184 memcpy(targets, events, sizeof(HANDLE) * count);
185
186 targets[count++] = intr;
187 thread_debug(" * handle: %p (count: %d, intr)\n", intr, count);
188 }
189 else if (intr == th->native_thread_data.interrupt_event) {
190 w32_error("w32_wait_events");
191 }
192 }
193
194 thread_debug(" WaitForMultipleObjects start (count: %d)\n", count);
195 ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
196 thread_debug(" WaitForMultipleObjects end (ret: %lu)\n", ret);
197
198 if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
199 errno = EINTR;
200 }
201 if (ret == WAIT_FAILED && THREAD_DEBUG) {
202 int i;
203 DWORD dmy;
204 for (i = 0; i < count; i++) {
205 thread_debug(" * error handle %d - %s\n", i,
206 GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
207 }
208 }
209 return ret;
210}
211
static void ubf_handle(void *ptr);
#define ubf_select ubf_handle
214
215int
216rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
217{
218 return w32_wait_events(events, num, timeout, ruby_thread_from_native());
219}
220
221int
222rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
223{
224 int ret;
225 rb_thread_t *th = GET_THREAD();
226
227 BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
228 ubf_handle, ruby_thread_from_native(), FALSE);
229 return ret;
230}
231
232static void
233w32_close_handle(HANDLE handle)
234{
235 if (CloseHandle(handle) == 0) {
236 w32_error("w32_close_handle");
237 }
238}
239
240static void
241w32_resume_thread(HANDLE handle)
242{
243 if (ResumeThread(handle) == (DWORD)-1) {
244 w32_error("w32_resume_thread");
245 }
246}
247
248#ifdef _MSC_VER
249#define HAVE__BEGINTHREADEX 1
250#else
251#undef HAVE__BEGINTHREADEX
252#endif
253
254#ifdef HAVE__BEGINTHREADEX
255#define start_thread (HANDLE)_beginthreadex
256#define thread_errno errno
257typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
258#else
259#define start_thread CreateThread
260#define thread_errno rb_w32_map_errno(GetLastError())
261typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
262#endif
263
264static HANDLE
265w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
266{
267 return start_thread(0, stack_size, func, val, CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
268}
269
/* Interruptible sleep (no handles, only the interrupt event + timeout). */
int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}
275
276int WINAPI
277rb_w32_Sleep(unsigned long msec)
278{
279 int ret;
280 rb_thread_t *th = GET_THREAD();
281
282 BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
283 ubf_handle, ruby_thread_from_native(), FALSE);
284 return ret;
285}
286
287static DWORD
288hrtime2msec(rb_hrtime_t hrt)
289{
290 return (DWORD)hrt / (DWORD)RB_HRTIME_PER_MSEC;
291}
292
293static void
294native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
295{
296 const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;
297
298 GVL_UNLOCK_BEGIN(th);
299 {
300 DWORD ret;
301
302 rb_native_mutex_lock(&th->interrupt_lock);
303 th->unblock.func = ubf_handle;
304 th->unblock.arg = th;
305 rb_native_mutex_unlock(&th->interrupt_lock);
306
307 if (RUBY_VM_INTERRUPTED(th->ec)) {
308 /* interrupted. return immediate */
309 }
310 else {
311 thread_debug("native_sleep start (%lu)\n", msec);
312 ret = w32_wait_events(0, 0, msec, th);
313 thread_debug("native_sleep done (%lu)\n", ret);
314 }
315
316 rb_native_mutex_lock(&th->interrupt_lock);
317 th->unblock.func = 0;
318 th->unblock.arg = 0;
319 rb_native_mutex_unlock(&th->interrupt_lock);
320 }
321 GVL_UNLOCK_END(th);
322}
323
324void
326{
327#ifdef USE_WIN32_MUTEX
328 w32_mutex_lock(lock->mutex, false);
329#else
330 EnterCriticalSection(&lock->crit);
331#endif
332}
333
334int
336{
337#ifdef USE_WIN32_MUTEX
338 return w32_mutex_lock(lock->mutex, true);
339#else
340 return TryEnterCriticalSection(&lock->crit) == 0 ? EBUSY : 0;
341#endif
342}
343
344void
346{
347#ifdef USE_WIN32_MUTEX
348 thread_debug("release mutex: %p\n", lock->mutex);
349 ReleaseMutex(lock->mutex);
350#else
351 LeaveCriticalSection(&lock->crit);
352#endif
353}
354
355void
357{
358#ifdef USE_WIN32_MUTEX
359 lock->mutex = w32_mutex_create();
360 /* thread_debug("initialize mutex: %p\n", lock->mutex); */
361#else
362 InitializeCriticalSection(&lock->crit);
363#endif
364}
365
366void
368{
369#ifdef USE_WIN32_MUTEX
370 w32_close_handle(lock->mutex);
371#else
372 DeleteCriticalSection(&lock->crit);
373#endif
374}
375
376struct cond_event_entry {
377 struct cond_event_entry* next;
378 struct cond_event_entry* prev;
379 HANDLE event;
380};
381
382void
384{
385 /* cond is guarded by mutex */
386 struct cond_event_entry *e = cond->next;
387 struct cond_event_entry *head = (struct cond_event_entry*)cond;
388
389 if (e != head) {
390 struct cond_event_entry *next = e->next;
391 struct cond_event_entry *prev = e->prev;
392
393 prev->next = next;
394 next->prev = prev;
395 e->next = e->prev = e;
396
397 SetEvent(e->event);
398 }
399}
400
401void
403{
404 /* cond is guarded by mutex */
405 struct cond_event_entry *e = cond->next;
406 struct cond_event_entry *head = (struct cond_event_entry*)cond;
407
408 while (e != head) {
409 struct cond_event_entry *next = e->next;
410 struct cond_event_entry *prev = e->prev;
411
412 SetEvent(e->event);
413
414 prev->next = next;
415 next->prev = prev;
416 e->next = e->prev = e;
417
418 e = next;
419 }
420}
421
422static int
423native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
424{
425 DWORD r;
426 struct cond_event_entry entry;
427 struct cond_event_entry *head = (struct cond_event_entry*)cond;
428
429 entry.event = CreateEvent(0, FALSE, FALSE, 0);
430
431 /* cond is guarded by mutex */
432 entry.next = head;
433 entry.prev = head->prev;
434 head->prev->next = &entry;
435 head->prev = &entry;
436
438 {
439 r = WaitForSingleObject(entry.event, msec);
440 if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
441 rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);
442 }
443 }
445
446 entry.prev->next = entry.next;
447 entry.next->prev = entry.prev;
448
449 w32_close_handle(entry.event);
450 return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
451}
452
453void
455{
456 native_cond_timedwait_ms(cond, mutex, INFINITE);
457}
458
459static unsigned long
460abs_timespec_to_timeout_ms(const struct timespec *ts)
461{
462 struct timeval tv;
463 struct timeval now;
464
465 gettimeofday(&now, NULL);
466 tv.tv_sec = ts->tv_sec;
467 tv.tv_usec = ts->tv_nsec / 1000;
468
469 if (!rb_w32_time_subtract(&tv, &now))
470 return 0;
471
472 return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
473}
474
475static int
476native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, const struct timespec *ts)
477{
478 unsigned long timeout_ms;
479
480 timeout_ms = abs_timespec_to_timeout_ms(ts);
481 if (!timeout_ms)
482 return ETIMEDOUT;
483
484 return native_cond_timedwait_ms(cond, mutex, timeout_ms);
485}
486
487static struct timespec native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel);
488
489void
491{
492 struct timespec rel = {
493 .tv_sec = msec / 1000,
494 .tv_nsec = (msec % 1000) * 1000 * 1000,
495 };
496 struct timespec ts = native_cond_timeout(cond, rel);
497 native_cond_timedwait(cond, mutex, &ts);
498}
499
500static struct timespec
501native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
502{
503 int ret;
504 struct timeval tv;
505 struct timespec timeout;
506 struct timespec now;
507
508 ret = gettimeofday(&tv, 0);
509 if (ret != 0)
510 rb_sys_fail(0);
511 now.tv_sec = tv.tv_sec;
512 now.tv_nsec = tv.tv_usec * 1000;
513
514 timeout.tv_sec = now.tv_sec;
515 timeout.tv_nsec = now.tv_nsec;
516 timeout.tv_sec += timeout_rel.tv_sec;
517 timeout.tv_nsec += timeout_rel.tv_nsec;
518
519 if (timeout.tv_nsec >= 1000*1000*1000) {
520 timeout.tv_sec++;
521 timeout.tv_nsec -= 1000*1000*1000;
522 }
523
524 if (timeout.tv_sec < now.tv_sec)
525 timeout.tv_sec = TIMET_MAX;
526
527 return timeout;
528}
529
530void
532{
533 cond->next = (struct cond_event_entry *)cond;
534 cond->prev = (struct cond_event_entry *)cond;
535}
536
537void
539{
540 /* */
541}
542
543void
544ruby_init_stack(volatile VALUE *addr)
545{
546}
547
/* Aborts via rb_bug() with GetLastError() when `expr` is false. */
#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}
550
551COMPILER_WARNING_PUSH
552#if defined(__GNUC__)
553COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
554#endif
555static inline SIZE_T
556query_memory_basic_info(PMEMORY_BASIC_INFORMATION mi)
557{
558 return VirtualQuery(mi, mi, sizeof(*mi));
559}
560COMPILER_WARNING_POP
561
562static void
563native_thread_init_stack(rb_thread_t *th)
564{
565 MEMORY_BASIC_INFORMATION mi;
566 char *base, *end;
567 DWORD size, space;
568
569 CHECK_ERR(query_memory_basic_info(&mi));
570 base = mi.AllocationBase;
571 end = mi.BaseAddress;
572 end += mi.RegionSize;
573 size = end - base;
574 space = size / 5;
575 if (space > 1024*1024) space = 1024*1024;
576 th->ec->machine.stack_start = (VALUE *)end - 1;
577 th->ec->machine.stack_maxsize = size - space;
578}
579
/* Fallback for toolchains lacking InterlockedExchangePointer (32-bit only). */
#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif
584static void
585native_thread_destroy(rb_thread_t *th)
586{
587 HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
588 thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
589 w32_close_handle(intr);
590}
591
592static unsigned long __stdcall
593thread_start_func_1(void *th_ptr)
594{
595 rb_thread_t *th = th_ptr;
596 volatile HANDLE thread_id = th->thread_id;
597
598 native_thread_init_stack(th);
599 th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
600
601 /* run */
602 thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
603 th->thread_id, th->native_thread_data.interrupt_event);
604
605 thread_start_func_2(th, th->ec->machine.stack_start);
606
607 w32_close_handle(thread_id);
608 thread_debug("thread deleted (th: %p)\n", th);
609 return 0;
610}
611
612static int
613native_thread_create(rb_thread_t *th)
614{
615 const size_t stack_size = th->vm->default_params.thread_machine_stack_size + th->vm->default_params.thread_vm_stack_size;
616 th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
617
618 if ((th->thread_id) == 0) {
619 return thread_errno;
620 }
621
622 w32_resume_thread(th->thread_id);
623
624 if (THREAD_DEBUG) {
625 Sleep(0);
626 thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %"PRIuSIZE"\n",
627 th, th->thread_id,
628 th->native_thread_data.interrupt_event, stack_size);
629 }
630 return 0;
631}
632
633static void
634native_thread_join(HANDLE th)
635{
636 w32_wait_events(&th, 1, INFINITE, 0);
637}
638
#if USE_NATIVE_THREAD_PRIORITY

/* Maps the Ruby thread priority sign onto the three Win32 priority classes. */
static void
native_thread_apply_priority(rb_thread_t *th)
{
    int priority = th->priority;
    if (th->priority > 0) {
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
        priority = THREAD_PRIORITY_NORMAL;
    }

    SetThreadPriority(th->thread_id, priority);
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
659
660int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *); /* @internal */
661
662static int
663native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
664{
665 fd_set *r = NULL, *w = NULL, *e = NULL;
666 if (readfds) {
667 rb_fd_resize(n - 1, readfds);
668 r = rb_fd_ptr(readfds);
669 }
670 if (writefds) {
671 rb_fd_resize(n - 1, writefds);
672 w = rb_fd_ptr(writefds);
673 }
674 if (exceptfds) {
675 rb_fd_resize(n - 1, exceptfds);
676 e = rb_fd_ptr(exceptfds);
677 }
678 return rb_w32_select_with_thread(n, r, w, e, timeout, th);
679}
680
681/* @internal */
682int
683rb_w32_check_interrupt(rb_thread_t *th)
684{
685 return w32_wait_events(0, 0, 0, th);
686}
687
688static void
689ubf_handle(void *ptr)
690{
691 rb_thread_t *th = (rb_thread_t *)ptr;
692 thread_debug("ubf_handle: %p\n", th);
693
694 if (!SetEvent(th->native_thread_data.interrupt_event)) {
695 w32_error("ubf_handle");
696 }
697}
698
699int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
700int rb_w32_set_thread_description_str(HANDLE th, VALUE name);
701#define native_set_another_thread_name rb_w32_set_thread_description_str
702
703static struct {
704 HANDLE id;
705 HANDLE lock;
706} timer_thread;
707#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)
708
709static unsigned long __stdcall
710timer_thread_func(void *dummy)
711{
712 rb_vm_t *vm = GET_VM();
713 thread_debug("timer_thread\n");
714 rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
715 while (WaitForSingleObject(timer_thread.lock,
716 TIME_QUANTUM_USEC/1000) == WAIT_TIMEOUT) {
717 vm->clock++;
718 ruby_sigchld_handler(vm); /* probably no-op */
719 rb_threadptr_check_signal(vm->ractor.main_thread);
720 }
721 thread_debug("timer killed\n");
722 return 0;
723}
724
/* No-op on Windows: the timer thread wakes itself on a fixed quantum. */
void
rb_thread_wakeup_timer_thread(int sig)
{
    /* do nothing */
}
730
731static VALUE
732rb_thread_start_unblock_thread(void)
733{
734 return Qfalse; /* no-op */
735}
736
737static void
738rb_thread_create_timer_thread(void)
739{
740 if (timer_thread.id == 0) {
741 if (!timer_thread.lock) {
742 timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
743 }
744 timer_thread.id = w32_create_thread(1024 + (THREAD_DEBUG ? BUFSIZ : 0),
745 timer_thread_func, 0);
746 w32_resume_thread(timer_thread.id);
747 }
748}
749
750static int
751native_stop_timer_thread(void)
752{
753 int stopped = --system_working <= 0;
754 if (stopped) {
755 SetEvent(timer_thread.lock);
756 native_thread_join(timer_thread.id);
757 CloseHandle(timer_thread.lock);
758 timer_thread.lock = 0;
759 }
760 return stopped;
761}
762
763static void
764native_reset_timer_thread(void)
765{
766 if (timer_thread.id) {
767 CloseHandle(timer_thread.id);
768 timer_thread.id = 0;
769 }
770}
771
772int
773ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
774{
775 return rb_ec_raised_p(th->ec, RAISED_STACKOVERFLOW);
776}
777
#if defined(__MINGW32__)
/*
 * Vectored exception handler: converts EXCEPTION_STACK_OVERFLOW into the
 * RAISED_STACKOVERFLOW flag plus SIGSEGV so Ruby's segv handler takes over.
 */
LONG WINAPI
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
{
    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_ec_raised_set(GET_EC(), RAISED_STACKOVERFLOW);
        raise(SIGSEGV);
    }
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif
789
#ifdef RUBY_ALLOCA_CHKSTK
/*
 * Pre-alloca stack check: raises sysstack_error (once, guarded by the
 * RAISED_STACKOVERFLOW flag) when `len` would not fit in the remaining stack.
 */
void
ruby_alloca_chkstk(size_t len, void *sp)
{
    if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
        rb_execution_context_t *ec = GET_EC();
        if (!rb_ec_raised_p(ec, RAISED_STACKOVERFLOW)) {
            rb_ec_raised_set(ec, RAISED_STACKOVERFLOW);
            rb_exc_raise(sysstack_error);
        }
    }
}
#endif
/* No fds are reserved by the VM on Windows. */
int
rb_reserved_fd_p(int fd)
{
    return 0;
}
808
809int
810rb_sigwait_fd_get(rb_thread_t *th)
811{
812 return -1; /* TODO */
813}
814
815NORETURN(void rb_sigwait_fd_put(rb_thread_t *, int));
816void
817rb_sigwait_fd_put(rb_thread_t *th, int fd)
818{
819 rb_bug("not implemented, should not be called");
820}
821
822NORETURN(void rb_sigwait_sleep(const rb_thread_t *, int, const rb_hrtime_t *));
823void
824rb_sigwait_sleep(const rb_thread_t *th, int fd, const rb_hrtime_t *rel)
825{
826 rb_bug("not implemented, should not be called");
827}
828
831{
832 return GetCurrentThread();
833}
834
835static void
836native_set_thread_name(rb_thread_t *th)
837{
838}
839
840static VALUE
841native_thread_native_thread_id(rb_thread_t *th)
842{
843 DWORD tid = GetThreadId(th->thread_id);
844 if (tid == 0) rb_sys_fail("GetThreadId");
845 return ULONG2NUM(tid);
846}
847#define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
848
849#if USE_MJIT
850static unsigned long __stdcall
851mjit_worker(void *arg)
852{
853 void (*worker_func)(void) = arg;
854 rb_w32_set_thread_description(GetCurrentThread(), L"ruby-mjitworker");
855 worker_func();
856 return 0;
857}
858
859/* Launch MJIT thread. Returns FALSE if it fails to create thread. */
860int
861rb_thread_create_mjit_thread(void (*worker_func)(void))
862{
863 size_t stack_size = 4 * 1024; /* 4KB is the minimum commit size */
864 HANDLE thread_id = w32_create_thread(stack_size, mjit_worker, worker_func);
865 if (thread_id == 0) {
866 return FALSE;
867 }
868
869 w32_resume_thread(thread_id);
870 return TRUE;
871}
872#endif
873
874#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
#define UNREACHABLE
Old name of RBIMPL_UNREACHABLE.
Definition: assume.h:30
#define ULONG2NUM
Old name of RB_ULONG2NUM.
Definition: long.h:60
#define Qfalse
Old name of RUBY_Qfalse.
void ruby_init_stack(volatile VALUE *addr)
Set stack bottom of Ruby implementation.
size_t ruby_stack_length(VALUE **p)
Queries what Ruby thinks is the machine stack.
Definition: gc.c:6140
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:671
void rb_bug(const char *fmt,...)
Interpreter panic switch.
Definition: error.c:802
void rb_sys_fail(const char *mesg)
Converts a C errno into a Ruby exception, then raises it.
Definition: error.c:3145
int rb_reserved_fd_p(int fd)
Queries if the given FD is reserved or not.
RBIMPL_ATTR_NORETURN() void rb_eof_error(void)
Utility function to raise rb_eEOFError.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
Definition: largesize.h:198
#define ALLOCA_N(type, n)
Definition: memory.h:286
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
Definition: select.h:43
The data structure which wraps the fd_set bitmap used by select(2).
rb_nativethread_id_t rb_nativethread_self(void)
Queries the ID of the native thread that is calling this function.
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_cond_initialize(rb_nativethread_cond_t *cond)
Fills the passed condition variable with an initial value.
int rb_native_mutex_trylock(rb_nativethread_lock_t *lock)
Identical to rb_native_mutex_lock(), except it doesn't block in case rb_native_mutex_lock() would.
void rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
Wakes up all threads blocked on the condition variable.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_native_cond_destroy(rb_nativethread_cond_t *cond)
Destroys the passed condition variable.
void rb_native_cond_signal(rb_nativethread_cond_t *cond)
Signals a condition variable.
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
Waits for the passed condition variable to be signalled.
void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
Identical to rb_native_cond_wait(), except it additionally takes timeout in msec resolution.