forked from rubinius/rubinius
-
Notifications
You must be signed in to change notification settings - Fork 0
/
state.hpp
145 lines (112 loc) · 2.92 KB
/
state.hpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
#ifndef RBX_STATE_HPP
#define RBX_STATE_HPP
namespace rubinius {
class VM;
class ManagedThread;
class ThreadState;
class SharedState;
// State is a thin façade that bundles the current VM with its SharedState,
// so runtime code can pass one object instead of two. Every member is an
// inline forwarder; State owns nothing and adds no state of its own beyond
// the two pointers/references below.
class State {
  VM* vm_;              // the thread-local VM this State wraps (non-owning)
  SharedState& shared_; // process-wide shared state, taken from vm->shared

public:
  State(VM* vm)
    : vm_(vm)
    , shared_(vm->shared)
  {}

  VM* vm() {
    return vm_;
  }

  ManagedThread* thread() {
    // NOTE(review): C-style cast kept as-is — presumably VM derives from
    // ManagedThread so this is a derived-to-base conversion; confirm before
    // replacing with a named cast.
    return (ManagedThread*)vm_;
  }

  // Records exc as the pending exception on this thread's ThreadState and
  // returns 0 (a NULL Object*), so callers can `return raise_exception(exc);`
  // as their error path.
  Object* raise_exception(Exception* exc) {
    vm_->thread_state()->raise_exception(exc);
    return 0;
  }

  void set_call_frame(CallFrame* cf) {
    vm_->set_call_frame(cf);
  }

  Globals& globals() {
    return shared_.globals;
  }

  // Symbol interning, forwarded to the VM in all three spellings.
  Symbol* symbol(const char* str) {
    return vm_->symbol(str);
  }

  // Takes the string by const reference: the original signature passed
  // std::string by value, copying it on every call for no benefit.
  Symbol* symbol(const std::string& str) {
    return vm_->symbol(str);
  }

  Symbol* symbol(String* str) {
    return vm_->symbol(str);
  }

  // Allocates a new object of class `cls` sized for T and tagged with
  // T::type, via the VM's typed allocator.
  template <class T>
  T* new_object(Class *cls) {
    return reinterpret_cast<T*>(vm_->new_object_typed(cls, sizeof(T), T::type));
  }

  ThreadState* thread_state() {
    return vm_->thread_state();
  }

  ObjectMemory* memory() {
    return shared_.memory();
  }

  SharedState& shared() {
    return shared_;
  }

  bool detect_stack_condition(void* end) {
    return vm_->detect_stack_condition(end);
  }

  // Fast-path interrupt check: only pays for process_async() when the VM
  // has flagged a pending local interrupt. Returns false if the async
  // processing raised (mirrors process_async's contract).
  bool check_async(CallFrame* call_frame) {
    if(vm_->check_local_interrupts) {
      return process_async(call_frame);
    }
    return true;
  }

  void raise_stack_error(CallFrame* call_frame);

  // Returns false (after raising a stack error) when `end` has crossed the
  // VM's stack limit.
  bool check_stack(CallFrame* call_frame, void* end) {
    // @TODO assumes stack growth direction
    if(unlikely(reinterpret_cast<uintptr_t>(end) < vm_->stack_limit_)) {
      raise_stack_error(call_frame);
      return false;
    }
    return true;
  }

  bool process_async(CallFrame* call_frame);
  void check_exception(CallFrame* call_frame);
  bool check_interrupts(GCToken gct, CallFrame* call_frame, void* end);

  gc::Slab& local_slab() {
    return vm_->local_slab();
  }

  // Stop-the-world rendezvous, forwarded to SharedState. The result must be
  // checked (WARN_UNUSED): presumably false means the world could not be
  // stopped and the caller must not proceed as if it were.
  bool stop_the_world() WARN_UNUSED {
    return shared_.stop_the_world(vm_);
  }

  void restart_world() {
    shared_.restart_world(vm_);
  }

  // gct is unused here: GCToken parameters act as compile-time proof that
  // the caller holds a GC token; only vm_ is forwarded.
  void gc_independent(GCToken gct) {
    shared_.gc_independent(vm_);
  }

  void gc_dependent() {
    shared_.gc_dependent(vm_);
  }

  // Publishes the current call frame, then gives the GC a chance to run
  // before rendezvousing with SharedState's checkpoint.
  void checkpoint(GCToken gct, CallFrame* call_frame) {
    vm_->set_call_frame(call_frame);
    gc_checkpoint(gct, call_frame);
    shared_.checkpoint(vm_);
  }

  void gc_checkpoint(GCToken gct, CallFrame* frame) {
    if(unlikely(shared_.check_gc_p())) {
      vm_->collect_maybe(gct, frame);
    }
  }

  // Acquires the VM lock while marked GC-independent, so a blocked lock
  // acquisition cannot stall a stop-the-world request.
  void lock(GCToken gct) {
    gc_independent(gct);
    vm_->lock(vm_);
    gc_dependent();
  }

  void unlock() {
    vm_->unlock(vm_);
  }

  void park(GCToken gct, CallFrame* call_frame);
  void park_timed(GCToken gct, CallFrame* call_frame, struct timespec* ts);
};
}
#endif