8
8
// option. This file may not be copied, modified, or distributed
9
9
// except according to those terms.
10
10
11
- use std:: vec;
12
- use std:: libc:: { c_uint, uintptr_t} ;
11
+ use std:: rt:: env:: max_cached_stacks;
12
+ use std:: os:: { errno, page_size, MemoryMap , MapReadable , MapWritable ,
13
+ MapNonStandardFlags , MapVirtual } ;
14
+ use std:: libc;
13
15
14
- pub struct StackSegment {
15
- priv buf: ~[ u8 ] ,
16
- priv valgrind_id : c_uint
16
+ /// A task's stack. The name "Stack" is a vestige of segmented stacks.
17
+ pub struct Stack {
18
+ priv buf: MemoryMap ,
19
+ priv min_size : uint ,
20
+ priv valgrind_id : libc:: c_uint ,
17
21
}
18
22
19
- impl StackSegment {
20
- pub fn new ( size : uint ) -> StackSegment {
21
- unsafe {
22
- // Crate a block of uninitialized values
23
- let mut stack = vec:: with_capacity ( size) ;
24
- stack. set_len ( size) ;
23
+ // Try to use MAP_STACK on platforms that support it (it's what we're doing
24
+ // anyway), but some platforms don't support it at all. For example, it appears
25
+ // that there's a bug in freebsd that MAP_STACK implies MAP_FIXED (so it always
26
+ // fails): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html
27
+ #[ cfg( not( windows) , not( target_os = "freebsd" ) ) ]
28
+ static STACK_FLAGS : libc:: c_int = libc:: MAP_STACK | libc:: MAP_PRIVATE |
29
+ libc:: MAP_ANON ;
30
+ #[ cfg( target_os = "freebsd" ) ]
31
+ static STACK_FLAGS : libc:: c_int = libc:: MAP_PRIVATE | libc:: MAP_ANON ;
32
+ #[ cfg( windows) ]
33
+ static STACK_FLAGS : libc:: c_int = 0 ;
25
34
26
- let mut stk = StackSegment {
27
- buf : stack,
28
- valgrind_id : 0
29
- } ;
35
+ impl Stack {
36
+ /// Allocate a new stack of `size`. If size = 0, this will fail. Use
37
+ /// `dummy_stack` if you want a zero-sized stack.
38
+ pub fn new ( size : uint ) -> Stack {
39
+ // Map in a stack. Eventually we might be able to handle stack
40
+ // allocation failure, which would fail to spawn the task. But there's
41
+ // not many sensible things to do on OOM. Failure seems fine (and is
42
+ // what the old stack allocation did).
43
+ let stack = match MemoryMap :: new ( size, [ MapReadable , MapWritable ,
44
+ MapNonStandardFlags ( STACK_FLAGS ) ] ) {
45
+ Ok ( map) => map,
46
+ Err ( e) => fail ! ( "mmap for stack of size {} failed: {}" , size, e)
47
+ } ;
30
48
31
- // XXX: Using the FFI to call a C macro. Slow
32
- stk. valgrind_id = rust_valgrind_stack_register ( stk. start ( ) , stk. end ( ) ) ;
33
- return stk;
49
+ // Change the last page to be inaccessible. This is to provide safety;
50
+ // when an FFI function overflows it will (hopefully) hit this guard
51
+ // page. It isn't guaranteed, but that's why FFI is unsafe. buf.data is
52
+ // guaranteed to be aligned properly.
53
+ if !protect_last_page ( & stack) {
54
+ fail ! ( "Could not memory-protect guard page. stack={:?}, errno={}" ,
55
+ stack, errno( ) ) ;
56
+ }
57
+
58
+ let mut stk = Stack {
59
+ buf : stack,
60
+ min_size : size,
61
+ valgrind_id : 0
62
+ } ;
63
+
64
+ // XXX: Using the FFI to call a C macro. Slow
65
+ stk. valgrind_id = unsafe {
66
+ rust_valgrind_stack_register ( stk. start ( ) , stk. end ( ) )
67
+ } ;
68
+ return stk;
69
+ }
70
+
71
+ /// Create a 0-length stack which starts (and ends) at 0.
72
+ pub unsafe fn dummy_stack ( ) -> Stack {
73
+ Stack {
74
+ buf : MemoryMap { data : 0 as * mut u8 , len : 0 , kind : MapVirtual } ,
75
+ min_size : 0 ,
76
+ valgrind_id : 0
34
77
}
35
78
}
36
79
37
80
/// Point to the low end of the allocated stack
38
81
pub fn start ( & self ) -> * uint {
39
- self . buf . as_ptr ( ) as * uint
82
+ self . buf . data as * uint
40
83
}
41
84
42
- /// Point one word beyond the high end of the allocated stack
85
+ /// Point one uint beyond the high end of the allocated stack
43
86
pub fn end ( & self ) -> * uint {
44
87
unsafe {
45
- self . buf . as_ptr ( ) . offset ( self . buf . len ( ) as int ) as * uint
88
+ self . buf . data . offset ( self . buf . len as int ) as * uint
46
89
}
47
90
}
48
91
}
49
92
50
- impl Drop for StackSegment {
93
+ #[ cfg( unix) ]
94
+ fn protect_last_page ( stack : & MemoryMap ) -> bool {
95
+ unsafe {
96
+ // This may seem backwards: the start of the segment is the last page?
97
+ // Yes! The stack grows from higher addresses (the end of the allocated
98
+ // block) to lower addresses (the start of the allocated block).
99
+ let last_page = stack. data as * libc:: c_void ;
100
+ libc:: mprotect ( last_page, page_size ( ) as libc:: size_t ,
101
+ libc:: PROT_NONE ) != -1
102
+ }
103
+ }
104
/// Windows variant of the guard-page setup; see the unix version for why the
/// *start* of the mapping is the page being protected.
#[cfg(windows)]
fn protect_last_page(stack: &MemoryMap) -> bool {
    unsafe {
        // see above
        let last_page = stack.data as *mut libc::c_void;
        let mut old_prot: libc::DWORD = 0;
        libc::VirtualProtect(last_page, page_size() as libc::SIZE_T,
                             libc::PAGE_NOACCESS,
                             &mut old_prot as libc::LPDWORD) != 0
    }
}
116
+
117
+ impl Drop for Stack {
51
118
fn drop ( & mut self ) {
52
119
unsafe {
53
120
// XXX: Using the FFI to call a C macro. Slow
@@ -56,20 +123,36 @@ impl Drop for StackSegment {
56
123
}
57
124
}
58
125
59
- pub struct StackPool ( ( ) ) ;
126
+ pub struct StackPool {
127
+ // Ideally this would be some datastructure that preserved ordering on
128
+ // Stack.min_size.
129
+ priv stacks : ~[ Stack ] ,
130
+ }
60
131
61
132
impl StackPool {
62
- pub fn new ( ) -> StackPool { StackPool ( ( ) ) }
133
+ pub fn new ( ) -> StackPool {
134
+ StackPool {
135
+ stacks : ~[ ] ,
136
+ }
137
+ }
63
138
64
- pub fn take_segment ( & self , min_size : uint ) -> StackSegment {
65
- StackSegment :: new ( min_size)
139
+ pub fn take_stack ( & mut self , min_size : uint ) -> Stack {
140
+ // Ideally this would be a binary search
141
+ match self . stacks . iter ( ) . position ( |s| s. min_size < min_size) {
142
+ Some ( idx) => self . stacks . swap_remove ( idx) ,
143
+ None => Stack :: new ( min_size)
144
+ }
66
145
}
67
146
68
- pub fn give_segment ( & self , _stack : StackSegment ) {
147
+ pub fn give_stack ( & mut self , stack : Stack ) {
148
+ if self . stacks . len ( ) <= max_cached_stacks ( ) {
149
+ self . stacks . push ( stack)
150
+ }
69
151
}
70
152
}
71
153
72
154
extern {
73
- fn rust_valgrind_stack_register ( start : * uintptr_t , end : * uintptr_t ) -> c_uint ;
74
- fn rust_valgrind_stack_deregister ( id : c_uint ) ;
155
+ fn rust_valgrind_stack_register ( start : * libc:: uintptr_t ,
156
+ end : * libc:: uintptr_t ) -> libc:: c_uint ;
157
+ fn rust_valgrind_stack_deregister ( id : libc:: c_uint ) ;
75
158
}
0 commit comments