Skip to content

Commit 4272765

Browse files
committed
auto merge of #4791 : jbclements/rust/demodeing-and-deGCing, r=jbclements,brson
r? It looks to me like the string_reader and tt_reader structs are GC pointers only because they predate the modern borrow system. This commit leaves the type names string_reader and tt_reader alone (they still refer to GC-ed pointers), but internally the functions now use borrowed pointers to refer to these structures. My guess would be that it's possible to move this change outward and not use the GCed pointers at all, but that change looks like it could be a larger one. Actually, I'm delighted at how quick this change was.
2 parents 6e9298a + 61827a7 commit 4272765

File tree

2 files changed

+70
-67
lines changed

2 files changed

+70
-67
lines changed

src/libsyntax/ext/tt/transcribe.rs

+25-23
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,8 @@ type tt_frame = @{
3939
up: tt_frame_up,
4040
};
4141

42-
pub type tt_reader = @{
42+
pub type tt_reader = @tt_reader_;
43+
pub type tt_reader_ = {
4344
sp_diag: span_handler,
4445
interner: @ident_interner,
4546
mut cur: tt_frame,
@@ -87,7 +88,7 @@ pure fn dup_tt_frame(&&f: tt_frame) -> tt_frame {
8788
}
8889
}
8990

90-
pub pure fn dup_tt_reader(&&r: tt_reader) -> tt_reader {
91+
pub pure fn dup_tt_reader(r: &tt_reader_) -> tt_reader {
9192
@{sp_diag: r.sp_diag, interner: r.interner,
9293
mut cur: dup_tt_frame(r.cur),
9394
interpolations: r.interpolations,
@@ -96,7 +97,7 @@ pub pure fn dup_tt_reader(&&r: tt_reader) -> tt_reader {
9697
}
9798

9899

99-
pure fn lookup_cur_matched_by_matched(r: tt_reader,
100+
pure fn lookup_cur_matched_by_matched(r: &tt_reader_,
100101
start: @named_match) -> @named_match {
101102
pure fn red(+ad: @named_match, idx: &uint) -> @named_match {
102103
match *ad {
@@ -110,15 +111,15 @@ pure fn lookup_cur_matched_by_matched(r: tt_reader,
110111
vec::foldl(start, r.repeat_idx, red)
111112
}
112113

113-
fn lookup_cur_matched(r: tt_reader, name: ident) -> @named_match {
114+
fn lookup_cur_matched(r: &tt_reader_, name: ident) -> @named_match {
114115
lookup_cur_matched_by_matched(r, r.interpolations.get(&name))
115116
}
116117
enum lis {
117118
lis_unconstrained, lis_constraint(uint, ident), lis_contradiction(~str)
118119
}
119120

120-
fn lockstep_iter_size(t: token_tree, r: tt_reader) -> lis {
121-
fn lis_merge(lhs: lis, rhs: lis, r: tt_reader) -> lis {
121+
fn lockstep_iter_size(t: token_tree, r: &tt_reader_) -> lis {
122+
fn lis_merge(lhs: lis, rhs: lis, r: &tt_reader_) -> lis {
122123
match lhs {
123124
lis_unconstrained => rhs,
124125
lis_contradiction(_) => lhs,
@@ -150,7 +151,7 @@ fn lockstep_iter_size(t: token_tree, r: tt_reader) -> lis {
150151
}
151152

152153

153-
pub fn tt_next_token(&&r: tt_reader) -> TokenAndSpan {
154+
pub fn tt_next_token(r: &tt_reader_) -> TokenAndSpan {
154155
let ret_val = TokenAndSpan { tok: r.cur_tok, sp: r.cur_span };
155156
while r.cur.idx >= r.cur.readme.len() {
156157
/* done with this set; pop or repeat? */
@@ -199,25 +200,26 @@ pub fn tt_next_token(&&r: tt_reader) -> TokenAndSpan {
199200
return ret_val;
200201
}
201202
tt_seq(sp, ref tts, ref sep, zerok) => {
202-
match lockstep_iter_size(tt_seq(sp, (*tts), (*sep), zerok), r) {
203-
lis_unconstrained => {
204-
r.sp_diag.span_fatal(
203+
match lockstep_iter_size(tt_seq(sp, (*tts), (*sep), zerok), r) {
204+
lis_unconstrained => {
205+
r.sp_diag.span_fatal(
205206
sp, /* blame macro writer */
206-
~"attempted to repeat an expression containing no syntax \
207-
variables matched as repeating at this depth");
208-
}
209-
lis_contradiction(ref msg) => {
210-
/* FIXME #2887 blame macro invoker instead*/
211-
r.sp_diag.span_fatal(sp, (*msg));
212-
}
213-
lis_constraint(len, _) => {
214-
if len == 0 {
215-
if !zerok {
207+
~"attempted to repeat an expression \
208+
containing no syntax \
209+
variables matched as repeating at this depth");
210+
}
211+
lis_contradiction(ref msg) => {
212+
/* FIXME #2887 blame macro invoker instead*/
213+
r.sp_diag.span_fatal(sp, (*msg));
214+
}
215+
lis_constraint(len, _) => {
216+
if len == 0 {
217+
if !zerok {
216218
r.sp_diag.span_fatal(sp, /* FIXME #2887 blame invoker
217-
*/
219+
*/
218220
~"this must repeat at least \
219-
once");
220-
}
221+
once");
222+
}
221223

222224
r.cur.idx += 1u;
223225
return tt_next_token(r);

src/libsyntax/parse/lexer.rs

+45-44
Original file line numberDiff line numberDiff line change
@@ -24,24 +24,25 @@ use core::either;
2424
use core::str;
2525
use core::u64;
2626

27-
pub use ext::tt::transcribe::{tt_reader, new_tt_reader};
27+
pub use ext::tt::transcribe::{tt_reader, tt_reader_, new_tt_reader};
2828

29-
use std;
29+
//use std;
3030

3131
pub trait reader {
32-
fn is_eof() -> bool;
33-
fn next_token() -> TokenAndSpan;
34-
fn fatal(~str) -> !;
35-
fn span_diag() -> span_handler;
36-
pure fn interner() -> @token::ident_interner;
37-
fn peek() -> TokenAndSpan;
38-
fn dup() -> reader;
32+
fn is_eof(&self) -> bool;
33+
fn next_token(&self) -> TokenAndSpan;
34+
fn fatal(&self, ~str) -> !;
35+
fn span_diag(&self) -> span_handler;
36+
pure fn interner(&self) -> @token::ident_interner;
37+
fn peek(&self) -> TokenAndSpan;
38+
fn dup(&self) -> reader;
3939
}
4040

4141
#[deriving_eq]
4242
pub struct TokenAndSpan {tok: token::Token, sp: span}
4343

44-
pub type string_reader = @{
44+
pub type string_reader = @string_reader_;
45+
pub type string_reader_ = {
4546
span_diagnostic: span_handler,
4647
src: @~str,
4748
// The absolute offset within the codemap of the next character to read
@@ -90,7 +91,7 @@ pub fn new_low_level_string_reader(span_diagnostic: span_handler,
9091
// duplicating the string reader is probably a bad idea, in
9192
// that using them will cause interleaved pushes of line
9293
// offsets to the underlying filemap...
93-
fn dup_string_reader(&&r: string_reader) -> string_reader {
94+
fn dup_string_reader(r: &string_reader_) -> string_reader {
9495
@{span_diagnostic: r.span_diagnostic, src: r.src,
9596
mut pos: r.pos,
9697
mut last_pos: r.last_pos,
@@ -99,28 +100,28 @@ fn dup_string_reader(&&r: string_reader) -> string_reader {
99100
mut peek_tok: r.peek_tok, mut peek_span: r.peek_span}
100101
}
101102

102-
impl string_reader: reader {
103-
fn is_eof() -> bool { is_eof(self) }
103+
impl string_reader_: reader {
104+
fn is_eof(&self) -> bool { is_eof(self) }
104105
// return the next token. EFFECT: advances the string_reader.
105-
fn next_token() -> TokenAndSpan {
106+
fn next_token(&self) -> TokenAndSpan {
106107
let ret_val = TokenAndSpan {tok: self.peek_tok, sp: self.peek_span};
107108
string_advance_token(self);
108109
return ret_val;
109110
}
110-
fn fatal(m: ~str) -> ! {
111+
fn fatal(&self, m: ~str) -> ! {
111112
self.span_diagnostic.span_fatal(copy self.peek_span, m)
112113
}
113-
fn span_diag() -> span_handler { self.span_diagnostic }
114-
pure fn interner() -> @token::ident_interner { self.interner }
115-
fn peek() -> TokenAndSpan {
114+
fn span_diag(&self) -> span_handler { self.span_diagnostic }
115+
pure fn interner(&self) -> @token::ident_interner { self.interner }
116+
fn peek(&self) -> TokenAndSpan {
116117
TokenAndSpan {tok: self.peek_tok, sp: self.peek_span}
117118
}
118-
fn dup() -> reader { dup_string_reader(self) as reader }
119+
fn dup(&self) -> reader { dup_string_reader(self) as reader }
119120
}
120121

121-
pub impl tt_reader: reader {
122-
fn is_eof() -> bool { self.cur_tok == token::EOF }
123-
fn next_token() -> TokenAndSpan {
122+
pub impl tt_reader_: reader {
123+
fn is_eof(&self) -> bool { self.cur_tok == token::EOF }
124+
fn next_token(&self) -> TokenAndSpan {
124125
/* weird resolve bug: if the following `if`, or any of its
125126
statements are removed, we get resolution errors */
126127
if false {
@@ -129,19 +130,19 @@ pub impl tt_reader: reader {
129130
}
130131
tt_next_token(self)
131132
}
132-
fn fatal(m: ~str) -> ! {
133+
fn fatal(&self, m: ~str) -> ! {
133134
self.sp_diag.span_fatal(copy self.cur_span, m);
134135
}
135-
fn span_diag() -> span_handler { self.sp_diag }
136-
pure fn interner() -> @token::ident_interner { self.interner }
137-
fn peek() -> TokenAndSpan {
136+
fn span_diag(&self) -> span_handler { self.sp_diag }
137+
pure fn interner(&self) -> @token::ident_interner { self.interner }
138+
fn peek(&self) -> TokenAndSpan {
138139
TokenAndSpan { tok: self.cur_tok, sp: self.cur_span }
139140
}
140-
fn dup() -> reader { dup_tt_reader(self) as reader }
141+
fn dup(&self) -> reader { dup_tt_reader(self) as reader }
141142
}
142143

143144
// EFFECT: advance peek_tok and peek_span to refer to the next token.
144-
fn string_advance_token(&&r: string_reader) {
145+
fn string_advance_token(r: &string_reader_) {
145146
match (consume_whitespace_and_comments(r)) {
146147
Some(comment) => {
147148
r.peek_tok = comment.tok;
@@ -159,11 +160,11 @@ fn string_advance_token(&&r: string_reader) {
159160
}
160161
}
161162

162-
fn byte_offset(rdr: string_reader) -> BytePos {
163+
fn byte_offset(rdr: &string_reader_) -> BytePos {
163164
(rdr.pos - rdr.filemap.start_pos)
164165
}
165166

166-
pub fn get_str_from(rdr: string_reader, start: BytePos) -> ~str {
167+
pub fn get_str_from(rdr: &string_reader_, start: BytePos) -> ~str {
167168
unsafe {
168169
// I'm pretty skeptical about this subtraction. What if there's a
169170
// multi-byte character before the mark?
@@ -174,7 +175,7 @@ pub fn get_str_from(rdr: string_reader, start: BytePos) -> ~str {
174175

175176
// EFFECT: advance the StringReader by one character. If a newline is
176177
// discovered, add it to the FileMap's list of line start offsets.
177-
pub fn bump(rdr: string_reader) {
178+
pub fn bump(rdr: &string_reader_) {
178179
rdr.last_pos = rdr.pos;
179180
let current_byte_offset = byte_offset(rdr).to_uint();
180181
if current_byte_offset < (*rdr.src).len() {
@@ -198,10 +199,10 @@ pub fn bump(rdr: string_reader) {
198199
rdr.curr = -1 as char;
199200
}
200201
}
201-
pub fn is_eof(rdr: string_reader) -> bool {
202+
pub fn is_eof(rdr: &string_reader_) -> bool {
202203
rdr.curr == -1 as char
203204
}
204-
pub fn nextch(rdr: string_reader) -> char {
205+
pub fn nextch(rdr: &string_reader_) -> char {
205206
let offset = byte_offset(rdr).to_uint();
206207
if offset < (*rdr.src).len() {
207208
return str::char_at(*rdr.src, offset);
@@ -246,7 +247,7 @@ fn is_bin_digit(c: char) -> bool { return c == '0' || c == '1'; }
246247

247248
// EFFECT: eats whitespace and comments.
248249
// returns a Some(sugared-doc-attr) if one exists, None otherwise.
249-
fn consume_whitespace_and_comments(rdr: string_reader)
250+
fn consume_whitespace_and_comments(rdr: &string_reader_)
250251
-> Option<TokenAndSpan> {
251252
while is_whitespace(rdr.curr) { bump(rdr); }
252253
return consume_any_line_comment(rdr);
@@ -255,7 +256,7 @@ fn consume_whitespace_and_comments(rdr: string_reader)
255256
// PRECONDITION: rdr.curr is not whitespace
256257
// EFFECT: eats any kind of comment.
257258
// returns a Some(sugared-doc-attr) if one exists, None otherwise
258-
fn consume_any_line_comment(rdr: string_reader)
259+
fn consume_any_line_comment(rdr: &string_reader_)
259260
-> Option<TokenAndSpan> {
260261
if rdr.curr == '/' {
261262
match nextch(rdr) {
@@ -298,7 +299,7 @@ fn consume_any_line_comment(rdr: string_reader)
298299
}
299300

300301
// might return a sugared-doc-attr
301-
fn consume_block_comment(rdr: string_reader)
302+
fn consume_block_comment(rdr: &string_reader_)
302303
-> Option<TokenAndSpan> {
303304

304305
// block comments starting with "/**" or "/*!" are doc-comments
@@ -337,7 +338,7 @@ fn consume_block_comment(rdr: string_reader)
337338
return consume_whitespace_and_comments(rdr);
338339
}
339340

340-
fn scan_exponent(rdr: string_reader) -> Option<~str> {
341+
fn scan_exponent(rdr: &string_reader_) -> Option<~str> {
341342
let mut c = rdr.curr;
342343
let mut rslt = ~"";
343344
if c == 'e' || c == 'E' {
@@ -355,7 +356,7 @@ fn scan_exponent(rdr: string_reader) -> Option<~str> {
355356
} else { return None::<~str>; }
356357
}
357358

358-
fn scan_digits(rdr: string_reader, radix: uint) -> ~str {
359+
fn scan_digits(rdr: &string_reader_, radix: uint) -> ~str {
359360
let mut rslt = ~"";
360361
loop {
361362
let c = rdr.curr;
@@ -370,7 +371,7 @@ fn scan_digits(rdr: string_reader, radix: uint) -> ~str {
370371
};
371372
}
372373

373-
fn scan_number(c: char, rdr: string_reader) -> token::Token {
374+
fn scan_number(c: char, rdr: &string_reader_) -> token::Token {
374375
let mut num_str, base = 10u, c = c, n = nextch(rdr);
375376
if c == '0' && n == 'x' {
376377
bump(rdr);
@@ -486,7 +487,7 @@ fn scan_number(c: char, rdr: string_reader) -> token::Token {
486487
}
487488
}
488489

489-
fn scan_numeric_escape(rdr: string_reader, n_hex_digits: uint) -> char {
490+
fn scan_numeric_escape(rdr: &string_reader_, n_hex_digits: uint) -> char {
490491
let mut accum_int = 0, i = n_hex_digits;
491492
while i != 0u {
492493
let n = rdr.curr;
@@ -501,7 +502,7 @@ fn scan_numeric_escape(rdr: string_reader, n_hex_digits: uint) -> char {
501502
return accum_int as char;
502503
}
503504

504-
fn next_token_inner(rdr: string_reader) -> token::Token {
505+
fn next_token_inner(rdr: &string_reader_) -> token::Token {
505506
let mut accum_str = ~"";
506507
let mut c = rdr.curr;
507508
if (c >= 'a' && c <= 'z')
@@ -526,7 +527,7 @@ fn next_token_inner(rdr: string_reader) -> token::Token {
526527
if is_dec_digit(c) {
527528
return scan_number(c, rdr);
528529
}
529-
fn binop(rdr: string_reader, op: token::binop) -> token::Token {
530+
fn binop(rdr: &string_reader_, op: token::binop) -> token::Token {
530531
bump(rdr);
531532
if rdr.curr == '=' {
532533
bump(rdr);
@@ -719,7 +720,7 @@ fn next_token_inner(rdr: string_reader) -> token::Token {
719720
}
720721
}
721722

722-
fn consume_whitespace(rdr: string_reader) {
723+
fn consume_whitespace(rdr: &string_reader_) {
723724
while is_whitespace(rdr.curr) && !is_eof(rdr) { bump(rdr); }
724725
}
725726

@@ -730,7 +731,7 @@ pub mod test {
730731
use util::interner;
731732
use diagnostic;
732733
use util::testing::{check_equal, check_equal_ptr};
733-
#[test] fn t1 () {
734+
#[test] fn t1 () {
734735
let teststr =
735736
@~"/* my source file */
736737
fn main() { io::println(~\"zebra\"); }\n";

0 commit comments

Comments
 (0)