@@ -36,6 +36,29 @@ pub fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
     f()
 }

+/// Copies the data from an iterator into the arena
+// SAFETY: the caller must ensure the destination memory region is large enough and is free.
+#[inline]
+unsafe fn write_from_iter<'a, T>(
+    mut iter: impl Iterator<Item = T>,
+    expected_len: usize,
+    mem: *mut T,
+) -> &'a mut [T] {
+    let mut i = 0;
+    // Use a manual loop since LLVM manages to optimize it better for
+    // slice iterators
+    loop {
+        let value = iter.next();
+        if i >= expected_len || value.is_none() {
+            // We only return as many items as the iterator gave us, even
+            // though it was supposed to give us `expected_len`
+            return slice::from_raw_parts_mut(mem, i);
+        }
+        ptr::write(mem.add(i), value.unwrap());
+        i += 1;
+    }
+}
+
 /// An arena that can hold objects of only one type.
 pub struct TypedArena<T> {
     /// A pointer to the next object to be allocated.
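
The free function replaces a method that was previously private to DroplessArena
(removed further down), so both arena types can share it. Below is a minimal,
self-contained sketch of its contract; the function body is copied from the hunk
above, while the MaybeUninit buffer setup is illustrative and not part of the
commit. The caller reserves room for `expected_len` items, and the returned slice
is truncated to however many items the iterator actually produced.

    use std::{mem::MaybeUninit, ptr, slice};

    unsafe fn write_from_iter<'a, T>(
        mut iter: impl Iterator<Item = T>,
        expected_len: usize,
        mem: *mut T,
    ) -> &'a mut [T] {
        let mut i = 0;
        loop {
            let value = iter.next();
            // Stop at the promised length or when the iterator runs dry.
            if i >= expected_len || value.is_none() {
                return slice::from_raw_parts_mut(mem, i);
            }
            ptr::write(mem.add(i), value.unwrap());
            i += 1;
        }
    }

    fn main() {
        // Room for 4 items, but the iterator yields only 3.
        let mut buf = [MaybeUninit::<u32>::uninit(); 4];
        let written = unsafe { write_from_iter(1u32..4, 4, buf.as_mut_ptr() as *mut u32) };
        assert_eq!(written, &[1, 2, 3]);
    }
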
@@ -105,9 +128,10 @@ const PAGE: usize = 4096;
 const HUGE_PAGE: usize = 2 * 1024 * 1024;

 impl<T> Default for TypedArena<T> {
-    /// Creates a new `TypedArena`.
-    fn default() -> TypedArena<T> {
-        TypedArena {
+    /// Creates a new `TypedArena`.
+    #[inline]
+    fn default() -> Self {
+        Self {
             // We set both `ptr` and `end` to 0 so that the first call to
             // alloc() will trigger a grow().
             ptr: Cell::new(ptr::null_mut()),
@@ -261,7 +285,32 @@ impl<T> TypedArena<T> {
     #[inline]
     pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
         assert!(mem::size_of::<T>() != 0);
-        iter.alloc_from_iter(self)
+        let iter = iter.into_iter();
+
+        if mem::needs_drop::<T>() {
+            iter.alloc_from_iter(self)
+        } else {
+            let size_hint = iter.size_hint();
+
+            match size_hint {
+                (min, Some(max)) if min == max => {
+                    // We know the exact number of elements the iterator will produce here
+                    let len = min;
+
+                    if len == 0 {
+                        return &mut [];
+                    }
+
+                    // SAFETY: T doesn't need drop, so a panicking iterator doesn't result in
+                    // dropping uninitialized memory
+                    unsafe {
+                        let start_ptr = self.alloc_raw_slice(len);
+                        write_from_iter(iter, len, start_ptr)
+                    }
+                }
+                _ => cold_path(|| iter.alloc_from_iter(self)),
+            }
+        }
     }

     /// Grows the arena.
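
The fast path keys off `Iterator::size_hint()`: only when the lower and upper
bounds agree is the element count known up front, letting the arena reserve the
whole slice in one `alloc_raw_slice` call. A small illustration (not from the
commit) of which iterators qualify:

    fn main() {
        let v = vec![1, 2, 3];
        // Exact-size iterators report matching bounds and take the fast path.
        assert_eq!(v.iter().size_hint(), (3, Some(3)));
        // Filtering loses the exact count, so this falls back to the cold path.
        assert_eq!(v.iter().filter(|&&x| x > 1).size_hint(), (0, Some(3)));
    }
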
@@ -489,28 +538,6 @@ impl DroplessArena {
         }
     }

-    #[inline]
-    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
-        &self,
-        mut iter: I,
-        len: usize,
-        mem: *mut T,
-    ) -> &mut [T] {
-        let mut i = 0;
-        // Use a manual loop since LLVM manages to optimize it better for
-        // slice iterators
-        loop {
-            let value = iter.next();
-            if i >= len || value.is_none() {
-                // We only return as many items as the iterator gave us, even
-                // though it was supposed to give us `len`
-                return slice::from_raw_parts_mut(mem, i);
-            }
-            ptr::write(mem.add(i), value.unwrap());
-            i += 1;
-        }
-    }
-
     #[inline]
     pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
         let iter = iter.into_iter();
@@ -529,7 +556,7 @@ impl DroplessArena {
                 }

                 let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
-                unsafe { self.write_from_iter(iter, len, mem) }
+                unsafe { write_from_iter(iter, len, mem) }
             }
             (_, _) => {
                 cold_path(move || -> &mut [T] {
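
With the duplicate method gone, both arenas funnel through the shared
`write_from_iter`. A hedged usage sketch (assuming the `rustc_arena` crate and
its `Default` impls, per the signatures above):

    use rustc_arena::{DroplessArena, TypedArena};

    fn main() {
        let typed: TypedArena<u32> = TypedArena::default();
        let a = typed.alloc_from_iter(0..5); // exact size_hint: fast path
        assert_eq!(a, &[0, 1, 2, 3, 4]);

        let dropless = DroplessArena::default();
        let b = dropless.alloc_from_iter((0..5u32).filter(|x| x % 2 == 0)); // cold path
        assert_eq!(b, &[0, 2, 4]);
    }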