@@ -1907,6 +1907,32 @@ impl<T> RawIterRange<T> {
             }
         }
     }
+
+    /// # Safety
+    /// If DO_CHECK_PTR_RANGE is false, caller must ensure that we never try to iterate
+    /// after yielding all elements.
+    #[cfg_attr(feature = "inline-more", inline)]
+    unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
+        loop {
+            if let Some(index) = self.current_group.lowest_set_bit() {
+                self.current_group = self.current_group.remove_lowest_bit();
+                return Some(self.data.next_n(index));
+            }
+
+            if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
+                return None;
+            }
+
+            // We might read past self.end up to the next group boundary,
+            // but this is fine because it only occurs on tables smaller
+            // than the group size where the trailing control bytes are all
+            // EMPTY. On larger tables self.end is guaranteed to be aligned
+            // to the group size (since tables are power-of-two sized).
+            self.current_group = Group::load_aligned(self.next_ctrl).match_full();
+            self.data = self.data.next_n(Group::WIDTH);
+            self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
+        }
+    }
 }
 
 // We make raw iterators unconditionally Send and Sync, and let the PhantomData
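The new `next_impl` walks the table one control-byte group at a time: it drains the bitmask of full slots in the current group, and only loads the next group (checking `next_ctrl` against `end` when `DO_CHECK_PTR_RANGE` is true) once that mask is empty. Below is a minimal, self-contained sketch of that bitmask walk, using a plain `u16` mask and a `Vec` of groups in place of hashbrown's `Group`, `Bucket`, and raw-pointer bookkeeping; the names `ToyGroupIter`, `groups`, and `base_index` are illustrative, not part of the crate.

```rust
// A toy model (not hashbrown's real `Group`/`Bucket` types) of the bitmask walk
// performed by `next_impl`: each group is summarized as a mask of full slots, and
// iteration repeatedly extracts the lowest set bit of the current mask.
struct ToyGroupIter {
    current_group: u16, // one bit per slot in the current group; 1 = full
    base_index: usize,  // absolute index of the first slot of the current group
    groups: Vec<u16>,   // full-slot masks of the remaining groups
    next_group: usize,  // position of the next group to load
}

impl Iterator for ToyGroupIter {
    type Item = usize; // yields absolute indices of full slots

    fn next(&mut self) -> Option<usize> {
        loop {
            // Same structure as `next_impl`: drain the current group's mask first.
            if self.current_group != 0 {
                let bit = self.current_group.trailing_zeros() as usize; // lowest_set_bit
                self.current_group &= self.current_group - 1; // remove_lowest_bit
                return Some(self.base_index + bit);
            }

            // Analogue of the `next_ctrl >= end` check: no more groups to load.
            if self.next_group >= self.groups.len() {
                return None;
            }

            // Analogue of `Group::load_aligned(..).match_full()` plus advancing
            // `data` and `next_ctrl` by `Group::WIDTH` (16 slots here).
            self.current_group = self.groups[self.next_group];
            self.base_index = self.next_group * 16;
            self.next_group += 1;
        }
    }
}

fn main() {
    let iter = ToyGroupIter {
        current_group: 0,
        base_index: 0,
        groups: vec![0b0000_0000_0000_0101, 0b1000_0000_0000_0000],
        next_group: 0,
    };
    // Slots 0 and 2 of group 0 and slot 15 of group 1 are "full".
    assert_eq!(iter.collect::<Vec<_>>(), vec![0, 2, 31]);
}
```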
@@ -1932,25 +1958,8 @@ impl<T> Iterator for RawIterRange<T> {
     #[cfg_attr(feature = "inline-more", inline)]
     fn next(&mut self) -> Option<Bucket<T>> {
         unsafe {
-            loop {
-                if let Some(index) = self.current_group.lowest_set_bit() {
-                    self.current_group = self.current_group.remove_lowest_bit();
-                    return Some(self.data.next_n(index));
-                }
-
-                if self.next_ctrl >= self.end {
-                    return None;
-                }
-
-                // We might read past self.end up to the next group boundary,
-                // but this is fine because it only occurs on tables smaller
-                // than the group size where the trailing control bytes are all
-                // EMPTY. On larger tables self.end is guaranteed to be aligned
-                // to the group size (since tables are power-of-two sized).
-                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
-                self.data = self.data.next_n(Group::WIDTH);
-                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
-            }
+            // SAFETY: We set the `DO_CHECK_PTR_RANGE` flag to true.
+            self.next_impl::<true>()
         }
     }
 
@@ -2128,16 +2137,22 @@ impl<T> Iterator for RawIter<T> {
 
     #[cfg_attr(feature = "inline-more", inline)]
     fn next(&mut self) -> Option<Bucket<T>> {
-        if let Some(b) = self.iter.next() {
+        // The inner iterator iterates over buckets,
+        // so it can do unnecessary work if we have already yielded all items.
+        if self.items == 0 {
+            return None;
+        }
+
+        let nxt = unsafe {
+            // SAFETY: We check the number of items to yield using the `items` field.
+            self.iter.next_impl::<false>()
+        };
+
+        if nxt.is_some() {
             self.items -= 1;
-            Some(b)
-        } else {
-            // We don't check against items == 0 here to allow the
-            // compiler to optimize away the item count entirely if the
-            // iterator length is never queried.
-            debug_assert_eq!(self.items, 0);
-            None
         }
+
+        nxt
     }
 
     #[inline]
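The change to `RawIter::next` follows a common wrapper pattern: keep an item count alongside the inner iterator, bail out as soon as the count reaches zero, and only decrement it when the inner iterator actually produced a value. A simplified, safe analogue of that pattern is sketched below; `CountedIter` and its fields are illustrative stand-ins, not hashbrown's actual `RawIter`, which additionally relies on the count to uphold the safety contract of `next_impl::<false>`.

```rust
// A simplified analogue (not hashbrown's `RawIter`) of the counting wrapper in this
// hunk: return early once the expected item count hits zero, and only decrement the
// count when the inner iterator actually yielded something.
struct CountedIter<I: Iterator> {
    inner: I,
    items: usize, // number of elements the inner iterator is still expected to yield
}

impl<I: Iterator> Iterator for CountedIter<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<I::Item> {
        // Fast path: once all expected items have been yielded, skip the inner
        // iterator entirely instead of letting it scan for more elements.
        if self.items == 0 {
            return None;
        }

        let nxt = self.inner.next();
        if nxt.is_some() {
            self.items -= 1;
        }
        nxt
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.items, Some(self.items))
    }
}

fn main() {
    let mut it = CountedIter {
        inner: vec![10, 20, 30].into_iter(),
        items: 2, // pretend only two items remain to be yielded
    };
    assert_eq!(it.next(), Some(10));
    assert_eq!(it.next(), Some(20));
    // The count is exhausted, so the inner iterator is not consulted again.
    assert_eq!(it.next(), None);
}
```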