@@ -1630,7 +1630,7 @@ HashTable<Key, Value, Extractor, Traits, KeyTraits, Allocator>::ExpandBuffer(
     }
   }
   table_ = temporary_table;
-  Allocator::template BackingWriteBarrier(&table_);
+  Allocator::BackingWriteBarrier(&table_);

   HashTableBucketInitializer<Traits, Allocator, Value>::InitializeTable(
       original_table, new_table_size);
@@ -1684,7 +1684,7 @@ Value* HashTable<Key, Value, Extractor, Traits, KeyTraits, Allocator>::RehashTo(
   // This swaps the newly allocated buffer with the current one. The store to
   // the current table has to be atomic to prevent races with concurrent marker.
   AsAtomicPtr(&table_)->store(new_hash_table.table_, std::memory_order_relaxed);
-  Allocator::template BackingWriteBarrier(&table_);
+  Allocator::BackingWriteBarrier(&table_);
   table_size_ = new_table_size;

   new_hash_table.table_ = old_table;
@@ -1836,8 +1836,8 @@ void HashTable<Key, Value, Extractor, Traits, KeyTraits, Allocator>::swap(
   // on the mutator thread, which is also the only one that writes to them, so
   // there is *no* risk of data races when reading.
   AtomicWriteSwap(table_, other.table_);
-  Allocator::template BackingWriteBarrier(&table_);
-  Allocator::template BackingWriteBarrier(&other.table_);
+  Allocator::BackingWriteBarrier(&table_);
+  Allocator::BackingWriteBarrier(&other.table_);
   if (IsWeak<ValueType>::value) {
     // Weak processing is omitted when no backing store is present. In case such
     // an empty table is later on used it needs to be strongified.
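
For context: the C++ `template` disambiguator may only prefix a name that actually refers to a member template. A plausible reading of this change is that `BackingWriteBarrier` is (or became) a plain, non-template static member of the allocator, at which point the `Allocator::template` spelling no longer compiles. A minimal sketch of the language rule, using hypothetical `TemplatedAlloc`/`PlainAlloc` types rather than Blink's real allocators:

    #include <cstdio>

    // Hypothetical allocator whose barrier is a member function template:
    // in a dependent context the 'template' disambiguator is allowed here,
    // and required when explicit template arguments follow the name.
    struct TemplatedAlloc {
      template <typename Slot>
      static void BackingWriteBarrier(Slot* slot) { std::printf("templated\n"); }
    };

    // Hypothetical allocator with a plain static member: prefixing its name
    // with 'template' does not refer to a template and is ill-formed.
    struct PlainAlloc {
      static void BackingWriteBarrier(void* slot) { std::printf("plain\n"); }
    };

    template <typename Allocator>
    void Publish(int** table_slot) {
      // OK for TemplatedAlloc, rejected for PlainAlloc:
      //   Allocator::template BackingWriteBarrier(table_slot);
      // OK for both (template arguments, if any, are deduced):
      Allocator::BackingWriteBarrier(table_slot);
    }

    int main() {
      int* table = nullptr;
      Publish<PlainAlloc>(&table);
      Publish<TemplatedAlloc>(&table);
    }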
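The comment in the RehashTo hunk describes a lock-free publication pattern: the mutator installs the new backing store with an atomic store so a concurrent marking thread never observes a torn pointer, then fires the allocator's write barrier so the collector revisits the slot. A simplified sketch of that shape; `FakeAllocator` and `TableShell` are illustrative stand-ins, not Blink's API:

    #include <atomic>

    // Stand-in for the GC allocator: a real write barrier would record the
    // slot so the concurrent marker (re)scans the newly installed backing.
    struct FakeAllocator {
      static void BackingWriteBarrier(void* slot) { (void)slot; }
    };

    // Treat a plain pointer field as atomic for the one racing access. The
    // real code makes the same layout-compatibility assumption in its own
    // AsAtomicPtr() helper.
    template <typename T>
    std::atomic<T*>* AsAtomicPtr(T** slot) {
      return reinterpret_cast<std::atomic<T*>*>(slot);
    }

    struct TableShell {
      int* table_ = nullptr;

      void PublishBacking(int* new_backing) {
        // The store is atomic so a concurrent reader sees either the old or
        // the new pointer, never a mixture. In this sketch relaxed ordering
        // suffices because the write barrier, not the store itself, is what
        // synchronizes with the collector.
        AsAtomicPtr(&table_)->store(new_backing, std::memory_order_relaxed);
        FakeAllocator::BackingWriteBarrier(&table_);
      }
    };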