@@ -453,7 +453,46 @@ impl RwLock {
453
453
454
454
#[inline]
/// Downgrades this lock from write-locked to read-locked in place.
///
/// # Safety
///
/// The calling thread must currently hold the write lock (the assertion on
/// the reader count below relies on this — "while we had the write lock").
/// NOTE(review): the full caller contract is not visible in this chunk;
/// confirm against the trait/docs this method implements.
pub unsafe fn downgrade(&self) {
    // Fast path: the state is exactly `LOCKED` (one writer, no queued
    // waiters). Atomically replace it with a read-locked state holding a
    // single reader (`LOCKED | SINGLE`). `Release` on success publishes the
    // writes made under the write lock to later lockers; `Relaxed` on
    // failure is fine because the loaded `state` is only re-validated by the
    // CAS loop below.
    if let Err(mut state) = self.state.compare_exchange(
        without_provenance_mut(LOCKED),
        without_provenance_mut(LOCKED | SINGLE),
        Release,
        Relaxed,
    ) {
        // Slow path: there are queued waiter nodes. Attempt to grab the
        // queue lock so the queue can be traversed and updated safely.
        loop {
            let next = state.map_addr(|addr| addr | QUEUE_LOCKED);
            match self.state.compare_exchange(state, next, AcqRel, Relaxed) {
                // Lost a race with a concurrent state change; retry with
                // the freshly observed value.
                Err(new_state) => state = new_state,
                Ok(new_state) => {
                    // NOTE(review): on success, `compare_exchange` returns
                    // the value *before* the exchange, yet this assertion
                    // requires `QUEUE_LOCKED` to already be set in it, and
                    // `state` is then left at that previous value rather
                    // than `next`. If `QUEUE_LOCKED` can be set here by
                    // another thread, `next == state` makes the CAS a
                    // trivial no-op and this path would not exclusively
                    // acquire the queue lock — confirm the intended
                    // invariant before relying on this code.
                    assert_eq!(
                        new_state.mask(!MASK).addr(),
                        LOCKED | QUEUED | QUEUE_LOCKED,
                        "{:p}",
                        new_state
                    );
                    state = new_state;
                    break;
                }
            }
        }

        // Sanity-check the flag bits once more before touching the queue.
        assert_eq!(state.mask(!MASK).addr(), LOCKED | QUEUED | QUEUE_LOCKED);

        // SAFETY: We have the queue lock so all safety contracts are fulfilled.
        let tail = unsafe { add_backlinks_and_find_tail(to_node(state)).as_ref() };

        // Increment the reader count from 0 to 1. While waiters are queued,
        // the count is kept in the tail node's `next` field; it must have
        // been zero because we held the write lock exclusively.
        assert_eq!(
            tail.next.0.fetch_byte_add(SINGLE, AcqRel).addr(),
            0,
            "Reader count was not zero while we had the write lock"
        );

        // Release the queue lock; `Release` publishes our queue updates to
        // the next thread that acquires it.
        self.state.fetch_byte_sub(QUEUE_LOCKED, Release);
    }
}
458
497
459
498
/// # Safety
@@ -547,6 +586,7 @@ impl RwLock {
547
586
loop {
548
587
let prev = unsafe { current. as_ref ( ) . prev . get ( ) } ;
549
588
unsafe {
589
+ // There must be threads waiting.
550
590
Node :: complete ( current) ;
551
591
}
552
592
match prev {
0 commit comments