At the moment, many of the core assertions in the standard library make use of the unreachable macro, which is simply an indirection for panic. This is totally fine, but I would like to begin an initiative on upgrading some of these invocations to core::hint::unreachable_unchecked so that we may (at least) give the compiler a chance to optimize the code better.
Below is a non-exhaustive list of possible candidates. I will be editing the issue as more candidates are introduced and as certain examples are marked 100% safe or not.
Merged
None so far.
Definitely Safe
This is the perfect candidate for core::hint::unreachable_unchecked since it is apparent that self is now the Cow::Owned variant.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned {
    match *self {
        Borrowed(borrowed) => {
            *self = Owned(borrowed.to_owned());
            match *self {
                Borrowed(..) => unreachable!(),
                Owned(ref mut owned) => owned,
            }
        }
        Owned(ref mut owned) => owned,
    }
}
Possible Candidates
#[unstable(feature = "once_cell", issue = "74465")]
impl<T: Clone> Clone for OnceCell<T> {
    fn clone(&self) -> OnceCell<T> {
        let res = OnceCell::new();
        if let Some(value) = self.get() {
            match res.set(value.clone()) {
                Ok(()) => (),
                Err(_) => unreachable!(),
            }
        }
        res
    }
}
fn clone(&self) -> SyncOnceCell<T> {
    let cell = Self::new();
    if let Some(value) = self.get() {
        match cell.set(value.clone()) {
            Ok(()) => (),
            Err(_) => unreachable!(),
        }
    }
    cell
}
fn from(value: T) -> Self {
    let cell = Self::new();
    match cell.set(value) {
        Ok(()) => cell,
        Err(_) => unreachable!(),
    }
}
|
// Initialization is done. |
|
Err(DONE) => {} |
|
// Not possible, these are one-use channels |
|
DATA => unreachable!(), |
|
match (&mut *self.data.get()).take() { |
|
Some(data) => Ok(data), |
|
None => unreachable!(), |
|
} |
|
// We are the sole receiver; there cannot be a blocking |
|
// receiver already. |
|
_ => unreachable!(), |
|
// We're the only ones that can block on this port |
|
_ => unreachable!(), |
|
// Now that we've got ownership of our state, figure out what to do |
|
// about it. |
|
match state { |
|
EMPTY => unreachable!(), |
|
// There was a thread waiting, just pass the lock |
|
if let NotifiedTcs::Single(tcs) = guard.notified_tcs() { |
|
guard.lock_var_mut().owner = Some(tcs) |
|
} else { |
|
unreachable!() // called notify_one |
|
} |
|
unsafe fn __iterator_get_unchecked(&mut self, _idx: usize) -> Self::Item |
|
where |
|
Self: TrustedRandomAccessNoCoerce, |
|
{ |
|
unreachable!("Always specialized"); |
|
} |
|
// One or more readers were waiting, pass the lock to them |
|
if let NotifiedTcs::All { count } = rguard.notified_tcs() { |
|
*rguard.lock_var_mut() = Some(count) |
|
} else { |
|
unreachable!() // called notify_all |
|
} |
|
match (self.a.next_back(), self.b.next_back()) { |
|
(Some(x), Some(y)) => Some((x, y)), |
|
(None, None) => None, |
|
_ => unreachable!(), |
|
} |
|
default unsafe fn get_unchecked(&mut self, _idx: usize) -> <Self as Iterator>::Item |
|
where |
|
Self: TrustedRandomAccessNoCoerce, |
|
{ |
|
unreachable!("Always specialized"); |
|
} |
|
// Now that we've determined that this queue "has data", we peek at the |
|
// queue to see if the data is an upgrade or not. If it's an upgrade, |
|
// then we need to destroy this port and abort selection on the |
|
// upgraded port. |
|
if has_data { |
|
match self.queue.peek() { |
|
Some(&mut GoUp(..)) => match self.queue.pop() { |
|
Some(GoUp(port)) => Err(port), |
|
_ => unreachable!(), |
|
}, |
|
_ => Ok(true), |
|
} |
|
} else { |
|
Ok(false) |
|
} |
|
} |
|
match self.queue.pop() { |
|
mpsc::Data(t) => Ok(t), |
|
mpsc::Empty => Err(Disconnected), |
|
// with no senders, an inconsistency is impossible. |
|
mpsc::Inconsistent => unreachable!(), |
|
} |
|
match result { |
|
CopyResult::Ended(bytes_copied) => return Ok(bytes_copied + written), |
|
CopyResult::Error(e, _) => return Err(e), |
|
CopyResult::Fallback(0) => { /* use the fallback below */ } |
|
CopyResult::Fallback(_) => { |
|
unreachable!("splice should not return > 0 bytes on the fallback path") |
|
} |
|
State::Done => unreachable!(), |
|
match segments[5] { |
|
// IPv4 Compatible address |
|
0 => write!(f, "::{}", ipv4), |
|
// IPv4 Mapped address |
|
0xffff => write!(f, "::ffff:{}", ipv4), |
|
_ => unreachable!(), |
|
match (split_edge.force(), right_node.force()) { |
|
(Internal(edge), Internal(node)) => { |
|
left_node = edge.descend(); |
|
right_node = node.first_edge().descend(); |
|
} |
|
(Leaf(_), Leaf(_)) => break, |
|
_ => unreachable!(), |
|
} |
|
let mut out_node = match root.borrow_mut().force() { |
|
Leaf(leaf) => leaf, |
|
Internal(_) => unreachable!(), |
|
}; |
|
Err(_) => unreachable!("empty internal node"), |
|
match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) { |
|
(ForceResult::Internal(mut left), ForceResult::Internal(mut right)) => { |
|
// Make room for stolen edges. |
|
slice_shr(right.edge_area_mut(..new_right_len + 1), count); |
|
|
|
// Steal edges. |
|
move_to_slice( |
|
left.edge_area_mut(new_left_len + 1..old_left_len + 1), |
|
right.edge_area_mut(..count), |
|
); |
|
|
|
right.correct_childrens_parent_links(0..new_right_len + 1); |
|
} |
|
(ForceResult::Leaf(_), ForceResult::Leaf(_)) => {} |
|
_ => unreachable!(), |
|
} |
|
match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) { |
|
(ForceResult::Internal(mut left), ForceResult::Internal(mut right)) => { |
|
// Steal edges. |
|
move_to_slice( |
|
right.edge_area_mut(..count), |
|
left.edge_area_mut(old_left_len + 1..new_left_len + 1), |
|
); |
|
|
|
// Fill gap where stolen edges used to be. |
|
slice_shl(right.edge_area_mut(..old_right_len + 1), count); |
|
|
|
left.correct_childrens_parent_links(old_left_len + 1..new_left_len + 1); |
|
right.correct_childrens_parent_links(0..new_right_len + 1); |
|
} |
|
(ForceResult::Leaf(_), ForceResult::Leaf(_)) => {} |
|
_ => unreachable!(), |
|
} |
|
match (left_node.force(), right_node.force()) { |
|
(ForceResult::Internal(mut left), ForceResult::Internal(mut right)) => { |
|
move_to_slice( |
|
left.edge_area_mut(new_left_len + 1..old_left_len + 1), |
|
right.edge_area_mut(1..new_right_len + 1), |
|
); |
|
right.correct_childrens_parent_links(1..new_right_len + 1); |
|
} |
|
(ForceResult::Leaf(_), ForceResult::Leaf(_)) => {} |
|
_ => unreachable!(), |
|
} |
|
match mem::replace(&mut guard.blocker, f(signal_token)) { |
|
NoneBlocked => {} |
|
_ => unreachable!(), |
|
} |
|
Flavor::Sync(..) => unreachable!(), |
|
BlockedSender(..) => unreachable!(), |
|
// If this is a no-buffer channel (cap == 0), then if we didn't wait we |
|
// need to ACK the sender. If we waited, then the sender waking us up |
|
// was already the ACK. |
|
let pending_sender2 = if guard.cap == 0 && !waited { |
|
match mem::replace(&mut guard.blocker, NoneBlocked) { |
|
NoneBlocked => None, |
|
BlockedReceiver(..) => unreachable!(), |
|
BlockedSender(token) => { |
|
guard.canceled.take(); |
|
Some(token) |
|
} |
|
} |
|
} else { |
|
None |
|
}; |
|
match mem::replace(&mut guard.blocker, NoneBlocked) { |
|
NoneBlocked => {} |
|
BlockedSender(..) => unreachable!(), |
|
BlockedReceiver(token) => wakeup(token, guard), |
|
} |
|
let waiter = match mem::replace(&mut guard.blocker, NoneBlocked) { |
|
NoneBlocked => None, |
|
BlockedSender(token) => { |
|
*guard.canceled.take().unwrap() = true; |
|
Some(token) |
|
} |
|
BlockedReceiver(..) => unreachable!(), |
|
}; |
|
let new_port = match *unsafe { self.inner() } { |
|
Flavor::Oneshot(ref p) => match p.recv(None) { |
|
Ok(t) => return Ok(t), |
|
Err(oneshot::Disconnected) => return Err(RecvError), |
|
Err(oneshot::Upgraded(rx)) => rx, |
|
Err(oneshot::Empty) => unreachable!(), |
|
}, |
|
Flavor::Stream(ref p) => match p.recv(None) { |
|
Ok(t) => return Ok(t), |
|
Err(stream::Disconnected) => return Err(RecvError), |
|
Err(stream::Upgraded(rx)) => rx, |
|
Err(stream::Empty) => unreachable!(), |
|
}, |
|
Flavor::Shared(ref p) => match p.recv(None) { |
|
Ok(t) => return Ok(t), |
|
Err(shared::Disconnected) => return Err(RecvError), |
|
Err(shared::Empty) => unreachable!(), |
|
}, |
|
Flavor::Sync(ref p) => return p.recv(None).map_err(|_| RecvError), |
|
}; |
Must Remain unreachable
None so far.
Action
I would love to send in a PR that resolves this issue! However, I do need some help and guidance from the library maintainers to ensure that undefined behavior is 110% avoided. Feedback on which parts are safe regardless of platform/architecture/hardware is much appreciated.
As mentioned earlier, I will be editing this issue as more people chime in. Thanks!
At the moment, many of the core assertions in the standard library make use of the
unreachable macro, which is simply an indirection for panic. This is totally fine, but I would like to begin an initiative on upgrading some of these invocations to core::hint::unreachable_unchecked so that we may (at least) give the compiler a chance to optimize the code better. Below is a non-exhaustive list of possible candidates. I will be editing the issue as more candidates are introduced and as certain examples are marked 100% safe or not.
Merged
None so far.
Definitely Safe
This is the perfect candidate for
core::hint::unreachable_unchecked since it is apparent that self is now the Cow::Owned variant.
rust/library/alloc/src/borrow.rs
Lines 275 to 287 in 97f2698
Possible Candidates
rust/library/core/src/lazy.rs
Lines 52 to 64 in 673d0db
rust/library/std/src/lazy.rs
Lines 121 to 130 in adf1688
rust/library/std/src/lazy.rs
Lines 152 to 158 in adf1688
rust/library/std/src/sys/sgx/abi/mod.rs
Lines 51 to 52 in 17f30e5
rust/library/std/src/sync/mpsc/oneshot.rs
Lines 105 to 106 in 673d0db
rust/library/std/src/sync/mpsc/oneshot.rs
Lines 170 to 173 in 673d0db
rust/library/std/src/sync/mpsc/oneshot.rs
Lines 188 to 190 in 673d0db
rust/library/std/src/sync/mpsc/oneshot.rs
Lines 251 to 252 in 673d0db
rust/library/std/src/sync/mpsc/oneshot.rs
Lines 278 to 281 in 673d0db
rust/library/std/src/sys/sgx/mutex.rs
Lines 113 to 118 in a28109a
rust/library/core/src/iter/traits/iterator.rs
Lines 3444 to 3449 in 3ed6c1d
rust/library/std/src/sys/sgx/rwlock.rs
Lines 134 to 139 in c5fbcd3
rust/library/core/src/iter/adapters/zip.rs
Lines 182 to 186 in a49e38e
rust/library/core/src/iter/adapters/zip.rs
Lines 219 to 224 in a49e38e
rust/library/std/src/sync/mpsc/stream.rs
Lines 429 to 444 in fe1c942
rust/library/std/src/sync/mpsc/shared.rs
Lines 338 to 343 in 673d0db
rust/library/std/src/sys/unix/kernel_copy.rs
Lines 201 to 207 in dfd7b8d
rust/library/std/src/path.rs
Line 912 in 2ad56d5
rust/library/std/src/net/ip.rs
Lines 1679 to 1684 in a49e38e
rust/library/alloc/src/collections/btree/split.rs
Lines 49 to 56 in 673d0db
rust/library/alloc/src/collections/btree/map.rs
Lines 184 to 187 in 3ed6c1d
rust/library/alloc/src/collections/btree/node.rs
Line 1222 in 23461b2
rust/library/alloc/src/collections/btree/node.rs
Lines 1424 to 1439 in 23461b2
rust/library/alloc/src/collections/btree/node.rs
Lines 1487 to 1503 in 23461b2
rust/library/alloc/src/collections/btree/node.rs
Lines 1599 to 1609 in 23461b2
rust/library/std/src/sync/mpsc/sync.rs
Lines 111 to 114 in 673d0db
rust/library/std/src/sync/mpsc/mod.rs
Line 828 in 607d6b0
rust/library/std/src/sync/mpsc/sync.rs
Line 243 in 673d0db
rust/library/std/src/sync/mpsc/sync.rs
Lines 328 to 342 in 673d0db
rust/library/std/src/sync/mpsc/sync.rs
Lines 378 to 382 in 673d0db
rust/library/std/src/sync/mpsc/sync.rs
Lines 402 to 409 in 673d0db
rust/library/std/src/sync/mpsc/mod.rs
Lines 1157 to 1176 in 607d6b0
Must Remain unreachable
None so far.
Action
I would love to send in a PR that resolves this issue! However, I do need some help and guidance from the library maintainers to ensure that undefined behavior is 110% avoided. Feedback on which parts are safe regardless of platform/architecture/hardware is much appreciated.
As mentioned earlier, I will be editing this issue as more people chime in. Thanks!