I have this deref specialization code:
// Trait with a no-op default `bar`; the impls below override it with
// distinct printouts so we can see which impl method resolution picked.
trait Foo { fn bar(&mut self) {} }
// Base case: the unit type is `Foo` (inherits the default no-op `bar`).
impl Foo for () {}
// Blanket impl: `&mut T` is `Foo` whenever `T` is — so e.g.
// `&mut ()` and `&mut &mut ()` both implement `Foo`.
impl<T> Foo for &mut T where T: Foo {}
// Wrapper around a mutable reference. Note: `T` carries no `Foo` bound here,
// which matters for trait selection inside the generic `bar` below.
struct S<'v, T> { value: &'v mut T }
// `Foo` for the wrapper struct. This impl deliberately has NO `T: Foo`
// bound — that absence is the root cause of the surprise demonstrated
// inside `bar`.
impl<'v, T> Foo for S<'v, T> {
    fn bar(&mut self) {
        // Smart-pointer-style wrapper: method lookup will autoderef
        // through it when no applicable impl matches the outer type.
        struct DerefWrap<T>(T);
        struct Wrap<T>(T);
        impl<T> std::ops::Deref for DerefWrap<T> {
            type Target = T;
            fn deref(&self) -> &Self::Target {
                &self.0
            }
        }
        impl<T> std::ops::DerefMut for DerefWrap<T> {
            fn deref_mut(&mut self) -> &mut Self::Target {
                &mut self.0
            }
        }
        // "Specialized" impl: applies only when the compiler can prove
        // `T: Foo` at type-check time — i.e. before monomorphization.
        impl<T: Foo> Foo for DerefWrap<Wrap<&mut T>> {
            fn bar(&mut self) {
                println!("ok")
            }
        }
        // Fallback impl, reachable via one `Deref` step on `DerefWrap`.
        impl<T> Foo for Wrap<T> {
            fn bar(&mut self) {
                println!("nope")
            }
        }
        let mut value = &mut ();
        // Concrete case: the wrapped value has type `&mut &mut ()`.
        // `&mut (): Foo` holds (via `impl Foo for ()` plus the blanket
        // `&mut T` impl), so the `DerefWrap<Wrap<&mut T>>` impl is
        // selected directly -> prints "ok".
        DerefWrap(Wrap(&mut value)).bar(); // Works
        // Generic case: the wrapped value has type `&mut &'v mut T` with
        // `T` unconstrained — `T: Foo` is NOT provable inside this generic
        // function, so the `DerefWrap<Wrap<&mut T>>` impl is rejected.
        // Method lookup then autoderefs `DerefWrap` to `Wrap<_>` and picks
        // the unconstrained `Wrap<T>` impl -> prints "nope", even though
        // `T = ()` at the call site in `main` (trait selection happens
        // during type checking, not after monomorphization).
        DerefWrap(Wrap(&mut self.value)).bar(); // Doesn't work. Why?
    }
}
/// Entry point: wraps a mutable unit reference in `S` and invokes
/// `Foo::bar`, which runs the demonstration in the impl above.
fn main() {
    let mut demo = S { value: &mut () };
    demo.bar();
}
Why does the first call to `bar` work (it prints `ok`), while the second one doesn't (it prints `nope`)? The arguments appear to have the same type: `&mut &mut ()` vs `&mut &mut ()`. Is there some obscure difference between struct member data and let bindings that I haven't considered?
Thank you.