veloren_common/util/dir.rs

use crate::comp::Ori;

use super::{Plane, Projection};
use rand::Rng;
use serde::{Deserialize, Serialize};
use tracing::warn;
use vek::*;

/// Type representing a direction using a `Vec3` that is normalized and NaN-free.
/// These properties are actively enforced via panics when `debug_assertions`
/// is enabled.
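///
/// A minimal usage sketch (the import path is assumed for illustration, so the
/// example is marked `ignore` rather than run as a doctest):
/// ```ignore
/// use veloren_common::util::Dir; // path assumed for illustration
/// use vek::Vec3;
///
/// // `from_unnormalized` normalizes its input and rejects zero/NaN vectors.
/// let dir = Dir::from_unnormalized(Vec3::new(3.0, 4.0, 0.0)).unwrap();
/// assert!(dir.is_valid());
/// assert_eq!(Dir::from_unnormalized(Vec3::zero()), None);
/// ```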
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(into = "SerdeDir")]
#[serde(from = "SerdeDir")]
pub struct Dir(Vec3<f32>);
impl Default for Dir {
    fn default() -> Self { Self::forward() }
}

// Validate at Deserialization
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
struct SerdeDir(Vec3<f32>);
impl From<SerdeDir> for Dir {
    fn from(dir: SerdeDir) -> Self {
        let dir = dir.0;
        if dir.map(f32::is_nan).reduce_or() {
            warn!(
                ?dir,
                "Deserialized dir containing NaNs, replacing with default"
            );
            Default::default()
        } else if !dir.is_normalized() {
            warn!(
                ?dir,
                "Deserialized unnormalized dir, replacing with default"
            );
            Default::default()
        } else {
            Self(dir)
        }
    }
}

impl From<Dir> for SerdeDir {
    fn from(other: Dir) -> SerdeDir { SerdeDir(*other) }
}
/*pub enum TryFromVec3Error {
    ContainsNans,
    NotNormalized,
}

impl TryFrom<Vec3<f32>> for Dir {
    type Error = TryFromVec3Error;

    fn try_from(v: Vec3) -> Result<Self, TryFromVec3Error> {
        if v.map(f32::is_nan).reduce_or() {
            Err(TryFromVec3Error::ContainsNans)
        } else {
            v.try_normalized()
                .map(|n| Self(n))
                .ok_or(TryFromVec3Error::NotNormalized)
        }
    }
}*/

impl Dir {
    pub fn new(dir: Vec3<f32>) -> Self {
        debug_assert!(!dir.map(f32::is_nan).reduce_or());
        debug_assert!(dir.is_normalized());
        Self(dir)
    }

    pub fn from_unnormalized(dirs: Vec3<f32>) -> Option<Self> {
        dirs.try_normalized().map(|dir| {
            #[cfg(debug_assertions)]
            {
                if dir.map(f32::is_nan).reduce_or() {
                    panic!("{} => {}", dirs, dir);
                }
            }
            Self(dir)
        })
    }

    /// Generates a random direction that has a z component of 0
    pub fn random_2d(rng: &mut impl Rng) -> Self {
        let a = rng.gen_range(0.0..std::f32::consts::TAU);
        // This will always be normalized.
        Self::new(Vec3::new(a.cos(), a.sin(), 0.0))
    }

    pub fn slerp(from: Self, to: Self, factor: f32) -> Self {
        Self(slerp_normalized(from.0, to.0, factor))
    }

    #[must_use]
    pub fn slerped_to(self, to: Self, factor: f32) -> Self {
        Self(slerp_normalized(self.0, to.0, factor))
    }

    /// Note: this uses `from` if `to` is unnormalizable
    pub fn slerp_to_vec3(from: Self, to: Vec3<f32>, factor: f32) -> Self {
        Self(slerp_to_unnormalized(from.0, to, factor).unwrap_or_else(|e| e))
    }

    pub fn rotation_between(&self, to: Self) -> Quaternion<f32> {
        Quaternion::<f32>::rotation_from_to_3d(self.0, to.0)
    }

    pub fn rotation(&self) -> Quaternion<f32> { Self::default().rotation_between(*self) }

    pub fn is_valid(&self) -> bool { !self.0.map(f32::is_nan).reduce_or() && self.is_normalized() }

    pub fn up() -> Self { Dir::new(Vec3::<f32>::unit_z()) }

    pub fn down() -> Self { -Dir::new(Vec3::<f32>::unit_z()) }

    pub fn left() -> Self { -Dir::new(Vec3::<f32>::unit_x()) }

    pub fn right() -> Self { Dir::new(Vec3::<f32>::unit_x()) }

    pub fn forward() -> Self { Dir::new(Vec3::<f32>::unit_y()) }

    pub fn back() -> Self { -Dir::new(Vec3::<f32>::unit_y()) }

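    /// Returns the horizontal (XY) part of this direction, renormalized;
    /// `None` if the direction is (close to) vertical, since the XY part
    /// cannot be normalized in that case.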
    pub fn to_horizontal(self) -> Option<Self> { Self::from_unnormalized(self.xy().into()) }

    pub fn vec(&self) -> &Vec3<f32> { &self.0 }

    pub fn to_vec(self) -> Vec3<f32> { self.0 }

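    /// Returns a direction with `look_dir`'s horizontal (XY) heading and
    /// `self`'s pitch applied to it (see the step-by-step comment in the body).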
    pub fn merge_z(self, look_dir: Self) -> Self {
        // First, take `self` without its Z part and normalize it; this is
        // `xy_dir`.
        //
        // Then find the rotation between `xy_dir` and `self`, which gives us
        // the pitch quaternion needed to recover `self`'s Z component.
        //
        // Finally, construct an `Ori` from `look_dir` without its Z part and
        // apply `pitch` to it, so the result keeps `look_dir`'s horizontal
        // heading while taking its pitch from `self`.
        let xy_dir = Dir::from_unnormalized(Vec3::new(self.x, self.y, 0.0)).unwrap_or_default();
        let pitch = xy_dir.rotation_between(self);

        Ori::from(Vec3::new(look_dir.x, look_dir.y, 0.0))
            .prerotated(pitch)
            .look_dir()
    }
}

impl std::ops::Deref for Dir {
    type Target = Vec3<f32>;

    fn deref(&self) -> &Vec3<f32> { &self.0 }
}

impl From<Dir> for Vec3<f32> {
    fn from(dir: Dir) -> Self { *dir }
}

impl Projection<Plane> for Dir {
    type Output = Option<Self>;

    fn projected(self, plane: &Plane) -> Self::Output {
        Dir::from_unnormalized(plane.projection(*self))
    }
}

impl Projection<Dir> for Vec3<f32> {
    type Output = Vec3<f32>;

    fn projected(self, dir: &Dir) -> Self::Output {
        let dir = **dir;
        self.dot(dir) * dir
    }
}

impl std::ops::Mul<Dir> for Quaternion<f32> {
    type Output = Dir;

    fn mul(self, dir: Dir) -> Self::Output { Dir((self * *dir).normalized()) }
}

impl std::ops::Neg for Dir {
    type Output = Dir;

    fn neg(self) -> Dir { Dir::new(-self.0) }
}

/// Begone ye NaN's
/// Slerp two `Vec3`s, skipping the slerp if their directions are very close.
/// This avoids a case where `vek`'s slerp produces NaN's.
/// Additionally, it avoids unnecessary calculations if they are near identical.
/// Assumes `from` and `to` are normalized and returns a normalized vector.
#[inline(always)]
fn slerp_normalized(from: Vec3<f32>, to: Vec3<f32>, factor: f32) -> Vec3<f32> {
    debug_assert!(!to.map(f32::is_nan).reduce_or());
    debug_assert!(!from.map(f32::is_nan).reduce_or());
    // Ensure from is normalized
    #[cfg(debug_assertions)]
    {
        let unnormalized = {
            let len_sq = from.magnitude_squared();
            !(0.999..=1.001).contains(&len_sq)
        };

        if unnormalized {
            panic!("Called slerp_normalized with unnormalized `from`: {}", from);
        }
    }

    // Ensure to is normalized
    #[cfg(debug_assertions)]
    {
        let unnormalized = {
            let len_sq = to.magnitude_squared();
            !(0.999..=1.001).contains(&len_sq)
        };

        if unnormalized {
            panic!("Called slerp_normalized with unnormalized `to`: {}", to);
        }
    }

    let dot = from.dot(to);
    if dot >= 1.0 - 1E-6 {
        // Close together, just use to
        return to;
    }

    let (from, to, factor) = if dot < -0.999 {
        // Not linearly independent (slerp will fail since it doesn't check for this)
        // Instead we will choose a midpoint and slerp from or to that depending on the
        // factor
        let mid_dir = if from.z.abs() > 0.999 {
            // If the vecs lie along the z-axis, default to (1, 0, 0) as the midpoint
            Vec3::unit_x()
        } else {
            // Default to picking a midpoint in the xy plane
            Vec3::new(from.y, -from.x, 0.0).normalized()
        };

        if factor > 0.5 {
            (mid_dir, to, factor * 2.0 - 1.0)
        } else {
            (from, mid_dir, factor * 2.0)
        }
    } else {
        (from, to, factor)
    };

    let slerped = Vec3::slerp(from, to, factor);
    let slerped_normalized = slerped.normalized();
    // Ensure normalization worked
    // This should not be possible but I will leave it here for now just in case
    // something was missed
    #[cfg(debug_assertions)]
    {
        if !slerped_normalized.is_normalized() || slerped_normalized.map(f32::is_nan).reduce_or() {
            panic!(
                "Failed to normalize {:?} produced from:\nslerp(\n    {:?},\n    {:?},\n    \
                 {:?},\n)\nWith result: {:?})",
                slerped, from, to, factor, slerped_normalized
            );
        }
    }

    slerped_normalized
}

/// Begone ye NaN's
/// Slerp two `Vec3`s, skipping the slerp if their directions are very close.
/// This avoids a case where `vek`'s slerp produces NaN's.
/// Additionally, it avoids unnecessary calculations if they are near identical.
/// Assumes `from` is normalized and returns a normalized vector, but `to`
/// doesn't need to be normalized.
/// Returns `Err(from)` if `to` is unnormalizable.
// TODO: in some cases we might want to base the slerp rate on the magnitude of
// `to`, for example when `to` is velocity and `from` is orientation
fn slerp_to_unnormalized(
    from: Vec3<f32>,
    to: Vec3<f32>,
    factor: f32,
) -> Result<Vec3<f32>, Vec3<f32>> {
    to.try_normalized()
        .map(|to| slerp_normalized(from, to, factor))
        .ok_or(from)
}
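
// A few illustrative tests sketching how the items above are meant to be used.
// These are expository additions rather than part of the original module; they
// only exercise behaviour shown above (normalization on construction,
// validation on deserialization, NaN-free slerp, and projection onto a
// direction). The `Projection` path in the last test is assumed to resolve via
// the parent module, matching the `use super::{Plane, Projection}` import at
// the top of this file.
#[cfg(test)]
mod tests {
    use super::*;
    use vek::Vec3;

    #[test]
    fn from_unnormalized_normalizes_and_rejects_zero() {
        // Any non-zero, NaN-free vector is accepted and normalized...
        let dir = Dir::from_unnormalized(Vec3::new(0.0, 3.0, 4.0)).unwrap();
        assert!(dir.is_valid());
        // ...while the zero vector cannot be normalized and yields `None`.
        assert!(Dir::from_unnormalized(Vec3::zero()).is_none());
    }

    #[test]
    fn deserialized_invalid_dir_falls_back_to_default() {
        // `From<SerdeDir>` replaces unnormalized input with the default dir.
        let dir = Dir::from(SerdeDir(Vec3::new(0.0, 0.0, 2.0)));
        assert_eq!(dir, Dir::default());
    }

    #[test]
    fn slerp_between_opposite_dirs_stays_valid() {
        // Antiparallel inputs are the case `slerp_normalized` special-cases via
        // a midpoint, so the result must stay normalized and NaN-free.
        let from = Dir::new(Vec3::unit_x());
        let to = Dir::new(-Vec3::unit_x());
        for i in 0..=10 {
            let factor = i as f32 / 10.0;
            assert!(Dir::slerp(from, to, factor).is_valid());
        }
    }

    #[test]
    fn projecting_a_vec3_onto_a_dir() {
        use super::super::Projection; // assumed to live in the parent module

        // Projecting keeps only the component of the vector along the direction.
        let dir = Dir::new(Vec3::unit_z());
        let projected = Vec3::new(1.0, 2.0, 3.0).projected(&dir);
        assert!((projected - Vec3::new(0.0, 0.0, 3.0)).magnitude() < 1e-6);
    }
}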