veloren_voxygen/scene/camera.rs
use common::{terrain::TerrainGrid, vol::ReadVol};
use common_base::span;
use core::{f32::consts::PI, fmt::Debug, ops::Range};
use num::traits::{FloatConst, real::Real};
use treeculler::Frustum;
use vek::*;

pub const NEAR_PLANE: f32 = 0.0625;
pub const FAR_PLANE: f32 = 524288.06; // excessive precision: 524288.0625

const FIRST_PERSON_INTERP_TIME: f32 = 0.1;
const THIRD_PERSON_INTERP_TIME: f32 = 0.1;
const FREEFLY_INTERP_TIME: f32 = 0.0;
const LERP_ORI_RATE: f32 = 15.0;
const CLIPPING_MODE_RANGE: Range<f32> = 2.0..20.0;
pub const MIN_ZOOM: f32 = 0.1;

// Possible TODO: Add more modes
#[derive(PartialEq, Debug, Clone, Copy, Eq, Hash)]
pub enum CameraMode {
    FirstPerson = 0,
    ThirdPerson = 1,
    Freefly = 2,
}

impl Default for CameraMode {
    fn default() -> Self { Self::ThirdPerson }
}

#[derive(Clone, Copy)]
pub struct Dependents {
    pub view_mat: Mat4<f32>,
    pub view_mat_inv: Mat4<f32>,
    pub proj_mat: Mat4<f32>,
    pub proj_mat_inv: Mat4<f32>,
    /// Specifically there for satisfying our treeculler dependency, which can't
    /// handle inverted depth planes.
    pub proj_mat_treeculler: Mat4<f32>,
    pub cam_pos: Vec3<f32>,
    pub cam_dir: Vec3<f32>,
}

pub struct Camera {
    tgt_focus: Vec3<f32>,
    focus: Vec3<f32>,
    tgt_ori: Vec3<f32>,
    ori: Vec3<f32>,
    tgt_dist: f32,
    dist: f32,
    tgt_fov: f32,
    fov: f32,
    tgt_fixate: f32,
    fixate: f32,
    aspect: f32,
    mode: CameraMode,

    last_time: Option<f64>,

    dependents: Dependents,
    frustum: Frustum<f32>,
}

fn clamp_and_modulate(ori: Vec3<f32>) -> Vec3<f32> {
    Vec3 {
        // Wrap camera yaw
        x: ori.x.rem_euclid(2.0 * PI),
        // Clamp camera pitch to the vertical limits
        y: ori.y.clamp(-PI / 2.0 + 0.0001, PI / 2.0 - 0.0001),
        // Wrap camera roll
        z: ori.z.rem_euclid(2.0 * PI),
    }
}
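
// A minimal test sketch for the wrap/clamp behaviour of `clamp_and_modulate`
// above; module and test names are placeholders, assuming the usual
// `cargo test` setup.
#[cfg(test)]
mod clamp_and_modulate_sketch {
    use super::*;

    #[test]
    fn wraps_yaw_and_roll_and_clamps_pitch() {
        let ori = clamp_and_modulate(Vec3::new(-PI, 10.0, 3.0 * PI));
        // Yaw and roll are wrapped into [0, 2π).
        assert!(ori.x >= 0.0 && ori.x < 2.0 * PI);
        assert!(ori.z >= 0.0 && ori.z < 2.0 * PI);
        // Pitch is clamped just inside the vertical limits.
        assert!(ori.y > -PI / 2.0 && ori.y < PI / 2.0);
    }
}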

/// Generalized method to construct a perspective projection with x ∈ [-1,1], y
/// ∈ [-1,1], z ∈ [0,1] given fov_y_radians, aspect_ratio, 1/n, and 1/f. Note
/// that you pass in *1/n* and *1/f*, not n and f like you normally would for a
/// perspective projection; this is done to enable uniform handling of both
/// finite and infinite far planes.
///
/// The only requirements on n and f are: 1/n ≠ 1/f, and 0 ≤ 1/n * 1/f.
///
/// This ensures that the near and far plane are not identical (or else your
/// projection would not cover any distance), and that they have the same sign
/// (or else we cannot rely on clipping to properly fix your scene). This also
/// ensures that at least one of 1/n and 1/f is not 0, and by construction it
/// guarantees that neither n nor f are 0; these are required in order to make
/// sense of the definition of near and far planes, and avoid collapsing all
/// depths to a single point.
///
/// For "typical" projections (matching perspective_lh_no), you would satisfy
/// the stronger requirements. We give the typical conditions for each bullet
/// point, and then explain the consequences of not satisfying these conditions:
///
/// * 1/n < 1/f (0 to 1 depth planes, meaning n = near and f = far; if f < n,
///   depth planes go from 1 to 0, meaning f = near and n = far, aka "reverse
///   depth").
///
///   This is by far the most likely thing to want to change; inverted depth
///   coordinates have *far* better accuracy for DirectX / Metal / WGPU-like
///   APIs, when using floating point depth, while not being *worse* than the
///   alternative (OpenGL-like depth, or when using fixed-point / integer depth).
///   For maximum benefit, make sure you are using Depth32F, as on most
///   platforms this is the only depth buffer size where floating point can be
///   used.
///
///   It is a bit unintuitive to prove this, but it turns out that when using
///   1 to 0 depth planes, the point where the depth buffer has its worst
///   precision is not at the far plane (as with 0 to 1 depth planes) nor at
///   the near plane, as you might expect, but exactly at far/2 (the
///   near plane setting does not affect the point of minimum accuracy at
///   all!). However, don't let this fool you into believing the point of
///   worst precision has simply been moved around--for *any* fixed Δz that is
///   the minimum amount of depth precision you want over the whole range, and
///   any near plane, you can set the far plane farther (generally much much
///   farther!) with reversed clip space than you can with standard clip space
///   while still getting at least that much depth precision in the worst
///   case. Nor is this a small worst-case; for many desirable near and far
///   plane combinations, more than half the visible space will have
///   completely unusable precision under 0 to 1 depth, while having much better
///   than needed precision under 1 to 0 depth.
///
///   To compute the exact (at least "roughly exact") worst-case accuracy for
///   floating point depth and a given precision target Δz, for reverse clip
///   planes (this can be computed for the non-reversed case too, but it's
///   painful and the values are horrible, so don't bother), we compute
///   (assuming a finite far plane--see below for details on the infinite
///   case) the change in the integer representation of the mantissa at z=n/2:
///
///   ```ignore
///   e = floor(ln(near/(far - near))/ln(2))
///   db/dz = 2^(2-e) / ((1 / far - 1 / near) * (far)^2)
///   ```
///
///   Then the maximum precision you can safely use to get a change in the
///   integer representation of the mantissa (assuming 32-bit floating points)
///   is around:
///
///   ```ignore
///   abs(2^(-23) / (db/dz)).
///   ```
///
///   In particular, if your worst-case target accuracy over the depth range
///   is Δz, you should be okay if:
///
///   ```ignore
///   abs(Δz * (db/dz)) * 2^(23) ≥ 1.
///   ```
///
///   This only accounts for precision of the final floating-point value, so
///   it's possible that artifacts may be introduced elsewhere during the
///   computation that reduce precision further; the most famous example of
///   this is that OpenGL wipes out most of the precision gains by going from
///   [-1,1] to [0,1] by letting
///
///   ```ignore
///   clip space depth = depth * 0.5 + 0.5
///   ```
///
///   which results in huge precision errors by removing nearly all the
///   floating point values with the most precision (those close to 0).
///   Fortunately, most such artifacts are absent under the wgpu/DirectX/Metal
///   depth clip space model, so with any luck remaining depth errors due to
///   the perspective warp itself should be minimal.
///
/// * 0 ≠ 1/far (finite far plane). When this is false, the far plane is at
///   infinity; this removes the restriction of having a far plane at all, often
///   with minimal reduction in accuracy for most values in the scene. In fact,
///   in almost all cases with non-reversed depth planes, it *improves* accuracy
///   over the finite case for the vast majority of the range; however, you
///   should be using reversed depth planes, and if you are then there is a
///   quite natural accuracy vs. distance tradeoff in the infinite case.
///
///   When using an infinite far plane, the worst-case accuracy is *always* at
///   infinity, and gets progressively worse as you get farther away from the
///   near plane. However, there is a second advantage that may not be
///   immediately apparent: the perspective warp becomes much simpler,
///   potentially removing artifacts! Specifically, in the 0 to 1 depth plane
///   case, the assigned depth value (after perspective division) becomes:
///
///   ```ignore
///   depth = 1 - near/z
///   ```
///
///   while in the 1 to 0 depth plane case (which you should be using), the
///   equation is even simpler:
///
///   ```ignore
///   depth = near/z
///   ```
///
///   In the 1 to 0 case, in particular, you can see that the depth value is
///   *linear in z in log space.* This lets us compute, for any given target
///   precision, a *very* simple worst-case upper bound on the maximum
///   absolute z value for which that precision can be achieved (the upper
///   bound is tight in some cases, but in others may be conservative):
///
///   ```ignore
///   db/dz ≥ 1/z
///   ```
///
///   Plugging that into our old formula, we find that we attain the required
///   precision at least in the range (again, this is for the 1 to 0 infinite
///   case only!):
///
///   ```ignore
///   abs(z) ≤ Δz * 2^23
///   ```
///
///   One thing you may notice is that this worst-case bound *does not depend
///   on the near plane.* This means that (within reason) you can put the near
///   plane as close as you like and still attain this bound. Of course, the
///   bound is not completely tight, but it should not be off by more than a
///   factor of 2 or so (informally proven, not made rigorous yet), so for most
///   practical purposes you can set the near plane as low as you like in this
///   case.
216///
217/// * 0 < 1/near (positive near plane--best used when moving *to* left-handed
218/// spaces, as we normally do in OpenGL and DirectX). A use case for *not*
219/// doing this is that it allows moving *from* a left-handed space *to* a
220/// right-handed space in WGPU / DirectX / Metal coordinates; this means that
221/// if matrices were already set up for OpenGL using functions like look_at_rh
222/// that assume right-handed coordinates, we can simply switch these to
223/// look_at_lh and use a right-handed perspective projection with a negative
224/// near plane, to get correct rendering behavior. Details are out of scope
225/// for this comment.
226///
227/// Note that there is one final, very important thing that affects possible
228/// precision--the actual underlying precision of the floating point format at a
229/// particular value! As your z values go up, their precision will shrink, so
230/// if at all possible try to shrink your z values down to the lowest range in
231/// which they can be. Unfortunately, this cannot be part of the perspective
232/// projection itself, because by the time z gets to the projection it is
233/// usually too late for values to still be integers (or coarse-grained powers
234/// of 2). Instead, try to scale down x, y, and z as soon as possible before
235/// submitting them to the GPU, ideally by as large as possible of a power of 2
/// that works for your use case. Not only will this improve depth precision
/// and recall, it will also help address other artifacts caused by values far
/// from zero (such as improperly rounded rotations, or improper line equations
/// due to greedy meshing).
240///
241/// TODO: Consider passing fractions rather than 1/n and 1/f directly, even
242/// though the logic for why it should be okay to pass them directly is probably
243/// sound (they are both valid z values in the range, so gl_FragCoord.w will be
244/// assigned to this, meaning if they are imprecise enough then the whole
245/// calculation will be similarly imprecise).
246///
247/// TODO: Since it's a bit confusing that n and f are not always near and far,
248/// and a negative near plane can (probably) be emulated with simple actions on
249/// the perspective matrix, consider removing this functionality and replacing
250/// our assertion with a single condition: `(1/far) * (1/near) < (1/near)²`.
pub fn perspective_lh_zo_general<T>(
    fov_y_radians: T,
    aspect_ratio: T,
    inv_n: T,
    inv_f: T,
) -> Mat4<T>
where
    T: Real + FloatConst + Debug,
{
    // Per comments, we only need these two assertions to make sure our calculations
    // make sense.
    debug_assert_ne!(
        inv_n, inv_f,
        "The near and far plane distances cannot be equal, found: {:?} = {:?}",
        inv_n, inv_f
    );
    debug_assert!(
        T::zero() <= inv_n * inv_f,
        "The near and far plane distances must have the same sign, found: {:?} * {:?} < 0",
        inv_n,
        inv_f
    );

    // TODO: Would be nice to separate out the aspect ratio computations.
    let two = T::one() + T::one();
    let tan_half_fovy = (fov_y_radians / two).tan();
    let m00 = T::one() / (aspect_ratio * tan_half_fovy);
    let m11 = T::one() / tan_half_fovy;
    let m23 = -T::one() / (inv_n - inv_f);
    let m22 = inv_n * (-m23);
    Mat4::new(
        m00,
        T::zero(),
        T::zero(),
        T::zero(),
        T::zero(),
        m11,
        T::zero(),
        T::zero(),
        T::zero(),
        T::zero(),
        m22,
        m23,
        T::zero(),
        T::zero(),
        T::one(),
        T::zero(),
    )
}
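
// A minimal illustrative sketch (module and test names are placeholders):
// checks the `depth = near/z` identity quoted in the comment above for the
// reversed-depth, infinite-far-plane configuration, assuming a plain
// `cargo test` run.
#[cfg(test)]
mod perspective_lh_zo_general_sketch {
    use super::*;

    #[test]
    fn reversed_depth_with_infinite_far_plane_is_near_over_z() {
        let near = NEAR_PLANE;
        // Reversed (1 to 0) depth with an infinite far plane: the "n" slot gets
        // 1/∞ = 0 and the "f" slot gets 1/near, per the comment above.
        let proj = perspective_lh_zo_general(1.1f32, 1.0, 0.0, 1.0 / near);
        for z in [0.5f32, 10.0, 1000.0, 100_000.0] {
            // Left-handed view space looks down +z.
            let clip = proj * Vec4::new(0.0, 0.0, z, 1.0);
            let depth = clip.z / clip.w;
            assert!((depth - near / z).abs() < 1e-6);
        }
    }
}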

/// Same as perspective_lh_zo_general, but for right-handed source spaces.
pub fn perspective_rh_zo_general<T>(
    fov_y_radians: T,
    aspect_ratio: T,
    inv_n: T,
    inv_f: T,
) -> Mat4<T>
where
    T: Real + FloatConst + Debug,
{
    let mut m = perspective_lh_zo_general(fov_y_radians, aspect_ratio, inv_n, inv_f);
    m[(2, 2)] = -m[(2, 2)];
    m[(3, 2)] = -m[(3, 2)];
    m
}
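
// A small sketch (names are placeholders) of how the reversed-depth projection
// built in `compute_dependents_helper` below behaves: with 1/far in the `inv_n`
// slot and 1/near in the `inv_f` slot, the near plane should land at depth 1
// and the far plane at depth 0.
#[cfg(test)]
mod reversed_depth_projection_sketch {
    use super::*;

    #[test]
    fn near_maps_to_one_and_far_maps_to_zero() {
        // Same argument order as the camera itself.
        let proj = perspective_rh_zo_general(1.1f32, 1.0, 1.0 / FAR_PLANE, 1.0 / NEAR_PLANE);

        // Right-handed view space looks down -z.
        let at = |d: f32| {
            let clip = proj * Vec4::new(0.0, 0.0, -d, 1.0);
            clip.z / clip.w
        };
        assert!((at(NEAR_PLANE) - 1.0).abs() < 1e-4);
        assert!(at(FAR_PLANE).abs() < 1e-4);
    }
}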

impl Camera {
    /// Create a new `Camera` with default parameters.
    pub fn new(aspect: f32, mode: CameraMode) -> Self {
        // Make sure aspect is valid
        let aspect = if aspect.is_normal() { aspect } else { 1.0 };

        let dist = match mode {
            CameraMode::ThirdPerson => 10.0,
            CameraMode::FirstPerson | CameraMode::Freefly => MIN_ZOOM,
        };

        Self {
            tgt_focus: Vec3::unit_z() * 10.0,
            focus: Vec3::unit_z() * 10.0,
            tgt_ori: Vec3::zero(),
            ori: Vec3::zero(),
            tgt_dist: dist,
            dist,
            tgt_fov: 1.1,
            fov: 1.1,
            tgt_fixate: 1.0,
            fixate: 1.0,
            aspect,
            mode,

            last_time: None,

            dependents: Dependents {
                view_mat: Mat4::identity(),
                view_mat_inv: Mat4::identity(),
                proj_mat: Mat4::identity(),
                proj_mat_inv: Mat4::identity(),
                proj_mat_treeculler: Mat4::identity(),
                cam_pos: Vec3::zero(),
                cam_dir: Vec3::unit_y(),
            },
            frustum: Frustum::from_modelview_projection(Mat4::identity().into_col_arrays()),
        }
    }

    /// Compute the transformation matrices (view matrix and projection matrix)
    /// and position of the camera.
    pub fn compute_dependents(&mut self, terrain: &TerrainGrid) {
        self.compute_dependents_full(terrain, |block| block.is_opaque())
    }

    /// The `is_transparent` argument should return true for transparent voxels.
    pub fn compute_dependents_full<V: ReadVol>(
        &mut self,
        terrain: &V,
        is_transparent: fn(&V::Vox) -> bool,
    ) {
        span!(_guard, "compute_dependents", "Camera::compute_dependents");
        // TODO: More intelligent function to decide on which strategy to use
        if self.tgt_dist < CLIPPING_MODE_RANGE.end {
            self.compute_dependents_near(terrain, is_transparent)
        } else {
            self.compute_dependents_far(terrain, is_transparent)
        }
    }

    fn compute_dependents_near<V: ReadVol>(
        &mut self,
        terrain: &V,
        is_transparent: fn(&V::Vox) -> bool,
    ) {
        const FRUSTUM_PADDING: [Vec3<f32>; 4] = [
            Vec3::new(0.0, 0.0, -1.0),
            Vec3::new(0.0, 0.0, 1.0),
            Vec3::new(0.0, 0.0, -1.0),
            Vec3::new(0.0, 0.0, 1.0),
        ];
        // Calculate new frustum location as there may have been lerp towards tgt_dist
        // Without this, there will be camera jumping back and forth in some scenarios
        // TODO: Optimize and fix clipping still happening if self.dist << self.tgt_dist

        // Use tgt_dist, as otherwise we end up in loop due to dist depending on frustum
        // and vice versa
        let local_dependents = self.compute_dependents_helper(self.tgt_dist);
        let frustum = self.compute_frustum(&local_dependents);
        let dist = {
            frustum
                .points
                .iter()
                .take(4)
                .zip(FRUSTUM_PADDING.iter())
                .map(|(pos, padding)| {
                    let fwd = self.forward();
                    // TODO: undo once treeculler is on vek 0.15.7
                    let transformed = Vec3::new(pos.x, pos.y, pos.z);
                    transformed + 0.6 * (fwd.cross(*padding) + fwd.cross(*padding).cross(fwd))
                })
                .chain([(self.focus - self.forward() * (self.dist + 0.5))]) // Padding to behind
                .map(|pos| {
                    match terrain
                        .ray(self.focus, pos)
                        .ignore_error()
                        .max_iter(500)
                        .until(is_transparent)
                        .cast()
                    {
                        (d, Ok(Some(_))) => f32::min(d, self.tgt_dist),
                        (_, Ok(None)) => self.dist,
                        (_, Err(_)) => self.dist,
                    }
                    .max(0.0)
                })
                .reduce(f32::min)
                .unwrap_or(0.0)
        };

        // If the camera ends up being too close to the focus point, switch policies.
        if dist < CLIPPING_MODE_RANGE.start {
            self.compute_dependents_far(terrain, is_transparent);
        } else {
            if self.dist >= dist {
                self.dist = dist;
            }

            // Recompute only if needed
            if (dist - self.tgt_dist).abs() > f32::EPSILON {
                let dependents = self.compute_dependents_helper(dist);
                self.frustum = self.compute_frustum(&dependents);
                self.dependents = dependents;
            } else {
                self.dependents = local_dependents;
                self.frustum = frustum;
            }
        }
    }

    fn compute_dependents_far<V: ReadVol>(
        &mut self,
        terrain: &V,
        is_transparent: fn(&V::Vox) -> bool,
    ) {
        let dist = {
            let (start, end) = (self.focus - self.forward() * self.dist, self.focus);

            match terrain
                .ray(start, end)
                .ignore_error()
                .max_iter(500)
                .until(|b| !is_transparent(b))
                .cast()
            {
                (d, Ok(Some(_))) => f32::min(self.dist - d - 0.03, self.dist),
                (_, Ok(None)) => self.dist,
                (_, Err(_)) => self.dist,
            }
            .max(0.0)
        };

        let dependents = self.compute_dependents_helper(dist);
        self.frustum = self.compute_frustum(&dependents);
        self.dependents = dependents;
    }

    fn compute_dependents_helper(&self, dist: f32) -> Dependents {
        let view_mat = Mat4::<f32>::identity()
            * Mat4::translation_3d(-Vec3::unit_z() * dist)
            * Mat4::rotation_z(self.ori.z)
            * Mat4::rotation_x(self.ori.y)
            * Mat4::rotation_y(self.ori.x)
            * Mat4::rotation_3d(PI / 2.0, -Vec4::unit_x())
            * Mat4::translation_3d(-self.focus.map(|e| e.fract()));
        let view_mat_inv: Mat4<f32> = view_mat.inverted();

        let fov = self.get_effective_fov();
        // NOTE: We reverse the far and near planes to produce an inverted depth
        // buffer (1 to 0 z planes).
        let proj_mat =
            perspective_rh_zo_general(fov, self.aspect, 1.0 / FAR_PLANE, 1.0 / NEAR_PLANE);
        // For treeculler, we also produce a version without inverted depth.
        let proj_mat_treeculler =
            perspective_rh_zo_general(fov, self.aspect, 1.0 / NEAR_PLANE, 1.0 / FAR_PLANE);

        Dependents {
            view_mat,
            view_mat_inv,
            proj_mat,
            proj_mat_inv: proj_mat.inverted(),
            proj_mat_treeculler,
            cam_pos: Vec3::from(view_mat_inv * Vec4::unit_w()),
            cam_dir: Vec3::from(view_mat_inv * -Vec4::unit_z()),
        }
    }

    fn compute_frustum(&mut self, dependents: &Dependents) -> Frustum<f32> {
        Frustum::from_modelview_projection(
            (dependents.proj_mat_treeculler
                * dependents.view_mat
                * Mat4::translation_3d(-self.focus.map(|e| e.trunc())))
            .into_col_arrays(),
        )
    }

    pub fn frustum(&self) -> &Frustum<f32> { &self.frustum }

    pub fn dependents(&self) -> Dependents { self.dependents }

    /// Rotate the camera about its focus by the given delta, limiting the input
    /// accordingly.
    pub fn rotate_by(&mut self, delta: Vec3<f32>) {
        let delta = delta * self.fixate;
        // Wrap camera yaw
        self.tgt_ori.x = (self.tgt_ori.x + delta.x).rem_euclid(2.0 * PI);
        // Clamp camera pitch to the vertical limits
        self.tgt_ori.y = (self.tgt_ori.y + delta.y).clamp(-PI / 2.0 + 0.001, PI / 2.0 - 0.001);
        // Wrap camera roll
        self.tgt_ori.z = (self.tgt_ori.z + delta.z).rem_euclid(2.0 * PI);
    }

    /// Set the orientation of the camera about its focus.
    pub fn set_orientation(&mut self, ori: Vec3<f32>) { self.tgt_ori = clamp_and_modulate(ori); }

    /// Set the orientation of the camera about its focus without lerping.
    pub fn set_orientation_instant(&mut self, ori: Vec3<f32>) {
        self.set_orientation(ori);
        self.ori = self.tgt_ori;
    }

    /// Zoom the camera by the given delta, limiting the input accordingly.
    pub fn zoom_by(&mut self, delta: f32, cap: Option<f32>) {
        if self.mode == CameraMode::ThirdPerson {
            // Clamp camera dist to the 2 <= x <= infinity range
            self.tgt_dist = (self.tgt_dist + delta).max(2.0);
        }

        if let Some(cap) = cap {
            self.tgt_dist = self.tgt_dist.min(cap);
        }
    }

    /// Zoom with the ability to switch between first and third-person mode.
    ///
    /// Note that cap > 18237958000000.0 can cause panic due to float overflow
    pub fn zoom_switch(&mut self, delta: f32, cap: f32, scale: f32) {
        if delta > 0_f32 || self.mode != CameraMode::FirstPerson {
            let t = self.tgt_dist + delta;
            const MIN_THIRD_PERSON: f32 = 2.35;
            match self.mode {
                CameraMode::ThirdPerson => {
                    if t < MIN_THIRD_PERSON * scale {
                        self.set_mode(CameraMode::FirstPerson);
                    } else {
                        self.tgt_dist = t;
                    }
                },
                CameraMode::FirstPerson => {
                    self.set_mode(CameraMode::ThirdPerson);
                    self.tgt_dist = MIN_THIRD_PERSON * scale;
                },
                _ => {},
            }
        }

        self.tgt_dist = self.tgt_dist.min(cap);
    }

    /// Get the distance of the camera from the focus
    pub fn get_distance(&self) -> f32 { self.dist }

    /// Set the distance of the camera from the focus (i.e., zoom).
    pub fn set_distance(&mut self, dist: f32) { self.tgt_dist = dist; }

    pub fn update(&mut self, time: f64, dt: f32, smoothing_enabled: bool) {
        // This is horribly frame time dependent, but so is most of the game
        let delta = self.last_time.replace(time).map_or(0.0, |t| time - t);
        if (self.dist - self.tgt_dist).abs() > 0.01 {
            self.dist = Lerp::lerp(
                self.dist,
                self.tgt_dist,
                0.65 * (delta as f32) / self.interp_time(),
            );
        }

        if (self.fov - self.tgt_fov).abs() > 0.01 {
            self.fov = Lerp::lerp(
                self.fov,
                self.tgt_fov,
                0.65 * (delta as f32) / self.interp_time(),
            );
        }

        if (self.fixate - self.tgt_fixate).abs() > 0.01 {
            self.fixate = Lerp::lerp(
                self.fixate,
                self.tgt_fixate,
                0.65 * (delta as f32) / self.interp_time(),
            );
        }

        if (self.focus - self.tgt_focus).magnitude_squared() > 0.001 {
            let lerped_focus = Lerp::lerp(
                self.focus,
                self.tgt_focus,
                (delta as f32) / self.interp_time()
                    * if matches!(self.mode, CameraMode::FirstPerson) {
                        2.0
                    } else {
                        1.0
                    },
            );

            self.focus.x = lerped_focus.x;
            self.focus.y = lerped_focus.y;

            // Always lerp in z
            self.focus.z = lerped_focus.z;
        }

        let lerp_angle = |a: f32, b: f32, rate: f32| {
            let offs = [-2.0 * PI, 0.0, 2.0 * PI]
                .iter()
                .min_by_key(|offs: &&f32| ((a - (b + *offs)).abs() * 1000.0) as i32)
                .unwrap();
            Lerp::lerp(a, b + *offs, rate)
        };

        let ori = if smoothing_enabled {
            Vec3::new(
                lerp_angle(self.ori.x, self.tgt_ori.x, LERP_ORI_RATE * dt),
                Lerp::lerp(self.ori.y, self.tgt_ori.y, LERP_ORI_RATE * dt),
                lerp_angle(self.ori.z, self.tgt_ori.z, LERP_ORI_RATE * dt),
            )
        } else {
            self.tgt_ori
        };
        self.ori = clamp_and_modulate(ori);
    }

    pub fn interp_time(&self) -> f32 {
        match self.mode {
            CameraMode::FirstPerson => FIRST_PERSON_INTERP_TIME,
            CameraMode::ThirdPerson => THIRD_PERSON_INTERP_TIME,
            CameraMode::Freefly => FREEFLY_INTERP_TIME,
        }
    }

    /// Get the focus position of the camera.
    pub fn get_focus_pos(&self) -> Vec3<f32> { self.focus }

    /// Set the focus position of the camera.
    pub fn set_focus_pos(&mut self, focus: Vec3<f32>) { self.tgt_focus = focus; }

    /// Set the focus position of the camera, without lerping.
    pub fn force_focus_pos(&mut self, focus: Vec3<f32>) {
        self.tgt_focus = focus;
        self.focus = focus;
    }

    /// Get the aspect ratio of the camera.
    pub fn get_aspect_ratio(&self) -> f32 { self.aspect }

    /// Set the aspect ratio of the camera.
    pub fn set_aspect_ratio(&mut self, aspect: f32) {
        self.aspect = if aspect.is_normal() { aspect } else { 1.0 };
    }

    /// Get the orientation of the camera.
    pub fn get_orientation(&self) -> Vec3<f32> { self.ori }

    /// Get the orientation that the camera is moving toward.
    pub fn get_tgt_orientation(&self) -> Vec3<f32> { self.tgt_ori }

    /// Get the field of view of the camera in radians, taking into account
    /// fixation.
    pub fn get_effective_fov(&self) -> f32 { self.fov * self.fixate }

    // /// Get the field of view of the camera in radians.
    // pub fn get_fov(&self) -> f32 { self.fov }

    /// Set the field of view of the camera in radians.
    pub fn set_fov(&mut self, fov: f32) { self.tgt_fov = fov; }

    /// Set the 'fixation' proportion, allowing the camera to focus in with
    /// precise aiming. Fixation is applied on top of the regular FoV.
    pub fn set_fixate(&mut self, fixate: f32) { self.tgt_fixate = fixate; }

    /// Set the FOV in degrees
    pub fn set_fov_deg(&mut self, fov: u16) {
        // Magic value comes from pi/180; no use recalculating.
        self.set_fov((fov as f32) * 0.01745329)
    }

    /// Set the mode of the camera.
    pub fn set_mode(&mut self, mode: CameraMode) {
        if self.mode != mode {
            self.mode = mode;
            match self.mode {
                CameraMode::ThirdPerson => {
                    self.zoom_by(5.0, None);
                },
                CameraMode::FirstPerson => {
                    self.set_distance(MIN_ZOOM);
                },
                CameraMode::Freefly => {
                    self.set_distance(MIN_ZOOM);
                },
            }
        }
    }

    /// Get the mode of the camera
    pub fn get_mode(&self) -> CameraMode {
        // Perform a bit of a trick... don't report first-person until the camera has
        // lerped close enough to the player.
        match self.mode {
            CameraMode::FirstPerson if self.dist < 0.5 => CameraMode::FirstPerson,
            CameraMode::FirstPerson => CameraMode::ThirdPerson,
            mode => mode,
        }
    }

    /// Cycle the camera to its next valid mode. If is_admin is false then only
    /// modes which are accessible without admin access will be cycled to.
    pub fn next_mode(&mut self, is_admin: bool, has_target: bool) {
        if has_target {
            self.set_mode(match self.mode {
                CameraMode::ThirdPerson => CameraMode::FirstPerson,
                CameraMode::FirstPerson => {
                    if is_admin {
                        CameraMode::Freefly
                    } else {
                        CameraMode::ThirdPerson
                    }
                },
                CameraMode::Freefly => CameraMode::ThirdPerson,
            });
        } else {
            self.set_mode(CameraMode::Freefly);
        }
    }

    /// Return a unit vector in the forward direction for the current camera
    /// orientation
    pub fn forward(&self) -> Vec3<f32> {
        Vec3::new(
            f32::sin(self.ori.x) * f32::cos(self.ori.y),
            f32::cos(self.ori.x) * f32::cos(self.ori.y),
            -f32::sin(self.ori.y),
        )
    }

    /// Return a unit vector in the right direction for the current camera
    /// orientation
    pub fn right(&self) -> Vec3<f32> {
        const UP: Vec3<f32> = Vec3::new(0.0, 0.0, 1.0);
        self.forward().cross(UP).normalized()
    }

    /// Return a unit vector in the forward direction on the XY plane for
    /// the current camera orientation
    pub fn forward_xy(&self) -> Vec2<f32> { Vec2::new(f32::sin(self.ori.x), f32::cos(self.ori.x)) }

    /// Return a unit vector in the right direction on the XY plane for
    /// the current camera orientation
    pub fn right_xy(&self) -> Vec2<f32> { Vec2::new(f32::cos(self.ori.x), -f32::sin(self.ori.x)) }

    pub fn get_pos_with_focus(&self) -> Vec3<f32> {
        let focus_off = self.get_focus_pos().map(f32::trunc);
        self.dependents().cam_pos + focus_off
    }
780 }
781}