veloren_world/sim/util.rs
use bitvec::prelude::{BitBox, bitbox};
use common::{
    terrain::{MapSizeLg, TerrainChunkSize, neighbors, uniform_idx_as_vec2, vec2_as_uniform_idx},
    vol::RectVolSize,
};
use common_base::prof_span;
use noise::{
    MultiFractal, NoiseFn, Seedable, core::worley::*, math::vectors::*,
    permutationtable::PermutationTable,
};
use num::Float;
use rayon::prelude::*;
use std::sync::Arc;
use vek::*;

/// Calculates the smallest distance along an axis (x, y) from an edge of
/// the world. This value is maximal at map_size_lg.chunks() / 2 and minimized
/// at the extremes (0 or map_size_lg.chunks() on one or more axes). It then
/// divides the quantity by cell_size, so the final result is 1 when we are not
/// in a cell along the edge of the world, and ranges between 0 and 1 otherwise
/// (lower when the chunk is closer to the edge).
pub fn map_edge_factor(map_size_lg: MapSizeLg, posi: usize) -> f32 {
    uniform_idx_as_vec2(map_size_lg, posi)
        .map2(map_size_lg.chunks().map(i32::from), |e, sz| {
            (sz / 2 - (e - sz / 2).abs()) as f32 / (16.0 / 1024.0 * sz as f32)
        })
        .reduce_partial_min()
        .clamp(0.0, 1.0)
}
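
// Illustrative sketch only (not part of the original worldgen code): this
// reproduces the arithmetic of `map_edge_factor` on a single hypothetical
// 1024-chunk axis so the 16.0 / 1024.0 cell_size factor is easier to read. The
// chunk count and sample positions are assumptions made up for the example.
#[cfg(test)]
mod map_edge_factor_example {
    #[test]
    fn edge_factor_along_one_axis() {
        let sz = 1024i32;
        let factor = |e: i32| {
            ((sz / 2 - (e - sz / 2).abs()) as f32 / (16.0 / 1024.0 * sz as f32)).clamp(0.0, 1.0)
        };
        // On the edge itself the factor is 0.
        assert_eq!(factor(0), 0.0);
        // 16 chunks in (one "cell"), the factor saturates at 1.
        assert_eq!(factor(16), 1.0);
        // And it stays saturated all the way to the middle of the axis.
        assert_eq!(factor(512), 1.0);
    }
}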

/// Computes the cumulative distribution function of the weighted sum of k
/// independent, uniformly distributed random variables between 0 and 1. For
/// each variable i, we use `weights[i]` as the weight to give `samples[i]` (the
/// weights should all be positive).
///
/// If the precondition is met, the distribution of the result of calling this
/// function will be uniformly distributed while preserving the same information
/// that was in the original average.
///
/// For N > 33 the function will no longer return correct results since we will
/// overflow u32.
///
/// NOTE:
///
/// Per [[1]], the problem of determining the CDF of the sum of uniformly
/// distributed random variables over *different* ranges is considerably more
/// complicated than it is for the same-range case. Fortunately, it also
/// provides a reference to [2], which contains a complete derivation of an
/// exact rule for the density function for this case. The CDF [[3]] is just
/// the integral of that density function, which we use to convert this into a
/// CDF formula.
///
/// This allows us to sum weighted, uniform, independent random variables.
///
/// At some point, we should probably contribute this back to stats-rs.
///
/// 1. [https://www.r-bloggers.com/sums-of-random-variables/][1],
/// 2. Sadooghi-Alvandi, S., A. Nematollahi, & R. Habibi, 2009. On the
///    Distribution of the Sum of Independent Uniform Random Variables.
///    Statistical Papers, 50, 171-175.
/// 3. [https://en.wikipedia.org/wiki/Cumulative_distribution_function][3]
///
/// [1]: https://www.r-bloggers.com/sums-of-random-variables/
/// [3]: https://en.wikipedia.org/wiki/Cumulative_distribution_function
pub fn cdf_irwin_hall<const N: usize>(weights: &[f32; N], samples: [f32; N]) -> f32 {
    // Let J_k = {(j_1, ... , j_k) : 1 ≤ j_1 < j_2 < ··· < j_k ≤ N }.
    //
    // Let A_N = Π{k = 1 to n}a_k.
    //
    // The density function for N ≥ 2 is:
    //
    // 1/(A_N * (N - 1)!) * (x^(N-1) + Σ{k = 1 to N}((-1)^k *
    // Σ{(j_1, ..., j_k) ∈ J_k}(max(0, x - Σ{l = 1 to k}(a_(j_l)))^(N - 1))))
    //
    // So the cumulative distribution function is its integral, i.e. (I think)
    //
    // 1/(product{k in A}(k) * N!) * (x^N + sum(k in 1 to N)((-1)^k *
    // sum{j in Subsets[A, {k}]}(max(0, x - sum{l in j}(l))^N)))
    //
    // which is also equivalent to
    //
    // (letting B_k = { a in Subsets[A, {k}] : sum {l in a} l }, B_(0,1) = 0 and
    // H_k = { i : 1 ≤ i ≤ N! / (k! * (N - k)!) })
    //
    // 1/(product{k in A}(k) * N!) * sum(k in 0 to N)((-1)^k *
    // sum{l in H_k}(max(0, x - B_(k,l))^N))
    //
    // We should be able to iterate through the whole power set
    // instead, and figure out K by calling count_ones(), so we can compute the
    // result in O(2^N) iterations.
    let x: f64 = weights
        .iter()
        .zip(samples.iter())
        .map(|(&weight, &sample)| weight as f64 * sample as f64)
        .sum();

    let mut y = 0.0f64;
    for subset in 0u32..(1 << N) {
        // Number of set elements
        let k = subset.count_ones();
        // Add together exactly the set elements to get B_subset
        let z = weights
            .iter()
            .enumerate()
            .filter(|(i, _)| subset & (1 << i) as u32 != 0)
            .map(|(_, &k)| k as f64)
            .sum::<f64>();
        // Compute max(0, x - B_subset)^N
        let z = (x - z).max(0.0).powi(N as i32);
        // The parity of k determines whether the sum is negated.
        y += if k & 1 == 0 { z } else { -z };
    }

    // Divide by the product of the weights.
    y /= weights.iter().map(|&k| k as f64).product::<f64>();

    // Remember to multiply by 1 / N! at the end.
    (y / (1..(N as i32) + 1).product::<i32>() as f64) as f32
}
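
// Quick sanity check added as an illustration (not exhaustive): with two unit
// weights this should agree with the classic Irwin-Hall CDF x^N / N! on [0, 1],
// and reach 1 when both samples are at their maximum. The sample values are
// assumptions made up for the example.
#[cfg(test)]
mod cdf_irwin_hall_example {
    use super::*;

    #[test]
    fn matches_irwin_hall_for_two_unit_weights() {
        // x = 0.25 + 0.25 = 0.5, and P(U1 + U2 <= 0.5) = 0.5^2 / 2! = 0.125.
        assert!((cdf_irwin_hall(&[1.0, 1.0], [0.25, 0.25]) - 0.125).abs() < 1e-6);
        // At the maximum possible sum the CDF must be 1.
        assert!((cdf_irwin_hall(&[1.0, 1.0], [1.0, 1.0]) - 1.0).abs() < 1e-6);
    }
}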

/// First component of each element of the vector is the computed CDF of the
/// noise function at this index (i.e. its position in a sorted list of values
/// returned by the noise function applied to every chunk in the game). Second
/// component is the cached value of the noise function that generated the
/// index.
pub type InverseCdf<F = f32> = Box<[(f32, F)]>;

/// NOTE: First component is estimated horizon angles at each chunk; second
/// component is estimated heights of maximal occluder at each chunk (used
/// for making shadows volumetric).
pub type HorizonMap<A, H> = (Vec<A>, Vec<H>);

/// Compute inverse cumulative distribution function for arbitrary function f,
/// the hard way. We pre-generate noise values prior to worldgen, then sort
/// them in order to determine the correct position in the sorted order. That
/// lets us use `(index + 1) / (WORLDSIZE.y * WORLDSIZE.x)` as a uniformly
/// distributed (from almost-0 to 1) regularization of the chunks. That is, if
/// we apply the computed "function" F⁻¹(x, y) to (x, y) and get out p, it means
/// that approximately (100 * p)% of chunks have a lower value for F⁻¹ than p.
/// The main purpose of doing this is to make sure we are using the entire range
/// we want, and to allow us to apply the numerous results about distributions
/// on uniform functions to the procedural noise we generate, which lets us much
/// more reliably control the *number* of features in the world while still
/// letting us play with the *shape* of those features, without having arbitrary
/// cutoff points / discontinuities (which tend to produce ugly-looking /
/// unnatural terrain).
///
/// As a concrete example, before doing this it was very hard to tweak humidity
/// so that either most of the world wasn't dry, or most of it wasn't wet, by
/// combining the billow noise function and the computed altitude. This is
/// because the billow noise function has a very unusual distribution that is
/// heavily skewed towards 0. By correcting for this tendency, we can start
/// with uniformly distributed billow noise and altitudes and combine them to
/// get uniformly distributed humidity, while still preserving the existing
/// shapes that the billow noise and altitude functions produce.
///
/// f takes an index, which represents the index corresponding to this chunk in
/// any SimChunk vector returned by uniform_noise, and (for convenience) the
/// float-translated version of those coordinates.
/// f should return a value with no NaNs. If there is a NaN, it will panic.
/// There are no other conditions on f. If f returns None, the value will be
/// set to NaN, and will be ignored for the purposes of computing the uniform
/// range.
///
/// Returns a vec of (f32, f32) pairs consisting of the percentage of chunks
/// with a value lower than this one, and the actual noise value (we don't need
/// to cache it, but it makes it easier to ensure that subsequent code that
/// needs the noise value actually uses the same one we were using here). Also
/// returns the "inverted index" pointing from a position to a noise.
pub fn uniform_noise<F: Float + Send>(
    map_size_lg: MapSizeLg,
    f: impl Fn(usize, Vec2<f64>) -> Option<F> + Sync,
) -> (InverseCdf<F>, Box<[(usize, F)]>) {
    let mut noise = (0..map_size_lg.chunks_len())
        .into_par_iter()
        .filter_map(|i| {
            f(
                i,
                (uniform_idx_as_vec2(map_size_lg, i)
                    * TerrainChunkSize::RECT_SIZE.map(|e| e as i32))
                .map(|e| e as f64),
            )
            .map(|res| (i, res))
        })
        .collect::<Vec<_>>();

    // sort_unstable_by is equivalent to sort_by here since we include a unique
    // index in the comparison. We could leave out the index, but this might
    // make the order not reproduce the same way between different versions of
    // Rust (for example).
    noise.par_sort_unstable_by(|f, g| (f.1, f.0).partial_cmp(&(g.1, g.0)).unwrap());

    // Construct a vector that associates each chunk position with the 1-indexed
    // position of the noise in the sorted vector (divided by the vector length).
    // This guarantees a uniform distribution among the samples (excluding those
    // that returned None, which will remain at zero).
    let mut uniform_noise = vec![(0.0, F::nan()); map_size_lg.chunks_len()].into_boxed_slice();
    // NOTE: Consider using try_into here and elsewhere in this function, since
    // i32::MAX technically doesn't fit in an f32 (even if we should never reach
    // that limit).
    let total = noise.len() as f32;
    for (noise_idx, &(chunk_idx, noise_val)) in noise.iter().enumerate() {
        uniform_noise[chunk_idx] = ((1 + noise_idx) as f32 / total, noise_val);
    }
    (uniform_noise, noise.into_boxed_slice())
}
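
// Standalone sketch of the rank transform performed above, on a plain slice
// rather than a chunk grid (so it needs no MapSizeLg); the sample values are
// made up for the example. Each value is replaced by its 1-indexed rank divided
// by the sample count, flattening an arbitrarily skewed input into an even
// spread over (0, 1].
#[cfg(test)]
mod uniform_noise_rank_transform_example {
    #[test]
    fn rank_transform_flattens_a_skewed_distribution() {
        let samples = [0.01f32, 0.9, 0.02, 0.03]; // heavily skewed towards 0
        let mut order: Vec<usize> = (0..samples.len()).collect();
        order.sort_unstable_by(|&a, &b| samples[a].partial_cmp(&samples[b]).unwrap());
        let mut uniform = vec![0.0f32; samples.len()];
        for (rank, &idx) in order.iter().enumerate() {
            uniform[idx] = (rank + 1) as f32 / samples.len() as f32;
        }
        // 0.01 < 0.02 < 0.03 < 0.9, so the ranks land at 1/4, 2/4, 3/4 and 4/4.
        assert_eq!(uniform, vec![0.25, 1.0, 0.5, 0.75]);
    }
}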

/// Iterate through all cells adjacent to, and including, the four chunks whose
/// top-left point is posi. This isn't just the immediate neighbors of a chunk
/// plus the center, because it is designed to cover neighbors of a point in
/// the chunk's "interior."
///
/// This is what's used during cubic interpolation, for example, as it
/// guarantees that for any point between the given chunk (on the top left) and
/// its top-right/down-right/down neighbors, the twelve chunks surrounding this
/// box (its "perimeter") are also inspected.
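///
/// Concretely, with the `grid_size` of 3 used below, this visits the 7 × 7
/// block of chunk coordinates centered on `posi`, clipped to the world bounds.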
pub fn local_cells(map_size_lg: MapSizeLg, posi: usize) -> impl Clone + Iterator<Item = usize> {
    let pos = uniform_idx_as_vec2(map_size_lg, posi);
    // NOTE: want to keep this such that the chunk index is in ascending order!
    let grid_size = 3i32;
    let grid_bounds = 2 * grid_size + 1;
    (0..grid_bounds * grid_bounds)
        .map(move |index| {
            Vec2::new(
                pos.x + (index % grid_bounds) - grid_size,
                pos.y + (index / grid_bounds) - grid_size,
            )
        })
        .filter(move |pos| {
            pos.x >= 0
                && pos.y >= 0
                && pos.x < map_size_lg.chunks().x as i32
                && pos.y < map_size_lg.chunks().y as i32
        })
        .map(move |e| vec2_as_uniform_idx(map_size_lg, e))
}

// Note that we should already have okay cache locality since we have a grid.
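/// Iterate over the neighbors of `posi` whose downhill chunk (according to
/// `dh`) is `posi` itself, i.e. the chunks immediately uphill of it.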
pub fn uphill(
    map_size_lg: MapSizeLg,
    dh: &[isize],
    posi: usize,
) -> impl Clone + Iterator<Item = usize> + '_ {
    neighbors(map_size_lg, posi).filter(move |&posj| dh[posj] == posi as isize)
}

/// Compute the neighbor "most downhill" from all chunks.
///
/// TODO: See if allocating in advance is worthwhile.
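///
/// Returns one entry per chunk: the index of its lowest strictly-lower
/// neighbor, `-1` if it has no lower neighbor, or `-2` if the chunk is ocean
/// (according to `is_ocean`).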
pub fn downhill<F: Float>(
    map_size_lg: MapSizeLg,
    h: impl Fn(usize) -> F + Sync,
    is_ocean: impl Fn(usize) -> bool + Sync,
) -> Box<[isize]> {
    // Constructs not only the list of downhill nodes, but also computes an ordering
    // (visiting nodes in order from roots to leaves).
    (0..map_size_lg.chunks_len())
        .into_par_iter()
        .map(|posi| {
            let nh = h(posi);
            if is_ocean(posi) {
                -2
            } else {
                let mut best = -1;
                let mut besth = nh;
                for nposi in neighbors(map_size_lg, posi) {
                    let nbh = h(nposi);
                    if nbh < besth {
                        besth = nbh;
                        best = nposi as isize;
                    }
                }
                best
            }
        })
        .collect::<Vec<_>>()
        .into_boxed_slice()
}

/* /// Bilinear interpolation.
///
/// Linear interpolation in both directions (i.e. quadratic interpolation).
fn get_interpolated_bilinear<T, F>(&self, pos: Vec2<i32>, mut f: F) -> Option<T>
    where
        T: Copy + Default + Signed + Float + Add<Output = T> + Mul<f32, Output = T>,
        F: FnMut(Vec2<i32>) -> Option<T>,
{
    // (i) Find downhill for all four points.
    // (ii) Compute distance from each downhill point and do linear interpolation on
    // their heights. (iii) Compute distance between each neighboring point
    // and do linear interpolation on their distance-interpolated
    // heights.

    // See http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1990A%26A...239..443S&defaultprint=YES&page_ind=0&filetype=.pdf
    //
    // Note that these are only guaranteed monotone in one dimension; fortunately,
    // that is sufficient for our purposes.
    let pos = pos.map2(TerrainChunkSize::RECT_SIZE, |e, sz: u32| {
        e as f64 / sz as f64
    });

    // Orient the chunk in the direction of the most downhill point of the four. If
    // there is no "most downhill" point, then we don't care.
    let x0 = pos.map2(Vec2::new(0, 0), |e, q| e.max(0.0) as i32 + q);
    let y0 = f(x0)?;

    let x1 = pos.map2(Vec2::new(1, 0), |e, q| e.max(0.0) as i32 + q);
    let y1 = f(x1)?;

    let x2 = pos.map2(Vec2::new(0, 1), |e, q| e.max(0.0) as i32 + q);
    let y2 = f(x2)?;

    let x3 = pos.map2(Vec2::new(1, 1), |e, q| e.max(0.0) as i32 + q);
    let y3 = f(x3)?;

    let z0 = y0
        .mul(1.0 - pos.x.fract() as f32)
        .mul(1.0 - pos.y.fract() as f32);
    let z1 = y1.mul(pos.x.fract() as f32).mul(1.0 - pos.y.fract() as f32);
    let z2 = y2.mul(1.0 - pos.x.fract() as f32).mul(pos.y.fract() as f32);
    let z3 = y3.mul(pos.x.fract() as f32).mul(pos.y.fract() as f32);

    Some(z0 + z1 + z2 + z3)
} */

/// Find all ocean tiles from a height map, using an inductive definition of
/// ocean as one of:
/// - posi is at the side of the world (map_edge_factor(posi) == 0.0)
/// - posi has a neighboring ocean tile, and has a height below sea level
///   (oldh(posi) <= 0.0).
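///
/// Returns a bitset with one bit per chunk, set for chunks determined to be
/// ocean.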
pub fn get_oceans<F: Float>(map_size_lg: MapSizeLg, oldh: impl Fn(usize) -> F + Sync) -> BitBox {
    // We can mark tiles as ocean candidates by scanning row by row, since the top
    // edge is ocean, the sides are connected to it, and any subsequent ocean
    // tiles must be connected to it.
    let mut is_ocean = bitbox![0; map_size_lg.chunks_len()];
    let mut stack = Vec::new();
    let mut do_push = |pos| {
        let posi = vec2_as_uniform_idx(map_size_lg, pos);
        if oldh(posi) <= F::zero() {
            stack.push(posi);
        }
    };
    for x in 0..map_size_lg.chunks().x as i32 {
        do_push(Vec2::new(x, 0));
        do_push(Vec2::new(x, map_size_lg.chunks().y as i32 - 1));
    }
    for y in 1..map_size_lg.chunks().y as i32 - 1 {
        do_push(Vec2::new(0, y));
        do_push(Vec2::new(map_size_lg.chunks().x as i32 - 1, y));
    }
    while let Some(chunk_idx) = stack.pop() {
        // println!("Ocean chunk {:?}: {:?}", uniform_idx_as_vec2(map_size_lg,
        // chunk_idx), oldh(chunk_idx));
        let mut is_ocean = is_ocean.get_mut(chunk_idx).unwrap();
        if *is_ocean {
            continue;
        }
        *is_ocean = true;
        stack.extend(neighbors(map_size_lg, chunk_idx).filter(|&neighbor_idx| {
            // println!("Ocean neighbor: {:?}: {:?}", uniform_idx_as_vec2(map_size_lg,
            // neighbor_idx), oldh(neighbor_idx));
            oldh(neighbor_idx) <= F::zero()
        }));
    }
    is_ocean
}

/// Finds the horizon map for sunlight for the given chunks.
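///
/// On success, returns a pair of maps `[west, east]`, one per horizontal
/// marching direction along the x axis; each stores, per chunk in `bounds`, a
/// horizon angle and the height of the occluder that produced it (see
/// `HorizonMap`).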
#[expect(clippy::result_unit_err)]
pub fn get_horizon_map<F: Float + Sync, A: Send, H: Send>(
    map_size_lg: MapSizeLg,
    bounds: Aabr<i32>,
    minh: F,
    maxh: F,
    h: impl Fn(usize) -> F + Sync,
    to_angle: impl Fn(F) -> A + Sync,
    to_height: impl Fn(F) -> H + Sync,
) -> Result<[HorizonMap<A, H>; 2], ()> {
    prof_span!("get_horizon_map");
    if maxh < minh {
        // maxh must be at least minh
        return Err(());
    }
    let map_size = Vec2::<i32>::from(bounds.size()).map(|e| e as usize);
    let map_len = map_size.product();

    // Now, do the raymarching.
    let chunk_x = if let Vec2 { x: Some(x), .. } = TerrainChunkSize::RECT_SIZE.map(F::from) {
        x
    } else {
        return Err(());
    };
    // let epsilon = F::epsilon() * if let x = F::from(map_size.x) { x } else {
    // return Err(()) };
    let march = |dx: isize, maxdx: fn(isize, map_size_lg: MapSizeLg) -> isize| {
        let mut angles = Vec::with_capacity(map_len);
        let mut heights = Vec::with_capacity(map_len);
        (0..map_len)
            .into_par_iter()
            .map(|posi| {
                let wposi =
                    bounds.min + Vec2::new((posi % map_size.x) as i32, (posi / map_size.x) as i32);
                if wposi.reduce_partial_min() < 0
                    || wposi.x as usize >= usize::from(map_size_lg.chunks().x)
                    || wposi.y as usize >= usize::from(map_size_lg.chunks().y)
                {
                    return (to_angle(F::zero()), to_height(F::zero()));
                }
                let posi = vec2_as_uniform_idx(map_size_lg, wposi);
                // March in the given direction.
                let maxdx = maxdx(wposi.x as isize, map_size_lg);
                let mut slope = F::zero();
                let h0 = h(posi);
                let h = if h0 < minh {
                    F::zero()
                } else {
                    let mut max_height = F::zero();
                    let maxdz = maxh - h0;
                    let posi = posi as isize;
                    for deltax in 1..maxdx {
                        let posj = (posi + deltax * dx) as usize;
                        let deltax = chunk_x * F::from(deltax).unwrap();
                        let h_j_est = slope * deltax;
                        if h_j_est > maxdz {
                            break;
                        }
                        let h_j_act = h(posj) - h0;
                        if
                        /* h_j_est - h_j_act <= epsilon */
                        h_j_est <= h_j_act {
                            slope = h_j_act / deltax;
                            max_height = h_j_act;
                        }
                    }
                    h0 - minh + max_height
                };
                let a = slope;
                (to_angle(a), to_height(h))
            })
            .unzip_into_vecs(&mut angles, &mut heights);
        (angles, heights)
    };
    let west = march(-1, |x, _| x);
    let east = march(1, |x, map_size_lg| {
        (usize::from(map_size_lg.chunks().x) - x as usize) as isize
    });
    Ok([west, east])
}

fn build_sources<Source>(seed: u32, octaves: usize) -> Vec<Source>
where
    Source: Default + Seedable,
{
    let mut sources = Vec::with_capacity(octaves);
    for x in 0..octaves {
        let source = Source::default();
        sources.push(source.set_seed(seed + x as u32));
    }
    sources
}

/// Noise function that outputs hybrid Multifractal noise.
///
/// The result of this multifractal noise is that valleys in the noise should
/// have smooth bottoms at all altitudes.
///
/// Copied from noise crate to add offset.
#[derive(Clone, Debug)]
pub struct HybridMulti<T> {
    /// Total number of frequency octaves to generate the noise with.
    ///
    /// The number of octaves controls the _amount of detail_ in the noise
    /// function. Adding more octaves increases the detail, with the drawback
    /// of increasing the calculation time.
    pub octaves: usize,

    /// The number of cycles per unit length that the noise function outputs.
    pub frequency: f64,

    /// A multiplier that determines how quickly the frequency increases for
    /// each successive octave in the noise function.
    ///
    /// The frequency of each successive octave is equal to the product of the
    /// previous octave's frequency and the lacunarity value.
    ///
    /// A lacunarity of 2.0 results in the frequency doubling every octave. For
    /// almost all cases, 2.0 is a good value to use.
    pub lacunarity: f64,

    /// A multiplier that determines how quickly the amplitudes diminish for
    /// each successive octave in the noise function.
    ///
    /// The amplitude of each successive octave is equal to the product of the
    /// previous octave's amplitude and the persistence value. Increasing the
    /// persistence produces "rougher" noise.
    ///
    /// H = 1.0 - fractal increment = -ln(persistence) / ln(lacunarity). For
    /// a fractal increment between 0 (inclusive) and 1 (exclusive), keep
    /// persistence between 1 / lacunarity (inclusive, for low fractal
    /// dimension) and 1 (exclusive, for high fractal dimension).
    pub persistence: f64,

    /// An offset that is added to the output of each sample of the underlying
    /// Perlin noise function. Because each successive octave is weighted in
    /// part by the previous signal's output, increasing the offset will weight
    /// the output more heavily towards 1.0.
    pub offset: f64,

    seed: u32,
    sources: Vec<T>,
    //scale_factor: f64,
}

impl<T> HybridMulti<T>
where
    T: Default + Seedable,
{
    pub const DEFAULT_FREQUENCY: f64 = 2.0;
    pub const DEFAULT_LACUNARITY: f64 = /* std::f64::consts::PI * 2.0 / 3.0 */ 2.0;
    pub const DEFAULT_OCTAVES: usize = 6;
    pub const DEFAULT_OFFSET: f64 = /* 0.25 *//* 0.5 */ 0.7;
    // -ln(2^(-0.25))/ln(2) = 0.25
    // 2^(-0.25) ~ 13/16
    pub const DEFAULT_PERSISTENCE: f64 = /* 0.25 *//* 0.5 */ 13.0 / 16.0;
    pub const DEFAULT_SEED: u32 = 0;
    pub const MAX_OCTAVES: usize = 32;

    pub fn new(seed: u32) -> Self {
        Self {
            seed,
            octaves: Self::DEFAULT_OCTAVES,
            frequency: Self::DEFAULT_FREQUENCY,
            lacunarity: Self::DEFAULT_LACUNARITY,
            persistence: Self::DEFAULT_PERSISTENCE,
            offset: Self::DEFAULT_OFFSET,
            sources: build_sources(seed, Self::DEFAULT_OCTAVES),
            //scale_factor: Self::calc_scale_factor(Self::DEFAULT_PERSISTENCE,
            // Self::DEFAULT_OCTAVES),
        }
    }

    pub fn set_offset(self, offset: f64) -> Self { Self { offset, ..self } }

    #[expect(dead_code)]
    pub fn set_sources(self, sources: Vec<T>) -> Self { Self { sources, ..self } }
    /*fn calc_scale_factor(persistence: f64, octaves: usize) -> f64 {
        let mut result = persistence;

        // Do octave 0
        let mut amplitude = persistence;
        let mut weight = result;
        let mut signal = amplitude;
        weight *= signal;

        result += signal;

        if octaves >= 1 {
            result += (1..=octaves).fold(0.0, |acc, _| {
                amplitude *= persistence;
                weight = weight.max(1.0);
                signal = amplitude;
                weight *= signal;
                acc + signal
            });
        }

        2.0 / result
    }*/
}

impl<T> Default for HybridMulti<T>
where
    T: Default + Seedable,
{
    fn default() -> Self { Self::new(Self::DEFAULT_SEED) }
}

impl<T> MultiFractal for HybridMulti<T>
where
    T: Default + Seedable,
{
    fn set_octaves(self, mut octaves: usize) -> Self {
        if self.octaves == octaves {
            return self;
        }

        octaves = octaves.clamp(1, Self::MAX_OCTAVES);
        Self {
            octaves,
            sources: build_sources(self.seed, octaves),
            //scale_factor: Self::calc_scale_factor(self.persistence, octaves),
            ..self
        }
    }

    fn set_frequency(self, frequency: f64) -> Self { Self { frequency, ..self } }

    fn set_lacunarity(self, lacunarity: f64) -> Self { Self { lacunarity, ..self } }

    fn set_persistence(self, persistence: f64) -> Self {
        Self {
            persistence,
            //scale_factor: Self::calc_scale_factor(persistence, self.octaves),
            ..self
        }
    }
}

impl<T> Seedable for HybridMulti<T>
where
    T: Default + Seedable,
{
    fn set_seed(self, seed: u32) -> Self {
        if self.seed == seed {
            return self;
        }

        Self {
            seed,
            sources: build_sources(seed, self.octaves),
            ..self
        }
    }

    fn seed(&self) -> u32 { self.seed }
}

/// 2-dimensional `HybridMulti` noise
impl<T> NoiseFn<f64, 2> for HybridMulti<T>
where
    T: NoiseFn<f64, 2>,
{
    fn get(&self, point: [f64; 2]) -> f64 {
        let mut point = Vector2::from(point);

        let mut attenuation = self.persistence;

        // First unscaled octave of function; later octaves are scaled.
        point *= self.frequency;
        // Offset and bias to scale into [offset - 1.0, 1.0 + offset] range.
        let bias = 1.0;
        let mut result =
            (self.sources[0].get(point.into_array()) + self.offset) * bias * self.persistence;
        let mut scale = self.persistence;
        let mut weight = result;

        // Spectral construction inner loop, where the fractal is built.
        for x in 1..self.octaves {
            // Prevent divergence.
            weight = weight.min(1.0);

            // Raise the spatial frequency.
            point *= self.lacunarity;

            // Get noise value, and scale it to the [offset - 1.0, 1.0 + offset] range.
            let mut signal = (self.sources[x].get(point.into_array()) + self.offset) * bias;

            // Scale the amplitude appropriately for this frequency.
            signal *= attenuation;

            scale += attenuation;

            // Increase the attenuation for the next octave, to be equal to persistence ^ (x
            // + 1)
            attenuation *= self.persistence;

            // Add it in, weighted by previous octave's noise value.
            result += weight * signal;

            // Update the weighting value.
            weight *= signal;
        }

        // Scale the result to the [-1,1] range
        //result * self.scale_factor
        (result / scale) / bias - self.offset
    }
}

/// 3-dimensional `HybridMulti` noise
impl<T> NoiseFn<f64, 3> for HybridMulti<T>
where
    T: NoiseFn<f64, 3>,
{
    fn get(&self, point: [f64; 3]) -> f64 {
        let mut point = Vector3::from(point);

        let mut attenuation = self.persistence;

        // First unscaled octave of function; later octaves are scaled.
        point *= self.frequency;
        // Offset and bias to scale into [offset - 1.0, 1.0 + offset] range.
        let bias = 1.0;
        let mut result =
            (self.sources[0].get(point.into_array()) + self.offset) * bias * self.persistence;
        let mut scale = self.persistence;
        let mut weight = result;

        // Spectral construction inner loop, where the fractal is built.
        for x in 1..self.octaves {
            // Prevent divergence.
            weight = weight.min(1.0);

            // Raise the spatial frequency.
            point *= self.lacunarity;

            // Get noise value, and scale it to the [offset - 1.0, 1.0 + offset] range.
            let mut signal = (self.sources[x].get(point.into_array()) + self.offset) * bias;

            // Scale the amplitude appropriately for this frequency.
            signal *= attenuation;

            scale += attenuation;

            // Increase the attenuation for the next octave, to be equal to persistence ^ (x
            // + 1)
            attenuation *= self.persistence;

            // Add it in, weighted by previous octave's noise value.
            result += weight * signal;

            // Update the weighting value.
            weight *= signal;
        }

        // Scale the result to the [-1,1] range
        //result * self.scale_factor
        (result / scale) / bias - self.offset
    }
}

/// 4-dimensional `HybridMulti` noise
impl<T> NoiseFn<f64, 4> for HybridMulti<T>
where
    T: NoiseFn<f64, 4>,
{
    fn get(&self, point: [f64; 4]) -> f64 {
        let mut point = Vector4::from(point);

        let mut attenuation = self.persistence;

        // First unscaled octave of function; later octaves are scaled.
        point *= self.frequency;
        // Offset and bias to scale into [offset - 1.0, 1.0 + offset] range.
        let bias = 1.0;
        let mut result =
            (self.sources[0].get(point.into_array()) + self.offset) * bias * self.persistence;
        let mut scale = self.persistence;
        let mut weight = result;

        // Spectral construction inner loop, where the fractal is built.
        for x in 1..self.octaves {
            // Prevent divergence.
            weight = weight.min(1.0);

            // Raise the spatial frequency.
            point *= self.lacunarity;

            // Get noise value, and scale it to the [offset - 1.0, 1.0 + offset] range.
            let mut signal = (self.sources[x].get(point.into_array()) + self.offset) * bias;

            // Scale the amplitude appropriately for this frequency.
            signal *= attenuation;

            scale += attenuation;

            // Increase the attenuation for the next octave, to be equal to persistence ^ (x
            // + 1)
            attenuation *= self.persistence;

            // Add it in, weighted by previous octave's noise value.
            result += weight * signal;

            // Update the weighting value.
            weight *= signal;
        }

        // Scale the result to the [-1,1] range
        //result * self.scale_factor
        (result / scale) / bias - self.offset
    }
}
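
// Illustrative usage sketch only: builds the fractal over the thread-safe
// Worley source defined below and samples it at an arbitrary point. The seed,
// octave count, and sample position are assumptions made up for the example;
// the test only asserts that sampling produces a finite value.
#[cfg(test)]
mod hybrid_multi_usage_example {
    use super::*;
    use noise::{MultiFractal, NoiseFn};

    #[test]
    fn sample_hybrid_multi_over_worley() {
        let noise = HybridMulti::<Worley>::new(42).set_octaves(3);
        let value = NoiseFn::<f64, 2>::get(&noise, [123.4, 567.8]);
        assert!(value.is_finite());
    }
}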

/* code used by sharp in future – note: NoiseFn impl probably broken by noise crate upgrade from 0.7 to 0.9
/// Noise function that applies a scaling factor and a bias to the output value
/// from the source function.
///
/// The function retrieves the output value from the source function, multiplies
/// it with the scaling factor, adds the bias to it, then outputs the value.
pub struct ScaleBias<'a, F: 'a> {
    /// Outputs a value.
    pub source: &'a F,

    /// Scaling factor to apply to the output value from the source function.
    /// The default value is 1.0.
    pub scale: f64,

    /// Bias to apply to the scaled output value from the source function.
    /// The default value is 0.0.
    pub bias: f64,
}

impl<'a, F> ScaleBias<'a, F> {
    pub fn new(source: &'a F) -> Self {
        ScaleBias {
            source,
            scale: 1.0,
            bias: 0.0,
        }
    }

    pub fn set_scale(self, scale: f64) -> Self { ScaleBias { scale, ..self } }

    pub fn set_bias(self, bias: f64) -> Self { ScaleBias { bias, ..self } }
}

impl<'a, F: NoiseFn<T> + 'a, T> NoiseFn<T> for ScaleBias<'a, F> {
    #[cfg(not(target_os = "emscripten"))]
    fn get(&self, point: T) -> f64 { (self.source.get(point)).mul_add(self.scale, self.bias) }

    #[cfg(target_os = "emscripten")]
    fn get(&self, point: T) -> f64 { (self.source.get(point) * self.scale) + self.bias }
}
*/

/// Noise function that outputs Worley noise.
///
/// Copied from noise crate to make thread-safe.
#[derive(Clone)]
pub struct Worley {
    /// Specifies the distance function to use when calculating the boundaries
    /// of the cell.
    pub distance_function: Arc<DistanceFunction>,

    /// Signifies whether the distance from the borders of the cell should be
    /// returned, or the value for the cell.
    pub return_type: ReturnType,

    /// Frequency of the seed points.
    pub frequency: f64,

    seed: u32,
    perm_table: PermutationTable,
}

type DistanceFunction = dyn Fn(&[f64], &[f64]) -> f64 + Sync + Send;

impl Worley {
    //pub const DEFAULT_SEED: u32 = 0;
    pub const DEFAULT_FREQUENCY: f64 = 1.0;

    pub fn new(seed: u32) -> Self {
        Self {
            perm_table: PermutationTable::new(seed),
            seed,
            distance_function: Arc::new(distance_functions::euclidean),
            return_type: ReturnType::Value,
            frequency: Self::DEFAULT_FREQUENCY,
        }
    }

    /// Sets the distance function used by the Worley cells.
    pub fn set_distance_function<F>(self, function: F) -> Self
    where
        F: Fn(&[f64], &[f64]) -> f64 + 'static + Sync + Send,
    {
        Self {
            distance_function: Arc::new(function),
            ..self
        }
    }

    /// Enables or disables applying the distance from the nearest seed point
    /// to the output value.
    #[expect(dead_code)]
    pub fn set_return_type(self, return_type: ReturnType) -> Self {
        Self {
            return_type,
            ..self
        }
    }

    /// Sets the frequency of the seed points.
    pub fn set_frequency(self, frequency: f64) -> Self { Self { frequency, ..self } }
}
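
// Illustrative only: shows set_distance_function accepting a plain closure (a
// hand-rolled Manhattan-style distance) instead of the default euclidean one.
// The seed, frequency, and sample point are made up; the exact output value is
// not asserted.
#[cfg(test)]
mod worley_distance_function_example {
    use super::*;
    use noise::NoiseFn;

    #[test]
    fn worley_accepts_a_custom_distance_function() {
        let noise = Worley::new(7)
            .set_distance_function(|a: &[f64], b: &[f64]| {
                a.iter().zip(b.iter()).map(|(x, y)| (x - y).abs()).sum::<f64>()
            })
            .set_frequency(0.5);
        assert!(NoiseFn::<f64, 2>::get(&noise, [10.0, -3.0]).is_finite());
    }
}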

impl Default for Worley {
    fn default() -> Self { Self::new(0) }
}

impl Seedable for Worley {
    /// Sets the seed value used by the Worley cells.
    fn set_seed(self, seed: u32) -> Self {
        // If the new seed is the same as the current seed, just return self.
        if self.seed == seed {
            return self;
        }

        // Otherwise, regenerate the permutation table based on the new seed.
        Self {
            perm_table: PermutationTable::new(seed),
            seed,
            ..self
        }
    }

    fn seed(&self) -> u32 { self.seed }
}

impl NoiseFn<f64, 2> for Worley {
    fn get(&self, point: [f64; 2]) -> f64 {
        worley_2d(
            &self.perm_table,
            &*self.distance_function,
            self.return_type,
            Vector2::from(point) * self.frequency,
        )
    }
}

impl NoiseFn<f64, 3> for Worley {
    fn get(&self, point: [f64; 3]) -> f64 {
        worley_3d(
            &self.perm_table,
            &*self.distance_function,
            self.return_type,
            Vector3::from(point) * self.frequency,
        )
    }
}

impl NoiseFn<f64, 4> for Worley {
    fn get(&self, point: [f64; 4]) -> f64 {
        worley_4d(
            &self.perm_table,
            &*self.distance_function,
            self.return_type,
            Vector4::from(point) * self.frequency,
        )
    }
}