veloren_server/terrain_persistence.rs

use atomicwrites::{AtomicFile, OverwriteBehavior};
use bincode::{
    config::legacy,
    error::DecodeError,
    serde::{decode_from_std_read, encode_to_vec},
};
use common::{
    terrain::{Block, TerrainChunk},
    vol::{RectRasterableVol, WriteVol},
};
use hashbrown::HashMap;
use schnellru::{Limiter, LruMap};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::{
    any::{Any, type_name},
    fs::File,
    io::{self, Read as _, Write as _},
    path::PathBuf,
};
use tracing::{debug, error, info, warn};
use vek::*;

const MAX_BLOCK_CACHE: usize = 64_000_000;

pub struct TerrainPersistence {
    path: PathBuf,
    chunks: HashMap<Vec2<i32>, LoadedChunk>,
    /// A cache of recently unloaded chunks
    cached_chunks: LruMap<Vec2<i32>, Chunk, ByBlockLimiter>,
}

/// Wrapper over a [`Chunk`] that keeps track of modifications
#[derive(Default)]
pub struct LoadedChunk {
    chunk: Chunk,
    modified: bool,
}

impl TerrainPersistence {
    /// Create a new terrain persistence system using the given data directory.
    ///
    /// If the `VELOREN_TERRAIN` environment variable is set, this will be used
    /// as the persistence directory instead.
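    ///
    /// A minimal usage sketch (the directory below is illustrative only):
    ///
    /// ```ignore
    /// // Terrain edits are persisted under `<data_dir>/terrain`, unless the
    /// // `VELOREN_TERRAIN` environment variable points somewhere else.
    /// let persistence = TerrainPersistence::new(PathBuf::from("userdata/server"));
    /// ```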
    pub fn new(mut data_dir: PathBuf) -> Self {
        let path = std::env::var("VELOREN_TERRAIN")
            .map(PathBuf::from)
            .unwrap_or_else(|_| {
                data_dir.push("terrain");
                data_dir
            });

        std::fs::create_dir_all(&path).expect("Failed to create terrain persistence directory");

        info!("Using {:?} as the terrain persistence path", path);

        Self {
            path,
            chunks: HashMap::default(),
            cached_chunks: LruMap::new(ByBlockLimiter::new(MAX_BLOCK_CACHE)),
        }
    }

    /// Apply persistence changes to a newly generated chunk.
    pub fn apply_changes(&mut self, key: Vec2<i32>, terrain_chunk: &mut TerrainChunk) {
        let loaded_chunk = self.load_chunk(key);

        let mut resets = Vec::new();
        for (rpos, new_block) in loaded_chunk.chunk.blocks() {
            if let Err(e) = terrain_chunk.map(rpos, |block| {
                if block == new_block {
                    resets.push(rpos);
                }
                new_block
            }) {
                warn!(
                    "Could not set block in chunk {:?} with position {:?} (out of bounds?): {:?}",
                    key, rpos, e
                );
            }
        }

        // Reset any unchanged blocks (this is an optimisation only)
        for rpos in resets {
            loaded_chunk.chunk.reset_block(rpos);
            loaded_chunk.modified = true;
        }
    }

    /// Maintain terrain persistence (writing changes back to the filesystem,
    /// etc.)
    pub fn maintain(&mut self) {
        // Currently, this does nothing because filesystem writeback occurs on
        // chunk unload. However, this is not a particularly reliable mechanism
        // (it doesn't survive power loss, say). Later, a more reliable
        // strategy should be implemented here.
    }

    fn path_for(&self, key: Vec2<i32>) -> PathBuf {
        let mut path = self.path.clone();
        path.push(format!("chunk_{}_{}.dat", key.x, key.y));
        path
    }

    fn load_chunk(&mut self, key: Vec2<i32>) -> &mut LoadedChunk {
        let path = self.path_for(key);
        self.chunks.entry(key).or_insert_with(|| {
            // If the chunk has been recently unloaded and is still cached, don't read
            // it from disk
            if let Some(chunk) = self.cached_chunks.remove(&key) {
                return LoadedChunk {
                    chunk,
                    modified: false,
                };
            }

            File::open(&path)
                .ok()
                .map(|f| {
                    let bytes = match io::BufReader::new(f).bytes().collect::<Result<Vec<_>, _>>() {
                        Ok(bytes) => bytes,
                        Err(err) => {
                            error!(
                                "Failed to read data for chunk {:?} from file: {:?}",
                                key, err
                            );
                            return LoadedChunk::default();
                        },
                    };
                    let chunk = match Chunk::deserialize_from(io::Cursor::new(bytes)) {
                        Some(chunk) => chunk,
                        None => {
                            // Find an untaken name for a backup
                            let mut backup_path = path.clone();
                            backup_path.set_extension("dat_backup_0");
                            let mut i = 1;
                            while backup_path.exists() {
                                backup_path.set_extension(format!("dat_backup_{}", i));
                                i += 1;
                            }

                            error!(
                                "Failed to load chunk {:?}, moving possibly corrupt (or too new) \
                                 data to {:?} for you to repair.",
                                key, backup_path
                            );
                            if let Err(err) = std::fs::rename(path, backup_path) {
                                error!("Failed to rename invalid chunk file: {:?}", err);
                            }
                            Chunk::default()
                        },
                    };

                    LoadedChunk {
                        chunk,
                        modified: false,
                    }
                })
                .unwrap_or_default()
        })
    }

    /// Unload a chunk, caching it for quick reloading and writing any
    /// modifications back to the filesystem.
    pub fn unload_chunk(&mut self, key: Vec2<i32>) {
        if let Some(LoadedChunk { chunk, modified }) = self.chunks.remove(&key) {
            if modified || self.cached_chunks.peek(&key).is_none() {
                self.cached_chunks.insert(key, chunk.clone());
            }

            // Prevent any unnecessary IO when nothing in this chunk has changed
            if !modified {
                return;
            }

            if chunk.blocks.is_empty() {
                let path = self.path_for(key);

                if path.is_file()
                    && let Err(error) = std::fs::remove_file(&path)
                {
                    error!(?error, ?path, "Failed to remove file for empty chunk");
                }
            } else {
                let bytes =
                    match encode_to_vec::<version::Current, _>(chunk.prepare_raw(), legacy()) {
                        Err(err) => {
                            error!("Failed to serialize chunk data: {:?}", err);
                            return;
                        },
                        Ok(bytes) => bytes,
                    };

                let atomic_file =
                    AtomicFile::new(self.path_for(key), OverwriteBehavior::AllowOverwrite);
                if let Err(err) = atomic_file.write(|file| file.write_all(&bytes)) {
                    error!("Failed to write chunk data to file: {:?}", err);
                }
            }
        }
    }

    /// Clear any persisted block changes for the given chunk.
    pub fn clear_chunk(&mut self, chunk: Vec2<i32>) {
        self.cached_chunks.remove(&chunk);
        self.chunks.insert(chunk, LoadedChunk {
            chunk: Chunk::default(),
            modified: true,
        });
    }

    /// Unload every currently loaded chunk (see [`Self::unload_chunk`]).
    pub fn unload_all(&mut self) {
        for key in self.chunks.keys().copied().collect::<Vec<_>>() {
            self.unload_chunk(key);
        }
    }

    /// Record a persistent block change at the given world position.
    pub fn set_block(&mut self, pos: Vec3<i32>, block: Block) {
        let key = pos
            .xy()
            .map2(TerrainChunk::RECT_SIZE, |e, sz| e.div_euclid(sz as i32));
        let loaded_chunk = self.load_chunk(key);
        let old_block = loaded_chunk
            .chunk
            .blocks
            .insert(pos - key * TerrainChunk::RECT_SIZE.map(|e| e as i32), block);
        if old_block != Some(block) {
            loaded_chunk.modified = true;
        }
    }
}

impl Drop for TerrainPersistence {
    fn drop(&mut self) { self.unload_all(); }
}

#[derive(Default, Serialize, Deserialize, Clone)]
pub struct Chunk {
    blocks: HashMap<Vec3<i32>, Block>,
}

impl Chunk {
    fn deserialize_from<R: io::Read + Clone>(reader: R) -> Option<Self> {
        version::try_load(reader)
    }

    fn prepare_raw(self) -> version::Current { self.into() }

    fn blocks(&self) -> impl Iterator<Item = (Vec3<i32>, Block)> + '_ {
        self.blocks.iter().map(|(k, b)| (*k, *b))
    }

    fn reset_block(&mut self, rpos: Vec3<i32>) { self.blocks.remove(&rpos); }

    /// Get the number of blocks this chunk contains
    fn len(&self) -> usize { self.blocks.len() }
}

/// LRU limiter that limits by the number of blocks
struct ByBlockLimiter {
    /// Maximum number of blocks that can be contained
    block_limit: usize,
    /// Total number of blocks that are currently contained in the LRU
    counted_blocks: usize,
}

impl Limiter<Vec2<i32>, Chunk> for ByBlockLimiter {
    type KeyToInsert<'a> = Vec2<i32>;
    type LinkType = u32;

    fn is_over_the_limit(&self, _length: usize) -> bool { self.counted_blocks > self.block_limit }

    fn on_insert(
        &mut self,
        _length: usize,
        key: Self::KeyToInsert<'_>,
        chunk: Chunk,
    ) -> Option<(Vec2<i32>, Chunk)> {
        let chunk_size = chunk.len();

        if self.counted_blocks + chunk_size > self.block_limit {
            None
        } else {
            self.counted_blocks += chunk_size;
            Some((key, chunk))
        }
    }

    fn on_replace(
        &mut self,
        _length: usize,
        _old_key: &mut Vec2<i32>,
        _new_key: Self::KeyToInsert<'_>,
        old_chunk: &mut Chunk,
        new_chunk: &mut Chunk,
    ) -> bool {
        let old_size = old_chunk.len() as isize; // Chunks are assumed to be at most a few thousand blocks, so this cast is fine
        let new_size = new_chunk.len() as isize;
        let new_total = self
            .counted_blocks
            .saturating_add_signed(new_size - old_size);

        if new_total > self.block_limit {
            false
        } else {
            self.counted_blocks = new_total;
            true
        }
    }

    fn on_removed(&mut self, _key: &mut Vec2<i32>, chunk: &mut Chunk) {
        self.counted_blocks = self.counted_blocks.saturating_sub(chunk.len());
    }

    fn on_cleared(&mut self) { self.counted_blocks = 0; }

    fn on_grow(&mut self, _new_memory_usage: usize) -> bool { true }
}

impl ByBlockLimiter {
    /// Creates a new by-block limiter
    fn new(block_limit: usize) -> Self {
        Self {
            block_limit,
            counted_blocks: 0,
        }
    }
}

/// # Adding a new chunk format version
///
/// Chunk formats are designed to be backwards-compatible when loading, but are
/// not required to be backwards-compatible when saving (i.e. we must always be
/// able to load old formats, but we're not required to save old formats because
/// newer formats might contain richer information that is incompatible with an
/// older format).
///
/// The steps for adding a new version are as follows:
///
/// 1. Create a new 'raw format' type that implements [`Serialize`] and
///    [`Deserialize`]. Make sure to add a version field. If in doubt, copy the
///    last raw format and increment the version number wherever it appears.
///    Don't forget to increment the version number in the
///    `serde(deserialize_with = ...)` attribute! Conventionally, these types
///    are named `V{N}` where `{N}` is the number succeeding the previous raw
///    format type.
///
/// 2. Add an implementation of `From<{YourRawFormat}>` for `Chunk`. As before,
///    see previous versions if in doubt.
///
/// 3. Change the type of [`version::Current`] to your new raw format type.
///
/// 4. Add an entry for your raw format at the top of the array in
///    [`version::loaders`].
///
/// 5. Remove the `Serialize` implementation from the previous raw format type:
///    we don't need it any longer!
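///
/// As an illustrative sketch only (the `V4` type and its fields are
/// hypothetical, not an existing format), steps [1] and [2] might look like
/// this:
///
/// ```ignore
/// #[derive(Serialize, Deserialize)]
/// pub struct V4 {
///     #[serde(deserialize_with = "version::<_, 4>")]
///     pub version: u64,
///     pub blocks: Vec<(u8, u8, i16, u32)>,
/// }
///
/// impl From<V4> for Chunk {
///     fn from(v4: V4) -> Self {
///         // Convert the raw block list back into a `Chunk`, as the existing
///         // `From<V3>` implementation does.
///         /* ... */
///     }
/// }
/// ```
///
/// Steps [3] to [5] would then point [`version::Current`] at `V4`, add
/// `load_raw::<V4, _>` to the front of the array in [`version::loaders`], and
/// drop the `Serialize` derive from `V3`.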
mod version {
    use super::*;

    /// The newest supported raw format type. This should be changed every time
    /// a new raw format is added.
    // Step [3]
    pub type Current = V3;

    type LoadChunkFn<R> = fn(R) -> Result<Chunk, (&'static str, Box<DecodeError>)>;
    fn loaders<'a, R: io::Read + Clone>() -> &'a [LoadChunkFn<R>] {
        // Step [4]
        &[load_raw::<V3, _>, load_raw::<V2, _>, load_raw::<V1, _>]
    }

    // Convert back to current

    impl From<Chunk> for Current {
        fn from(chunk: Chunk) -> Self {
            Self {
                version: version_magic(3),
                blocks: chunk
                    .blocks
                    .into_iter()
                    .map(|(pos, b)| (pos.x as u8, pos.y as u8, pos.z as i16, b.to_u32()))
                    .collect(),
            }
        }
    }

    /// Version 3 of the raw chunk format.
    #[derive(Serialize, Deserialize)]
    pub struct V3 {
        #[serde(deserialize_with = "version::<_, 3>")]
        pub version: u64,
        pub blocks: Vec<(u8, u8, i16, u32)>,
    }

    impl From<V3> for Chunk {
        fn from(v3: V3) -> Self {
            Self {
                blocks: v3
                    .blocks
                    .into_iter()
                    .map(|(x, y, z, b)| {
                        (
                            Vec3::new(x as i32, y as i32, z as i32),
                            Block::from_u32(b).unwrap_or_else(Block::empty),
                        )
                    })
                    .collect(),
            }
        }
    }

    /// Version 2 of the raw chunk format.
    #[derive(Deserialize)]
    pub struct V2 {
        #[serde(deserialize_with = "version::<_, 2>")]
        pub version: u64,
        pub blocks: Vec<(u8, u8, i16, Block)>,
    }

    impl From<V2> for Chunk {
        fn from(v2: V2) -> Self {
            Self {
                blocks: v2
                    .blocks
                    .into_iter()
                    .map(|(x, y, z, b)| (Vec3::new(x as i32, y as i32, z as i32), b))
                    .collect(),
            }
        }
    }

    /// Version 1 of the raw chunk format.
    #[derive(Deserialize)]
    pub struct V1 {
        pub blocks: HashMap<Vec3<i32>, Block>,
    }

    impl From<V1> for Chunk {
        fn from(v1: V1) -> Self { Self { blocks: v1.blocks } }
    }

    // Utility things

    /// Combine a small version number with a fixed magic constant, so that the
    /// leading `u64` of a chunk file only matches when both the magic bytes and
    /// the expected version agree.
    fn version_magic(n: u16) -> u64 { (n as u64) | (0x3352ACEEA789 << 16) }

    fn version<'de, D: serde::Deserializer<'de>, const V: u16>(de: D) -> Result<u64, D::Error> {
        u64::deserialize(de).and_then(|x| {
            if x == version_magic(V) {
                Ok(x)
            } else {
                Err(serde::de::Error::invalid_value(
                    serde::de::Unexpected::Unsigned(x),
                    &"incorrect magic/version bytes",
                ))
            }
        })
    }

    fn load_raw<RawChunk: Any + Into<Chunk> + DeserializeOwned, R: io::Read + Clone>(
        mut reader: R,
    ) -> Result<Chunk, (&'static str, Box<DecodeError>)> {
        decode_from_std_read::<RawChunk, _, _>(&mut reader, legacy())
            .map(Into::into)
            .map_err(|e| (type_name::<RawChunk>(), Box::new(e)))
    }

    pub fn try_load<R: io::Read + Clone>(reader: R) -> Option<Chunk> {
        loaders()
            .iter()
            .find_map(|load_raw| match load_raw(reader.clone()) {
                Ok(chunk) => Some(chunk),
                Err((raw_name, e)) => {
                    debug!(
                        "Attempt to load chunk with raw format `{}` failed: {:?}",
                        raw_name, e
                    );
                    None
                },
            })
    }
}
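
// A minimal round-trip sketch: serialize a chunk through the current raw
// format (as `unload_chunk` does) and load it back via the versioned loaders.
// This is illustrative only and relies solely on items defined above; the test
// name itself is not part of the existing code.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn current_format_round_trips() {
        let mut chunk = Chunk::default();
        chunk.blocks.insert(Vec3::new(1, 2, 3), Block::empty());

        // Encode through the newest raw format.
        let bytes = encode_to_vec::<version::Current, _>(chunk.clone().prepare_raw(), legacy())
            .expect("serializing the current format should succeed");
        // The versioned loaders should always accept the current format.
        let loaded = Chunk::deserialize_from(io::Cursor::new(bytes))
            .expect("the current format should always load");

        assert_eq!(loaded.len(), chunk.len());
    }
}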