1use atomicwrites::{AtomicFile, OverwriteBehavior};
2use common::{
3 terrain::{Block, TerrainChunk},
4 vol::{RectRasterableVol, WriteVol},
5};
6use hashbrown::HashMap;
7use schnellru::{Limiter, LruMap};
8use serde::{Deserialize, Serialize, de::DeserializeOwned};
9use std::{
10 any::{Any, type_name},
11 fs::File,
12 io::{self, Read as _, Write as _},
13 path::PathBuf,
14};
15use tracing::{debug, error, info, warn};
16use vek::*;
17
/// Upper bound on the total number of blocks held across all chunks in the
/// in-memory chunk cache (`TerrainPersistence::cached_chunks`); enforced by
/// `ByBlockLimiter`.
const MAX_BLOCK_CACHE: usize = 64_000_000;
19
/// Persists modified terrain blocks to disk, one file per chunk.
pub struct TerrainPersistence {
    /// Directory in which chunk files (`chunk_<x>_<y>.dat`) are stored.
    path: PathBuf,
    /// Chunks currently loaded in memory, keyed by 2D chunk position.
    chunks: HashMap<Vec2<i32>, LoadedChunk>,
    /// Recently unloaded chunks kept in memory (bounded by total block
    /// count) so a prompt reload avoids touching the filesystem.
    cached_chunks: LruMap<Vec2<i32>, Chunk, ByBlockLimiter>,
}
26
/// A chunk's persisted block data plus a dirty flag.
#[derive(Default)]
pub struct LoadedChunk {
    // The persisted (player-modified) blocks for this chunk.
    chunk: Chunk,
    // `true` when the in-memory data differs from what is on disk and so
    // must be written back when the chunk is unloaded.
    modified: bool,
}
33
34impl TerrainPersistence {
35 pub fn new(mut data_dir: PathBuf) -> Self {
40 let path = std::env::var("VELOREN_TERRAIN")
41 .map(PathBuf::from)
42 .unwrap_or_else(|_| {
43 data_dir.push("terrain");
44 data_dir
45 });
46
47 std::fs::create_dir_all(&path).expect("Failed to create terrain persistence directory");
48
49 info!("Using {:?} as the terrain persistence path", path);
50
51 Self {
52 path,
53 chunks: HashMap::default(),
54 cached_chunks: LruMap::new(ByBlockLimiter::new(MAX_BLOCK_CACHE)),
55 }
56 }
57
58 pub fn apply_changes(&mut self, key: Vec2<i32>, terrain_chunk: &mut TerrainChunk) {
60 let loaded_chunk = self.load_chunk(key);
61
62 let mut resets = Vec::new();
63 for (rpos, new_block) in loaded_chunk.chunk.blocks() {
64 if let Err(e) = terrain_chunk.map(rpos, |block| {
65 if block == new_block {
66 resets.push(rpos);
67 }
68 new_block
69 }) {
70 warn!(
71 "Could not set block in chunk {:?} with position {:?} (out of bounds?): {:?}",
72 key, rpos, e
73 );
74 }
75 }
76
77 for rpos in resets {
79 loaded_chunk.chunk.reset_block(rpos);
80 loaded_chunk.modified = true;
81 }
82 }
83
84 pub fn maintain(&mut self) {
87 }
92
93 fn path_for(&self, key: Vec2<i32>) -> PathBuf {
94 let mut path = self.path.clone();
95 path.push(format!("chunk_{}_{}.dat", key.x, key.y));
96 path
97 }
98
99 fn load_chunk(&mut self, key: Vec2<i32>) -> &mut LoadedChunk {
100 let path = self.path_for(key);
101 self.chunks.entry(key).or_insert_with(|| {
102 if let Some(chunk) = self.cached_chunks.remove(&key) {
105 return LoadedChunk {
106 chunk,
107 modified: false,
108 };
109 }
110
111 File::open(&path)
112 .ok()
113 .map(|f| {
114 let bytes = match io::BufReader::new(f).bytes().collect::<Result<Vec<_>, _>>() {
115 Ok(bytes) => bytes,
116 Err(err) => {
117 error!(
118 "Failed to read data for chunk {:?} from file: {:?}",
119 key, err
120 );
121 return LoadedChunk::default();
122 },
123 };
124 let chunk = match Chunk::deserialize_from(io::Cursor::new(bytes)) {
125 Some(chunk) => chunk,
126 None => {
127 let mut backup_path = path.clone();
129 backup_path.set_extension("dat_backup_0");
130 let mut i = 1;
131 while backup_path.exists() {
132 backup_path.set_extension(format!("dat_backup_{}", i));
133 i += 1;
134 }
135
136 error!(
137 "Failed to load chunk {:?}, moving possibly corrupt (or too new) \
138 data to {:?} for you to repair.",
139 key, backup_path
140 );
141 if let Err(err) = std::fs::rename(path, backup_path) {
142 error!("Failed to rename invalid chunk file: {:?}", err);
143 }
144 Chunk::default()
145 },
146 };
147
148 LoadedChunk {
149 chunk,
150
151 modified: false,
152 }
153 })
154 .unwrap_or_default()
155 })
156 }
157
158 pub fn unload_chunk(&mut self, key: Vec2<i32>) {
159 if let Some(LoadedChunk { chunk, modified }) = self.chunks.remove(&key) {
160 if modified || self.cached_chunks.peek(&key).is_none() {
161 self.cached_chunks.insert(key, chunk.clone());
162 }
163
164 if !modified {
166 return;
167 }
168
169 if chunk.blocks.is_empty() {
170 let path = self.path_for(key);
171
172 if path.is_file() {
173 if let Err(error) = std::fs::remove_file(&path) {
174 error!(?error, ?path, "Failed to remove file for empty chunk");
175 }
176 }
177 } else {
178 let bytes = match bincode::serialize::<version::Current>(&chunk.prepare_raw()) {
179 Err(err) => {
180 error!("Failed to serialize chunk data: {:?}", err);
181 return;
182 },
183 Ok(bytes) => bytes,
184 };
185
186 let atomic_file =
187 AtomicFile::new(self.path_for(key), OverwriteBehavior::AllowOverwrite);
188 if let Err(err) = atomic_file.write(|file| file.write_all(&bytes)) {
189 error!("Failed to write chunk data to file: {:?}", err);
190 }
191 }
192 }
193 }
194
195 pub fn clear_chunk(&mut self, chunk: Vec2<i32>) {
196 self.cached_chunks.remove(&chunk);
197 self.chunks.insert(chunk, LoadedChunk {
198 chunk: Chunk::default(),
199 modified: true,
200 });
201 }
202
203 pub fn unload_all(&mut self) {
204 for key in self.chunks.keys().copied().collect::<Vec<_>>() {
205 self.unload_chunk(key);
206 }
207 }
208
209 pub fn set_block(&mut self, pos: Vec3<i32>, block: Block) {
210 let key = pos
211 .xy()
212 .map2(TerrainChunk::RECT_SIZE, |e, sz| e.div_euclid(sz as i32));
213 let loaded_chunk = self.load_chunk(key);
214 let old_block = loaded_chunk
215 .chunk
216 .blocks
217 .insert(pos - key * TerrainChunk::RECT_SIZE.map(|e| e as i32), block);
218 if old_block != Some(block) {
219 loaded_chunk.modified = true;
220 }
221 }
222}
223
224impl Drop for TerrainPersistence {
225 fn drop(&mut self) { self.unload_all(); }
226}
227
/// Sparse set of persisted blocks within a single chunk, keyed by
/// chunk-relative position.
#[derive(Default, Serialize, Deserialize, Clone)]
pub struct Chunk {
    blocks: HashMap<Vec3<i32>, Block>,
}
232
233impl Chunk {
234 fn deserialize_from<R: io::Read + Clone>(reader: R) -> Option<Self> {
235 version::try_load(reader)
236 }
237
238 fn prepare_raw(self) -> version::Current { self.into() }
239
240 fn blocks(&self) -> impl Iterator<Item = (Vec3<i32>, Block)> + '_ {
241 self.blocks.iter().map(|(k, b)| (*k, *b))
242 }
243
244 fn reset_block(&mut self, rpos: Vec3<i32>) { self.blocks.remove(&rpos); }
245
246 fn len(&self) -> usize { self.blocks.len() }
248}
249
/// LRU limiter that bounds the cache by the total number of blocks stored
/// rather than by the number of chunks.
struct ByBlockLimiter {
    // Maximum total number of blocks the cache may hold.
    block_limit: usize,
    // Number of blocks currently accounted for across all cached chunks.
    counted_blocks: usize,
}
257
258impl Limiter<Vec2<i32>, Chunk> for ByBlockLimiter {
259 type KeyToInsert<'a> = Vec2<i32>;
260 type LinkType = u32;
261
262 fn is_over_the_limit(&self, _length: usize) -> bool { self.counted_blocks > self.block_limit }
263
264 fn on_insert(
265 &mut self,
266 _length: usize,
267 key: Self::KeyToInsert<'_>,
268 chunk: Chunk,
269 ) -> Option<(Vec2<i32>, Chunk)> {
270 let chunk_size = chunk.len();
271
272 if self.counted_blocks + chunk_size > self.block_limit {
273 None
274 } else {
275 self.counted_blocks += chunk_size;
276 Some((key, chunk))
277 }
278 }
279
280 fn on_replace(
281 &mut self,
282 _length: usize,
283 _old_key: &mut Vec2<i32>,
284 _new_key: Self::KeyToInsert<'_>,
285 old_chunk: &mut Chunk,
286 new_chunk: &mut Chunk,
287 ) -> bool {
288 let old_size = old_chunk.len() as isize; let new_size = new_chunk.len() as isize;
290 let new_total = self
291 .counted_blocks
292 .saturating_add_signed(new_size - old_size);
293
294 if new_total > self.block_limit {
295 false
296 } else {
297 self.counted_blocks = new_total;
298 true
299 }
300 }
301
302 fn on_removed(&mut self, _key: &mut Vec2<i32>, chunk: &mut Chunk) {
303 self.counted_blocks = self.counted_blocks.saturating_sub(chunk.len());
304 }
305
306 fn on_cleared(&mut self) { self.counted_blocks = 0; }
307
308 fn on_grow(&mut self, _new_memory_usage: usize) -> bool { true }
309}
310
311impl ByBlockLimiter {
312 fn new(block_limit: usize) -> Self {
314 Self {
315 block_limit,
316 counted_blocks: 0,
317 }
318 }
319}
320
mod version {
    //! Versioned on-disk formats for persisted chunk data.
    //!
    //! Chunk files are bincode-encoded. Formats `V2` and `V3` begin with a
    //! magic/version field so deserializing with the wrong loader fails
    //! fast; `try_load` simply tries each known format in order.
    use super::*;

    /// The format written for newly saved chunks.
    pub type Current = V3;

    type LoadChunkFn<R> = fn(R) -> Result<Chunk, (&'static str, bincode::Error)>;
    /// Loaders for every supported format, ordered newest to oldest so the
    /// current format is tried first.
    fn loaders<'a, R: io::Read + Clone>() -> &'a [LoadChunkFn<R>] {
        &[load_raw::<V3, _>, load_raw::<V2, _>, load_raw::<V1, _>]
    }

    impl From<Chunk> for Current {
        /// Convert the in-memory representation into the current on-disk
        /// format: positions packed into `(u8, u8, i16)` chunk-relative
        /// coordinates and blocks packed into their `u32` encoding.
        fn from(chunk: Chunk) -> Self {
            Self {
                version: version_magic(3),
                blocks: chunk
                    .blocks
                    .into_iter()
                    .map(|(pos, b)| (pos.x as u8, pos.y as u8, pos.z as i16, b.to_u32()))
                    .collect(),
            }
        }
    }

    /// Version 3: tagged; blocks stored as their raw `u32` encoding.
    #[derive(Serialize, Deserialize)]
    pub struct V3 {
        #[serde(deserialize_with = "version::<_, 3>")]
        pub version: u64,
        pub blocks: Vec<(u8, u8, i16, u32)>,
    }

    impl From<V3> for Chunk {
        fn from(v3: V3) -> Self {
            Self {
                blocks: v3
                    .blocks
                    .into_iter()
                    .map(|(x, y, z, b)| {
                        (
                            Vec3::new(x as i32, y as i32, z as i32),
                            // Unknown block encodings degrade to an empty
                            // block rather than failing the whole chunk.
                            Block::from_u32(b).unwrap_or_else(Block::empty),
                        )
                    })
                    .collect(),
            }
        }
    }

    /// Version 2: tagged; blocks stored via `Block`'s own serde impl.
    #[derive(Deserialize)]
    pub struct V2 {
        #[serde(deserialize_with = "version::<_, 2>")]
        pub version: u64,
        pub blocks: Vec<(u8, u8, i16, Block)>,
    }

    impl From<V2> for Chunk {
        fn from(v2: V2) -> Self {
            Self {
                blocks: v2
                    .blocks
                    .into_iter()
                    .map(|(x, y, z, b)| (Vec3::new(x as i32, y as i32, z as i32), b))
                    .collect(),
            }
        }
    }

    /// Version 1: the original, untagged format — just the position map.
    #[derive(Deserialize)]
    pub struct V1 {
        pub blocks: HashMap<Vec3<i32>, Block>,
    }

    impl From<V1> for Chunk {
        fn from(v1: V1) -> Self { Self { blocks: v1.blocks } }
    }

    /// Combine version number `n` with a fixed magic constant: the version
    /// occupies the low 16 bits, the magic the bits above them.
    fn version_magic(n: u16) -> u64 { (n as u64) | (0x3352ACEEA789 << 16) }

    /// Serde `deserialize_with` helper that accepts the `version` field only
    /// when it equals `version_magic(V)`, rejecting files written in a
    /// different format version.
    fn version<'de, D: serde::Deserializer<'de>, const V: u16>(de: D) -> Result<u64, D::Error> {
        u64::deserialize(de).and_then(|x| {
            if x == version_magic(V) {
                Ok(x)
            } else {
                Err(serde::de::Error::invalid_value(
                    serde::de::Unexpected::Unsigned(x),
                    &"incorrect magic/version bytes",
                ))
            }
        })
    }

    /// Deserialize `reader` as raw format `RawChunk`, converting to the
    /// in-memory [`Chunk`] on success and reporting the attempted format's
    /// type name on failure (for the debug log in `try_load`).
    fn load_raw<RawChunk: Any + Into<Chunk> + DeserializeOwned, R: io::Read + Clone>(
        reader: R,
    ) -> Result<Chunk, (&'static str, bincode::Error)> {
        bincode::deserialize_from::<_, RawChunk>(reader)
            .map(Into::into)
            .map_err(|e| (type_name::<RawChunk>(), e))
    }

    /// Try each known format in turn, returning the first chunk that
    /// deserializes successfully, or `None` if no format matches.
    pub fn try_load<R: io::Read + Clone>(reader: R) -> Option<Chunk> {
        loaders()
            .iter()
            .find_map(|load_raw| match load_raw(reader.clone()) {
                Ok(chunk) => Some(chunk),
                Err((raw_name, e)) => {
                    debug!(
                        "Attempt to load chunk with raw format `{}` failed: {:?}",
                        raw_name, e
                    );
                    None
                },
            })
    }
}