veloren_server/sys/msg/register.rs
use crate::{
    EditableSettings, Settings,
    client::Client,
    login_provider::{LoginProvider, PendingLogin},
    metrics::PlayerMetrics,
    settings::{BanOperation, banlist::NormalizedIpAddr},
    sys::sentinel::TrackedStorages,
};
use authc::Uuid;
use common::{
    comp::{self, Admin, Player, Stats},
    event::{ClientDisconnectEvent, EventBus, MakeAdminEvent},
    recipe::{default_component_recipe_book, default_repair_recipe_book},
    resources::TimeOfDay,
    shared_server_config::ServerConstants,
    uid::Uid,
};
use common_base::prof_span;
use common_ecs::{Job, Origin, Phase, System};
use common_net::msg::{
    CharacterInfo, ClientRegister, DisconnectReason, PlayerInfo, PlayerListUpdate, RegisterError,
    ServerGeneral, ServerInit, WorldMapMsg, server::ServerDescription,
};
use hashbrown::{HashMap, hash_map};
use itertools::Either;
use rayon::prelude::*;
use specs::{
    Entities, Join, LendJoin, ParJoin, Read, ReadExpect, ReadStorage, SystemData, WriteExpect,
    WriteStorage, shred,
};
use tracing::{debug, info, trace, warn};

#[cfg(feature = "plugins")]
use common_state::plugin::PluginMgr;

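/// Read-only resources and component storages used by this system, bundled into
/// a single struct via `#[derive(SystemData)]` to keep `Sys::SystemData` small.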
#[derive(SystemData)]
pub struct ReadData<'a> {
    entities: Entities<'a>,
    stats: ReadStorage<'a, Stats>,
    uids: ReadStorage<'a, Uid>,
    client_disconnect_events: Read<'a, EventBus<ClientDisconnectEvent>>,
    make_admin_events: Read<'a, EventBus<MakeAdminEvent>>,
    login_provider: ReadExpect<'a, LoginProvider>,
    player_metrics: ReadExpect<'a, PlayerMetrics>,
    settings: ReadExpect<'a, Settings>,
    time_of_day: Read<'a, TimeOfDay>,
    material_stats: ReadExpect<'a, comp::item::MaterialStatManifest>,
    ability_map: ReadExpect<'a, comp::item::tool::AbilityMap>,
    recipe_book: ReadExpect<'a, common::recipe::RecipeBookManifest>,
    map: ReadExpect<'a, WorldMapMsg>,
    trackers: TrackedStorages<'a>,
    #[cfg(feature = "plugins")]
    plugin_mgr: Read<'a, PluginMgr>,
    data_dir: ReadExpect<'a, crate::DataDir>,
}

57/// This system will handle new messages from clients
58#[derive(Default)]
59pub struct Sys;
60impl<'a> System<'a> for Sys {
61 type SystemData = (
62 ReadData<'a>,
63 WriteStorage<'a, Client>,
64 WriteStorage<'a, Player>,
65 WriteStorage<'a, PendingLogin>,
66 WriteExpect<'a, EditableSettings>,
67 );
68
69 const NAME: &'static str = "msg::register";
70 const ORIGIN: Origin = Origin::Server;
71 const PHASE: Phase = Phase::Create;
72
73 fn run(
74 _job: &mut Job<Self>,
75 (read_data, mut clients, mut players, mut pending_logins, mut editable_settings): Self::SystemData,
76 ) {
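        // High-level flow:
        //   1. Snapshot the current player list (later sent to each newly
        //      registered client) and build a UUID -> entity lookup.
        //   2. Start auth verification for incoming `ClientRegister` messages.
        //   3. Resolve pending logins in parallel, queueing results behind a mutex.
        //   4. Serially apply the results: insert `Player` components, schedule
        //      duplicate logins for retry next tick, broadcast new players to all
        //      clients, and apply any queued ban upgrades.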
        let mut make_admin_emitter = read_data.make_admin_events.emitter();
        // Player list to send to new players, and a lookup from UUID to entity (so
        // we don't have to do a linear scan over all entities on each login to see
        // if it's a duplicate).
        //
        // NOTE: For this to work as desired, we must maintain the invariant that
        // there is just one player per UUID!
        let (player_list, old_players_by_uuid): (HashMap<_, _>, HashMap<_, _>) = (
            &read_data.entities,
            &read_data.uids,
            clients.maybe(),
            &players,
            read_data.stats.maybe(),
            read_data.trackers.admin.maybe(),
        )
            .join()
            .filter(|(_, _, client, _, _, _)| {
                client.is_none_or(|client| client.client_type.emit_login_events())
            })
            .map(|(entity, uid, _, player, stats, admin)| {
                (
                    (*uid, PlayerInfo {
                        is_online: true,
                        is_moderator: admin.is_some(),
                        player_alias: player.alias.clone(),
                        character: stats.map(|stats| CharacterInfo {
                            name: stats.name.clone(),
                            // NOTE: hack, read the docs for body::Gender for more
                            gender: stats.original_body.humanoid_gender(),
                            battle_mode: player.battle_mode,
                        }),
                        uuid: player.uuid(),
                    }),
                    (player.uuid(), entity),
                )
            })
            .unzip();
        let max_players = usize::from(read_data.settings.max_players);
        // NOTE: max_players starts as a u16, so this will not use unlimited memory
        // even if people set absurdly high values (though we should also cap the
        // value elsewhere).
        let capacity = max_players * 2;
        // List of new players used to update the player lists of all clients.
        //
        // Big enough that we hopefully won't have to reallocate.
        //
        // Also includes a list of logins to retry and finished_pending, since we
        // happen to update those around the same time that we update the new
        // players list.
        //
        // NOTE: the stdlib mutex would be more than good enough on Linux and
        // (probably) Windows, but not on Mac, hence parking_lot.
        let new_players = parking_lot::Mutex::new((
            HashMap::<_, (_, _, _, _)>::with_capacity(capacity),
            Vec::with_capacity(capacity),
            Vec::with_capacity(capacity),
        ));

        // Defer auth verification: `verify` only kicks off the auth request and
        // returns a `PendingLogin` that is resolved below, so a slow auth server
        // can't lock up this system.
        for (entity, client) in (&read_data.entities, &mut clients).join() {
            let mut locale = None;

            let _ = super::try_recv_all(client, 0, |_, msg: ClientRegister| {
                trace!(?msg.token_or_username, "defer auth lockup");
                let pending = read_data.login_provider.verify(&msg.token_or_username);
                locale = msg.locale;
                let _ = pending_logins.insert(entity, pending);
                Ok(())
            });

            // Update the client's locale if the register message provided one.
            if let Some(locale) = locale {
                client.locale = Some(locale);
            }
        }

        let old_player_count = player_list.len();

        // NOTE: this is just a default value; it will be overwritten in
        // `ServerExt::update_character_data`.
        let battle_mode = read_data.settings.gameplay.battle_mode.default_mode();
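        // Ban upgrades requested during the parallel login section are queued on
        // this bus and applied serially at the end of the run, where we have
        // mutable access to `editable_settings` (see the loop at the bottom).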
        let mut upgradeable_bans: EventBus<(NormalizedIpAddr, Uuid, String)> = EventBus::default();

        (
            &read_data.entities,
            &read_data.uids,
            &clients,
            !players.mask(),
            &mut pending_logins,
        )
            .join()
            // NOTE: Required because Specs has very poor work splitting for sparse joins.
            .par_bridge()
            .for_each_init(
                || (read_data.client_disconnect_events.emitter(), upgradeable_bans.emitter()),
                |(client_disconnect_emitter, upgradeable_ban_emitter), (entity, uid, client, _, pending)| {
                    prof_span!("msg::register login");
                    if let Err(e) = || -> Result<(), crate::error::Error> {
                        let extra_checks = |username: String, uuid: authc::Uuid| {
                            // We construct a few things outside the lock to reduce contention.
                            let pending_login = PendingLogin::new_success(username.clone(), uuid);
                            let player = Player::new(username, battle_mode, uuid, None);
                            let admin = editable_settings.admins.get(&uuid);
                            let player_list_update_msg = player
                                .is_valid()
                                .then_some(PlayerInfo {
                                    player_alias: player.alias.clone(),
                                    is_online: true,
                                    is_moderator: admin.is_some(),
                                    character: None, // new players will be on character select.
                                    uuid: player.uuid(),
                                })
                                .map(|player_info| {
                                    // Prepare the player list update to be sent to all clients.
                                    client.prepare(ServerGeneral::PlayerListUpdate(
                                        PlayerListUpdate::Add(*uid, player_info),
                                    ))
                                });
                            // Check if this player was already logged in before the system
                            // started.
                            let old_player = old_players_by_uuid
                                .get(&uuid)
                                .copied()
                                // We only need the old client to report an error; however, we
                                // can't assume the old player has a client (even though it would
                                // be a bit strange for them not to), so we have to remember that
                                // case. So we grab the old client (outside the lock, to avoid
                                // contention). We have to distinguish this from the case of a
                                // *new* player already having logged in (which we can't check
                                // until the lock is taken); in that case, we *know* the client
                                // is present, since the list is only populated by the current
                                // join (which includes the client).
                                .map(|entity| (entity, Some(clients.get(entity))));
                            // We take the lock only when necessary, and for a short duration,
                            // to avoid contention with other threads. We need to hold the
                            // guard past the end of the login function because otherwise
                            // there's a race between when we read it and when we (potentially)
                            // write to it.
                            let guard = new_players.lock();
                            // Guard comes first in the tuple so it's dropped before the other
                            // stuff if login returns an error.
                            (
                                old_player_count + guard.0.len() >= max_players,
                                (
                                    guard,
                                    (
                                        pending_login,
                                        player,
                                        admin,
                                        player_list_update_msg,
                                        old_player,
                                    ),
                                ),
                            )
                        };

                        // Destructure new_players_guard last so it's dropped before the
                        // other bindings.
                        let (
                            (pending_login, player, admin, player_list_update_msg, old_player),
                            mut new_players_guard,
                        ) = match LoginProvider::login(
                            pending,
                            client,
                            &editable_settings.admins,
                            &editable_settings.whitelist,
                            &editable_settings.banlist,
                            extra_checks,
                            |ip, uuid, username| {
                                upgradeable_ban_emitter.emit((ip, uuid, username))
                            },
                        ) {
                            None => return Ok(()),
                            Some(r) => {
                                match r {
                                    Err(e) => {
                                        new_players.lock().2.push(entity);
                                        // NOTE: Done only on error to avoid doing extra work
                                        // within the lock.
                                        trace!(?e, "pending login returned error");
                                        client_disconnect_emitter.emit(ClientDisconnectEvent(
                                            entity,
                                            common::comp::DisconnectReason::Kicked,
                                        ));
                                        client.send(Err(e))?;
                                        return Ok(());
                                    },
                                    // Swap the order of the tuple, so when it's destructured the
                                    // guard is dropped first.
                                    Ok((guard, res)) => (res, guard),
                                }
                            },
                        };

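                        // Reject client types that aren't allowed for this account's
                        // role (`admin` is `None` for regular players), dropping the
                        // lock guard before emitting the disconnect.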
                        if !client
                            .client_type
                            .is_valid_for_role(admin.map(|admin| admin.role.into()))
                        {
                            drop(new_players_guard);
                            client_disconnect_emitter.emit(ClientDisconnectEvent(
                                entity,
                                common::comp::DisconnectReason::InvalidClientType,
                            ));
                            return Ok(());
                        }

                        let (new_players_by_uuid, retries, finished_pending) =
                            &mut *new_players_guard;
                        finished_pending.push(entity);
                        // Check if the user logged in before us during this tick (this is why we
                        // need the lock held).
                        let uuid = player.uuid();
                        let old_player = old_player.map_or_else(
                            move || match new_players_by_uuid.entry(uuid) {
                                // We don't actually extract the client yet, to avoid doing extra
                                // work with the lock held.
                                hash_map::Entry::Occupied(o) => Either::Left((o.get().0, None)),
                                hash_map::Entry::Vacant(v) => Either::Right(v),
                            },
                            Either::Left,
                        );
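                        // `Either::Left` here means another login already exists for
                        // this UUID (either from before this tick, or from earlier in
                        // this tick); `Either::Right` is the vacant entry we'll fill
                        // for a genuinely new login.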
                        let vacant_player = match old_player {
                            Either::Left((old_entity, old_client)) => {
                                if matches!(old_client, None | Some(Some(_))) {
                                    // We can't log the new client in right now, as the
                                    // removal of the old client and player occurs later in
                                    // the tick, so we instead set up the new login to be
                                    // processed in the next tick: create a "fake" successful
                                    // pending auth and mark it to be inserted into
                                    // pending_logins at the end of this run.
                                    retries.push((entity, pending_login));
                                    drop(new_players_guard);
                                    let old_client = old_client
                                        .flatten()
                                        .or_else(|| clients.get(old_entity))
                                        .expect(
                                            "All entries in the new player list were explicitly \
                                             joining on client",
                                        );
                                    let _ = old_client.send(ServerGeneral::Disconnect(
                                        DisconnectReason::Kicked(String::from(
                                            "You have logged in from another location.",
                                        )),
                                    ));
                                } else {
                                    drop(new_players_guard);
                                    // A player without a client is strange, so we don't really
                                    // want to retry. Warn about this case and hope that trying
                                    // to perform the disconnect process removes the invalid
                                    // player entry.
                                    warn!(
                                        "Player without client detected for entity {:?}",
                                        old_entity
                                    );
                                }
                                // Remove the old client.
                                client_disconnect_emitter.emit(ClientDisconnectEvent(
                                    old_entity,
                                    common::comp::DisconnectReason::NewerLogin,
                                ));
                                return Ok(());
                            },
                            Either::Right(v) => v,
                        };

                        let Some(player_login_msg) = player_list_update_msg else {
                            drop(new_players_guard);
                            // Invalid player
                            client.send(Err(RegisterError::InvalidCharacter))?;
                            return Ok(());
                        };

                        // We know the player list didn't already contain this entity because we
                        // joined on !players, so we can assume from here that we'll definitely be
                        // adding a new player.

                        // Add to list to notify all clients of the new player
                        vacant_player.insert((
                            entity,
                            player,
                            admin,
                            client
                                .client_type
                                .emit_login_events()
                                .then_some(player_login_msg),
                        ));
                        drop(new_players_guard);
                        read_data.player_metrics.players_connected.inc();

                        // Tell the client its request was successful.
                        client.send(Ok(()))?;

                        #[cfg(feature = "plugins")]
                        let active_plugins = read_data.plugin_mgr.plugin_list();
                        #[cfg(not(feature = "plugins"))]
                        let active_plugins = Vec::default();

                        let server_descriptions = &editable_settings.server_description;
                        let description = ServerDescription {
                            motd: server_descriptions
                                .get(client.locale.as_deref())
                                .map(|d| d.motd.clone())
                                .unwrap_or_default(),
                            rules: server_descriptions
                                .get_rules(client.locale.as_deref())
                                .map(str::to_string),
                        };

                        // Send client all the tracked components currently attached to its entity
                        // as well as synced resources (currently only `TimeOfDay`)
                        debug!("Starting initial sync with client.");
                        client.send(ServerInit::GameSync {
                            // Send client their entity
                            entity_package: read_data
                                .trackers
                                .create_entity_package_with_uid(entity, *uid, None, None, None),
                            role: admin.map(|admin| admin.role.into()),
                            time_of_day: *read_data.time_of_day,
                            max_group_size: read_data.settings.max_player_group_size,
                            client_timeout: read_data.settings.client_timeout,
                            world_map: (*read_data.map).clone(),
                            recipe_book: (*read_data.recipe_book).clone(),
                            component_recipe_book: default_component_recipe_book().cloned(),
                            repair_recipe_book: default_repair_recipe_book().cloned(),
                            material_stats: (*read_data.material_stats).clone(),
                            ability_map: (*read_data.ability_map).clone(),
                            server_constants: ServerConstants {
                                day_cycle_coefficient: read_data.settings.day_cycle_coefficient(),
                            },
                            description,
                            active_plugins,
                        })?;
                        debug!("Done initial sync with client.");

                        // Send initial player list
                        client.send(ServerGeneral::PlayerListUpdate(PlayerListUpdate::Init(
                            player_list.clone(),
                        )))?;

                        Ok(())
                    }() {
                        trace!(?e, "failed to process register");
                    }
                },
            );

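        // The parallel section is over; everything below runs on one thread with
        // full mutable access, draining the state queued up above.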
        let (new_players, retries, finished_pending) = new_players.into_inner();
        finished_pending.into_iter().for_each(|e| {
            // Remove all entities in finished_pending from pending_logins.
            pending_logins.remove(e);
        });

        // Insert retry attempts back into pending_logins to be processed next tick
        for (entity, pending) in retries {
            let _ = pending_logins.insert(entity, pending);
        }

        // Handle new players.
        let msgs = new_players
            .into_values()
            .filter_map(|(entity, player, admin, msg)| {
                let username = &player.alias;
                let uuid = player.uuid();
                info!(?username, "New User");
                // Add Player component to this client.
                //
                // Note that since players has been write locked for the duration of this
                // system, we know that nobody else added any players since we
                // last checked its value, and we checked that everything in
                // new_players was not already in players, so we know the insert
                // succeeds and the old entry was vacant. Moreover, we know that all new
                // players we added have different UUIDs both from each other, and from any
                // old players, preserving the uniqueness invariant.
                players
                    .insert(entity, player)
                    .expect("The entity was joined against in the same system, so it exists");

                // Give the Admin component to the player if their UUID is in the
                // admin list.
                if let Some(admin) = admin {
                    // We need to defer writing to the Admin storage since it's borrowed
                    // immutably by this system via TrackedStorages.
                    make_admin_emitter.emit(MakeAdminEvent {
                        entity,
                        admin: Admin(admin.role.into()),
                        uuid,
                    });
                }
                msg
            })
            .collect::<Vec<_>>();

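        // NOTE: each message in `msgs` was already serialized once via
        // `Client::prepare` in the login section above, so the broadcast below
        // (presumably) just hands out the prepared bytes rather than re-encoding
        // per client.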
        // Tell all clients to add the new players to the player list, in parallel.
        (players.mask(), &clients)
            .par_join()
            .for_each(|(_, client)| {
                // Send messages sequentially within each client; by the time we have enough
                // players to make parallelizing useful, we will have way more
                // players than cores.
                msgs.iter().for_each(|msg| {
                    let _ = client.send_prepared(msg);
                });
            });

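        // Apply any IP-ban upgrades requested during login. This has to happen
        // here rather than in the parallel section because `ban_operation` needs
        // `&mut` access to the banlist and (given the `data_dir` argument)
        // persists the updated settings.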
        for (ip, uuid, username) in upgradeable_bans.recv_all_mut() {
            if let Err(error) = editable_settings.banlist.ban_operation(
                read_data.data_dir.as_ref(),
                chrono::Utc::now(),
                uuid,
                username,
                BanOperation::UpgradeToIpBan { ip },
                false,
            ) {
                warn!(?error, ?uuid, "Upgrading ban to IP ban failed");
            }
        }
    }
}