//! Core logic for sshx sessions, independent of message transport.

use std::collections::HashMap;
use std::ops::DerefMut;
use std::sync::Arc;

use anyhow::{bail, Context, Result};
use bytes::Bytes;
use parking_lot::{Mutex, RwLock, RwLockWriteGuard};
use sshx_core::{
    proto::{server_update::ServerMessage, SequenceNumbers},
    IdCounter, Sid, Uid,
};
use tokio::sync::{broadcast, watch, Notify};
use tokio::time::Instant;
use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream, WatchStream};
use tokio_stream::Stream;
use tracing::{debug, warn};

use crate::utils::Shutdown;
use crate::web::protocol::{WsServer, WsUser, WsWinsize};

mod snapshot;

/// Store a rolling buffer with at most this quantity of output, per shell.
const SHELL_STORED_BYTES: u64 = 1 << 21; // 2 MiB
/// Static metadata for this session.
#[derive(Debug, Clone)]
pub struct Metadata {
    /// Used to validate that clients have the correct encryption key.
    pub encrypted_zeros: Bytes,

    /// Name of the session (human-readable).
    pub name: String,
}
/// In-memory state for a single sshx session.
#[derive(Debug)]
pub struct Session {
    /// Static metadata for this session.
    metadata: Metadata,

    /// In-memory state for the session.
    shells: RwLock<HashMap<Sid, State>>,

    /// Metadata for currently connected users.
    users: RwLock<HashMap<Uid, WsUser>>,

    /// Atomic counter to get new, unique IDs.
    counter: IdCounter,

    /// Timestamp of the last backend client message from an active connection.
    last_accessed: Mutex<Instant>,

    /// Watch channel source for the ordered list of open shells and sizes.
    source: watch::Sender<Vec<(Sid, WsWinsize)>>,

    /// Broadcasts updates to all WebSocket clients.
    ///
    /// Every update inside this channel must be of idempotent form, since
    /// messages may arrive before or after any snapshot of the current session
    /// state. Duplicated events should remain consistent.
    broadcast: broadcast::Sender<WsServer>,

    /// Sender end of a channel that buffers messages for the client.
    update_tx: async_channel::Sender<ServerMessage>,

    /// Receiver end of a channel that buffers messages for the client.
    update_rx: async_channel::Receiver<ServerMessage>,

    /// Triggered from metadata events when an immediate snapshot is needed.
    sync_notify: Notify,

    /// Set when this session has been closed and removed.
    shutdown: Shutdown,
}
/// Internal state for each shell.
#[derive(Default, Debug)]
struct State {
    /// Sequence number, indicating how many bytes have been received.
    seqnum: u64,

    /// Terminal data chunks.
    data: Vec<Bytes>,

    /// Number of pruned data chunks before `data[0]`.
    chunk_offset: u64,

    /// Number of bytes in pruned data chunks.
    byte_offset: u64,

    /// Set when this shell is terminated.
    closed: bool,

    /// Updated when any of the above fields change.
    notify: Arc<Notify>,
}
impl Session {
    /// Construct a new session.
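    ///
    /// A minimal construction sketch (illustrative, not from the original
    /// file); the `encrypted_zeros` value here is a stand-in, since real
    /// clients derive it by encrypting a block of zeros with the session key:
    ///
    /// ```ignore
    /// use bytes::Bytes;
    ///
    /// let session = Session::new(Metadata {
    ///     encrypted_zeros: Bytes::from_static(b"placeholder"),
    ///     name: "my-session".into(),
    /// });
    /// assert_eq!(session.metadata().name, "my-session");
    /// ```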
    pub fn new(metadata: Metadata) -> Self {
        let now = Instant::now();
        let (update_tx, update_rx) = async_channel::bounded(256);
        Session {
            metadata,
            shells: RwLock::new(HashMap::new()),
            users: RwLock::new(HashMap::new()),
            counter: IdCounter::default(),
            last_accessed: Mutex::new(now),
            source: watch::channel(Vec::new()).0,
            broadcast: broadcast::channel(64).0,
            update_tx,
            update_rx,
            sync_notify: Notify::new(),
            shutdown: Shutdown::new(),
        }
    }

    /// Returns the metadata for this session.
    pub fn metadata(&self) -> &Metadata {
        &self.metadata
    }

    /// Gives access to the ID counter for obtaining new IDs.
    pub fn counter(&self) -> &IdCounter {
        &self.counter
    }

    /// Return the sequence numbers for current shells.
    pub fn sequence_numbers(&self) -> SequenceNumbers {
        let shells = self.shells.read();
        let mut map = HashMap::with_capacity(shells.len());
        for (key, value) in &*shells {
            if !value.closed {
                map.insert(key.0, value.seqnum);
            }
        }
        SequenceNumbers { map }
    }

    /// Receive a notification on broadcasted message events.
    pub fn subscribe_broadcast(
        &self,
    ) -> impl Stream<Item = Result<WsServer, BroadcastStreamRecvError>> + Unpin {
        BroadcastStream::new(self.broadcast.subscribe())
    }

    /// Receive a notification every time the set of shells is changed.
    pub fn subscribe_shells(&self) -> impl Stream<Item = Vec<(Sid, WsWinsize)>> + Unpin {
        WatchStream::new(self.source.subscribe())
    }

    /// Subscribe for chunks from a shell, until it is closed.
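    ///
    /// A consumption sketch (illustrative only), assuming a Tokio context and
    /// `tokio_stream::StreamExt` for `next()`; `sid` is a hypothetical shell
    /// ID. The stream is not `Unpin`, so it must be pinned before polling:
    ///
    /// ```ignore
    /// use tokio_stream::StreamExt;
    ///
    /// let mut stream = Box::pin(session.subscribe_chunks(sid, 0));
    /// while let Some((seqnum, chunks)) = stream.next().await {
    ///     // `seqnum` is the byte offset of `chunks[0]` in the shell's output.
    ///     println!("{} chunks starting at byte {seqnum}", chunks.len());
    /// }
    /// ```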
    pub fn subscribe_chunks(
        &self,
        id: Sid,
        mut chunknum: u64,
    ) -> impl Stream<Item = (u64, Vec<Bytes>)> + '_ {
        async_stream::stream! {
            while !self.shutdown.is_terminated() {
                // We absolutely cannot hold `shells` across an await point,
                // since that would cause deadlocks.
                let (seqnum, chunks, notified) = {
                    let shells = self.shells.read();
                    let shell = match shells.get(&id) {
                        Some(shell) if !shell.closed => shell,
                        _ => return,
                    };
                    let notify = Arc::clone(&shell.notify);
                    let notified = async move { notify.notified().await };
                    let mut seqnum = shell.byte_offset;
                    let mut chunks = Vec::new();
                    let current_chunks = shell.chunk_offset + shell.data.len() as u64;
                    if chunknum < current_chunks {
                        let start = chunknum.saturating_sub(shell.chunk_offset) as usize;
                        seqnum += shell.data[..start].iter().map(|x| x.len() as u64).sum::<u64>();
                        chunks = shell.data[start..].to_vec();
                        chunknum = current_chunks;
                    }
                    (seqnum, chunks, notified)
                };
                if !chunks.is_empty() {
                    yield (seqnum, chunks);
                }
                tokio::select! {
                    _ = notified => (),
                    _ = self.terminated() => return,
                }
            }
        }
    }

    /// Add a new shell to the session.
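    ///
    /// A lifecycle sketch (illustrative only), pairing this with
    /// [`Session::close_shell`] and assuming the ID counter's `next_sid`
    /// method for a fresh ID:
    ///
    /// ```ignore
    /// let sid = session.counter().next_sid();
    /// session.add_shell(sid, (0, 0))?;
    /// assert!(session.add_shell(sid, (0, 0)).is_err()); // duplicate ID
    /// session.close_shell(sid)?;
    /// ```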
    pub fn add_shell(&self, id: Sid, center: (i32, i32)) -> Result<()> {
        use std::collections::hash_map::Entry::*;
        let _guard = match self.shells.write().entry(id) {
            Occupied(_) => bail!("shell already exists with id={id}"),
            Vacant(v) => v.insert(State::default()),
        };
        self.source.send_modify(|source| {
            let winsize = WsWinsize {
                x: center.0,
                y: center.1,
                ..Default::default()
            };
            source.push((id, winsize));
        });
        self.sync_now();
        Ok(())
    }

    /// Terminates an existing shell.
    pub fn close_shell(&self, id: Sid) -> Result<()> {
        match self.shells.write().get_mut(&id) {
            Some(shell) if !shell.closed => {
                shell.closed = true;
                shell.notify.notify_waiters();
            }
            Some(_) => return Ok(()),
            None => bail!("cannot close shell with id={id}, does not exist"),
        }
        self.source.send_modify(|source| {
            source.retain(|&(x, _)| x != id);
        });
        self.sync_now();
        Ok(())
    }

    fn get_shell_mut(&self, id: Sid) -> Result<impl DerefMut<Target = State> + '_> {
        let shells = self.shells.write();
        match shells.get(&id) {
            Some(shell) if !shell.closed => {
                Ok(RwLockWriteGuard::map(shells, |s| s.get_mut(&id).unwrap()))
            }
            Some(_) => bail!("cannot update shell with id={id}, already closed"),
            None => bail!("cannot update shell with id={id}, does not exist"),
        }
    }

    /// Change the size of a terminal, notifying clients if necessary.
    pub fn move_shell(&self, id: Sid, winsize: Option<WsWinsize>) -> Result<()> {
        let _guard = self.get_shell_mut(id)?; // Ensures mutual exclusion.
        self.source.send_modify(|source| {
            if let Some(idx) = source.iter().position(|&(sid, _)| sid == id) {
                let (_, oldsize) = source.remove(idx);
                source.push((id, winsize.unwrap_or(oldsize)));
            }
        });
        Ok(())
    }

    /// Receive new data into the session.
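    ///
    /// Writes are keyed by sequence number, so retransmitted or overlapping
    /// segments are deduplicated. An illustrative sketch (not from the
    /// original file); `sid` is a hypothetical shell ID:
    ///
    /// ```ignore
    /// session.add_data(sid, Bytes::from_static(b"hello"), 0)?; // stores "hello"
    /// session.add_data(sid, Bytes::from_static(b"hello"), 0)?; // duplicate, ignored
    /// session.add_data(sid, Bytes::from_static(b"lo, world"), 3)?; // appends ", world"
    /// assert_eq!(session.sequence_numbers().map[&sid.0], 12);
    /// ```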
    pub fn add_data(&self, id: Sid, data: Bytes, seq: u64) -> Result<()> {
        let mut shell = self.get_shell_mut(id)?;

        if seq <= shell.seqnum && seq + data.len() as u64 > shell.seqnum {
            let start = shell.seqnum - seq;
            let segment = data.slice(start as usize..);
            debug!(%id, bytes = segment.len(), "adding data to shell");
            shell.seqnum += segment.len() as u64;
            shell.data.push(segment);

            // Prune old chunks if we've exceeded the maximum stored bytes.
            let mut stored_bytes = shell.seqnum - shell.byte_offset;
            if stored_bytes > SHELL_STORED_BYTES {
                let mut offset = 0;
                while offset < shell.data.len() && stored_bytes > SHELL_STORED_BYTES {
                    let bytes = shell.data[offset].len() as u64;
                    stored_bytes -= bytes;
                    shell.chunk_offset += 1;
                    shell.byte_offset += bytes;
                    offset += 1;
                }
                shell.data.drain(..offset);
            }

            shell.notify.notify_waiters();
        }
        Ok(())
    }

    /// List all the users in the session.
    pub fn list_users(&self) -> Vec<(Uid, WsUser)> {
        self.users
            .read()
            .iter()
            .map(|(k, v)| (*k, v.clone()))
            .collect()
    }

    /// Update a user in place by ID, applying a callback to the object.
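    ///
    /// For instance, renaming a connected user (illustrative sketch; `uid` is
    /// a hypothetical user ID):
    ///
    /// ```ignore
    /// session.update_user(uid, |user| user.name = "Alice".into())?;
    /// ```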
    pub fn update_user(&self, id: Uid, f: impl FnOnce(&mut WsUser)) -> Result<()> {
        let updated_user = {
            let mut users = self.users.write();
            let user = users.get_mut(&id).context("user not found")?;
            f(user);
            user.clone()
        };
        self.broadcast
            .send(WsServer::UserDiff(id, Some(updated_user)))
            .ok();
        Ok(())
    }

    /// Add a new user, and return a guard that removes the user when dropped.
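    ///
    /// A usage sketch (illustrative only, assuming the counter's `next_uid`
    /// method): the user stays in the session for as long as the guard is
    /// alive, e.g. the duration of a WebSocket connection handler:
    ///
    /// ```ignore
    /// let uid = session.counter().next_uid();
    /// {
    ///     let _guard = session.user_scope(uid)?;
    ///     assert_eq!(session.list_users().len(), 1);
    /// } // Guard dropped here, which removes the user.
    /// assert!(session.list_users().is_empty());
    /// ```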
    pub fn user_scope(&self, id: Uid) -> Result<impl Drop + '_> {
        use std::collections::hash_map::Entry::*;

        #[must_use]
        struct UserGuard<'a>(&'a Session, Uid);

        impl Drop for UserGuard<'_> {
            fn drop(&mut self) {
                self.0.remove_user(self.1);
            }
        }

        match self.users.write().entry(id) {
            Occupied(_) => bail!("user already exists with id={id}"),
            Vacant(v) => {
                let user = WsUser {
                    name: format!("User {id}"),
                    cursor: None,
                    focus: None,
                };
                v.insert(user.clone());
                self.broadcast.send(WsServer::UserDiff(id, Some(user))).ok();
                Ok(UserGuard(self, id))
            }
        }
    }

    /// Remove an existing user.
    fn remove_user(&self, id: Uid) {
        if self.users.write().remove(&id).is_none() {
            warn!(%id, "invariant violation: removed user that does not exist");
        }
        self.broadcast.send(WsServer::UserDiff(id, None)).ok();
    }

    /// Send a chat message into the room.
    pub fn send_chat(&self, id: Uid, msg: &str) -> Result<()> {
        // Populate the message with the current name in case it's not known later.
        let name = {
            let users = self.users.read();
            users.get(&id).context("user not found")?.name.clone()
        };
        self.broadcast
            .send(WsServer::Hear(id, name, msg.into()))
            .ok();
        Ok(())
    }

    /// Send a measurement of the shell latency.
    pub fn send_latency_measurement(&self, latency: u64) {
        self.broadcast.send(WsServer::ShellLatency(latency)).ok();
    }

    /// Register a backend client heartbeat, refreshing the timestamp.
    pub fn access(&self) {
        *self.last_accessed.lock() = Instant::now();
    }

    /// Returns the timestamp of the last backend client activity.
    pub fn last_accessed(&self) -> Instant {
        *self.last_accessed.lock()
    }

    /// Access the sender of the client message channel for this session.
    pub fn update_tx(&self) -> &async_channel::Sender<ServerMessage> {
        &self.update_tx
    }

    /// Access the receiver of the client message channel for this session.
    pub fn update_rx(&self) -> &async_channel::Receiver<ServerMessage> {
        &self.update_rx
    }

    /// Mark the session as requiring an immediate storage sync.
    ///
    /// This is needed for consistency when creating new shells, removing old
    /// shells, or updating the ID counter. If these operations are lost in a
    /// server restart, then the snapshot that contains them would be invalid
    /// compared to the current backend client state.
    ///
    /// Note that it is not necessary to do this all the time though, since that
    /// would put too much pressure on the database. Lost terminal data is
    /// already re-synchronized periodically.
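    ///
    /// A sketch of the consuming side (illustrative only): a persistence task
    /// might race this notification against a periodic timer, assuming a
    /// hypothetical `save_snapshot` helper:
    ///
    /// ```ignore
    /// use std::time::Duration;
    ///
    /// loop {
    ///     tokio::select! {
    ///         _ = session.sync_now_wait() => save_snapshot(&session).await?,
    ///         _ = tokio::time::sleep(Duration::from_secs(10)) => save_snapshot(&session).await?,
    ///         _ = session.terminated() => break,
    ///     }
    /// }
    /// ```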
    pub fn sync_now(&self) {
        self.sync_notify.notify_one();
    }

    /// Resolves when the session has been marked for an immediate sync.
    pub async fn sync_now_wait(&self) {
        self.sync_notify.notified().await
    }

    /// Send a termination signal to exit this session.
    pub fn shutdown(&self) {
        self.shutdown.shutdown()
    }

    /// Resolves when the session has received a shutdown signal.
    pub async fn terminated(&self) {
        self.shutdown.wait().await
    }
}