Description
We handle client-to-server notifications synchronously, which means these notifications must be processed quickly, as otherwise the server blocks on them. If a user opens a very large Rust file (100k+ lines), they will start encountering overly long loop turns and the general editor experience becomes very laggy. Unfortunately we can't make the processing of this notification async, so we should instead try to optimize the work we do there.
Relevant code snippets:
rust-analyzer/crates/rust-analyzer/src/main_loop.rs
Lines 746 to 768 in bbcb77e
.on::<lsp_types::notification::DidChangeTextDocument>(|this, params| {
    if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
        match this.mem_docs.get_mut(&path) {
            Some(doc) => {
                // The version passed in DidChangeTextDocument is the version after all edits are applied
                // so we should apply it before the vfs is notified.
                doc.version = params.text_document.version;
            }
            None => {
                tracing::error!("unexpected DidChangeTextDocument: {}", path);
                return Ok(());
            }
        };
        let vfs = &mut this.vfs.write().0;
        let file_id = vfs.file_id(&path).unwrap();
        let mut text = String::from_utf8(vfs.file_contents(file_id).to_vec()).unwrap();
        apply_document_changes(&mut text, params.content_changes);
        vfs.set_file_contents(path, Some(text.into_bytes()));
    }
    Ok(())
})?
rust-analyzer/crates/rust-analyzer/src/lsp_utils.rs
Lines 135 to 183 in bbcb77e
pub(crate) fn apply_document_changes(
    old_text: &mut String,
    content_changes: Vec<lsp_types::TextDocumentContentChangeEvent>,
) {
    let mut line_index = LineIndex {
        index: Arc::new(ide::LineIndex::new(old_text)),
        // We don't care about line endings or offset encoding here.
        endings: LineEndings::Unix,
        encoding: PositionEncoding::Utf16,
    };

    // The changes we got must be applied sequentially, but can cross lines so we
    // have to keep our line index updated.
    // Some clients (e.g. Code) sort the ranges in reverse. As an optimization, we
    // remember the last valid line in the index and only rebuild it if needed.
    // The VFS will normalize the end of lines to `\n`.
    enum IndexValid {
        All,
        UpToLineExclusive(u32),
    }

    impl IndexValid {
        fn covers(&self, line: u32) -> bool {
            match *self {
                IndexValid::UpToLineExclusive(to) => to > line,
                _ => true,
            }
        }
    }

    let mut index_valid = IndexValid::All;
    for change in content_changes {
        match change.range {
            Some(range) => {
                if !index_valid.covers(range.end.line) {
                    line_index.index = Arc::new(ide::LineIndex::new(old_text));
                }
                index_valid = IndexValid::UpToLineExclusive(range.start.line);
                if let Ok(range) = from_proto::text_range(&line_index, range) {
                    old_text.replace_range(Range::<usize>::from(range), &change.text);
                }
            }
            None => {
                *old_text = change.text;
                index_valid = IndexValid::UpToLineExclusive(0);
            }
        }
    }
}
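To make the cost concrete, here is a hypothetical illustration (not code from the repository) of what this function does for a one-character edit near the end of a 100k-line file: it still constructs a fresh `ide::LineIndex` over the entire text just to translate the UTF-16 position of that single edit into a byte offset. The file contents and positions are made up, and `apply_document_changes` is `pub(crate)`, so this only compiles inside the crate:

```rust
use lsp_types::{Position, Range, TextDocumentContentChangeEvent};

#[test]
fn tiny_edit_still_pays_for_the_whole_file() {
    // Stand-in for a ~1.3 MB, 100k-line document.
    let mut text = "fn main() {}\n".repeat(100_000);

    // A single-space insertion at the end of the last line, as a client would send it.
    let tiny_edit = TextDocumentContentChangeEvent {
        range: Some(Range {
            start: Position { line: 99_999, character: 12 },
            end: Position { line: 99_999, character: 12 },
        }),
        range_length: None,
        text: " ".to_owned(),
    };

    // O(document size): ide::LineIndex::new walks all ~1.3 MB of text even though
    // the edit itself is a single byte.
    apply_document_changes(&mut text, vec![tiny_edit]);
    assert_eq!(text.len(), 13 * 100_000 + 1);
}
```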
rust-analyzer/crates/ide-db/src/line_index.rs
Lines 57 to 94 in bbcb77e
pub fn new(text: &str) -> LineIndex {
    let mut utf16_lines = NoHashHashMap::default();
    let mut utf16_chars = Vec::new();

    let mut newlines = vec![0.into()];
    let mut curr_row @ mut curr_col = 0.into();
    let mut line = 0;
    for c in text.chars() {
        let c_len = TextSize::of(c);
        curr_row += c_len;
        if c == '\n' {
            newlines.push(curr_row);

            // Save any utf-16 characters seen in the previous line
            if !utf16_chars.is_empty() {
                utf16_lines.insert(line, mem::take(&mut utf16_chars));
            }

            // Prepare for processing the next line
            curr_col = 0.into();
            line += 1;
            continue;
        }

        if !c.is_ascii() {
            utf16_chars.push(Utf16Char { start: curr_col, end: curr_col + c_len });
        }

        curr_col += c_len;
    }

    // Save any utf-16 characters seen in the last line
    if !utf16_chars.is_empty() {
        utf16_lines.insert(line, utf16_chars);
    }

    LineIndex { newlines, utf16_lines }
}
The main cost here is the LineIndex construction: it traverses the entire document text character by character, so we should look into optimizing how it is built.
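One possible direction, sketched below under simplifying assumptions (this is not a proposed patch, and the types are plain stand-ins for `TextSize`, `Utf16Char`, `NoHashHashMap` and `LineIndex`): the per-character work exists mainly to record UTF-16/UTF-8 offset mappings, but the overwhelming majority of lines in real Rust code are pure ASCII. Scanning the text line by line as raw bytes and decoding characters only for lines that actually contain non-ASCII bytes produces the same data while skipping the expensive path in the common case:

```rust
use std::collections::HashMap;

// Simplified stand-in for Utf16Char: the UTF-8 byte range of a wide char within its line.
#[derive(Clone, Copy, Debug, PartialEq)]
struct WideChar {
    start: u32,
    end: u32,
}

// Simplified stand-in for LineIndex.
struct SimpleLineIndex {
    newlines: Vec<u32>,                       // UTF-8 offset of the start of each line
    utf16_lines: HashMap<u32, Vec<WideChar>>, // wide chars, only for lines that have any
}

impl SimpleLineIndex {
    fn new(text: &str) -> SimpleLineIndex {
        let bytes = text.as_bytes();
        let mut newlines = vec![0u32];
        let mut utf16_lines = HashMap::new();

        let mut line_start = 0usize;
        let mut line_no = 0u32;
        while line_start < bytes.len() {
            // End of the current line: one past the '\n', or the end of the text.
            let nl = bytes[line_start..].iter().position(|&b| b == b'\n');
            let line_end = match nl {
                Some(pos) => line_start + pos + 1,
                None => bytes.len(),
            };

            // Slow path only for lines that contain non-ASCII bytes: decode chars and
            // record their byte ranges, as the original loop does for every character.
            if !bytes[line_start..line_end].is_ascii() {
                let mut wide = Vec::new();
                let mut col = 0u32;
                for c in text[line_start..line_end].chars() {
                    let c_len = c.len_utf8() as u32;
                    if !c.is_ascii() {
                        wide.push(WideChar { start: col, end: col + c_len });
                    }
                    col += c_len;
                }
                utf16_lines.insert(line_no, wide);
            }

            if nl.is_some() {
                newlines.push(line_end as u32);
            }
            line_start = line_end;
            line_no += 1;
        }

        SimpleLineIndex { newlines, utf16_lines }
    }
}
```

Whether this (or SIMD-assisted newline scanning, or caching the index so it isn't rebuilt from scratch on every notification) actually pays off would need to be confirmed with a benchmark on a real 100k-line file.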