
Commit eb0c646

committed May 27, 2021
fix(http1): reduce memory used with flatten write strategy
If the write buffer was filled with large bufs from the user, such that it couldn't be fully written to the transport, the write buffer could grow significantly: its cursor advanced past the flushed bytes without ever shifting the unwritten bytes back to the front. The buffer will now shift the unwritten bytes to the front when the next buf wouldn't fit in the already allocated space.
1 parent e61b494 commit eb0c646
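
For readers outside the hyper codebase, here is a minimal, standalone sketch of the idea behind the fix. The Cursor struct and the extend helper below are simplified stand-ins, not hyper's actual types; only the maybe_unshift logic mirrors the change in this commit: before appending a chunk that wouldn't fit in the spare capacity, the already-flushed prefix is drained so the allocation stays bounded instead of accumulating stale bytes.

    // Simplified stand-in for hyper's Cursor<Vec<u8>> (illustrative only).
    struct Cursor {
        bytes: Vec<u8>,
        pos: usize, // bytes before `pos` were already written to the transport
    }

    impl Cursor {
        // Mirrors the maybe_unshift added in this commit: drop the consumed
        // prefix, but only when the next chunk wouldn't fit in the spare
        // capacity that is already allocated.
        fn maybe_unshift(&mut self, additional: usize) {
            if self.pos == 0 {
                return;
            }
            if self.bytes.capacity() - self.bytes.len() >= additional {
                return;
            }
            self.bytes.drain(0..self.pos);
            self.pos = 0;
        }

        // Illustrative helper: what the Flatten strategy conceptually does
        // when buffering another chunk.
        fn extend(&mut self, chunk: &[u8]) {
            self.maybe_unshift(chunk.len());
            self.bytes.extend_from_slice(chunk);
        }
    }

    fn main() {
        let mut cur = Cursor { bytes: Vec::with_capacity(16), pos: 0 };
        cur.extend(b"hello world, ");
        cur.pos = 11; // pretend the transport accepted the first 11 bytes

        // This chunk exceeds the spare capacity, so the 11 flushed bytes are
        // drained first instead of letting the Vec keep growing past them.
        cur.extend(&[b'X'; 16]);
        assert_eq!(cur.pos, 0);
        assert_eq!(&cur.bytes[..2], b", ");
    }

Without the unshift, every partially flushed chunk stayed at the front of the Vec, so repeatedly buffering large bufs after partial writes kept growing the allocation; draining the consumed prefix caps it at roughly the unwritten tail plus the new chunk.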

File tree

1 file changed: +62 -8 lines

src/proto/h1/io.rs

+62 -8
@@ -56,7 +56,12 @@ where
     B: Buf,
 {
     pub(crate) fn new(io: T) -> Buffered<T, B> {
-        let write_buf = WriteBuf::new(&io);
+        let strategy = if io.is_write_vectored() {
+            WriteStrategy::Queue
+        } else {
+            WriteStrategy::Flatten
+        };
+        let write_buf = WriteBuf::new(strategy);
         Buffered {
             flush_pipeline: false,
             io,
@@ -419,6 +424,24 @@ impl<T: AsRef<[u8]>> Cursor<T> {
 }
 
 impl Cursor<Vec<u8>> {
+    /// If we've advanced the position a bit in this cursor, and wish to
+    /// extend the underlying vector, we may wish to unshift the "read" bytes
+    /// off, and move everything else over.
+    fn maybe_unshift(&mut self, additional: usize) {
+        if self.pos == 0 {
+            // nothing to do
+            return;
+        }
+
+        if self.bytes.capacity() - self.bytes.len() >= additional {
+            // there's room!
+            return;
+        }
+
+        self.bytes.drain(0..self.pos);
+        self.pos = 0;
+    }
+
     fn reset(&mut self) {
         self.pos = 0;
         self.bytes.clear();
@@ -463,12 +486,7 @@ pub(super) struct WriteBuf<B> {
 }
 
 impl<B: Buf> WriteBuf<B> {
-    fn new(io: &impl AsyncWrite) -> WriteBuf<B> {
-        let strategy = if io.is_write_vectored() {
-            WriteStrategy::Queue
-        } else {
-            WriteStrategy::Flatten
-        };
+    fn new(strategy: WriteStrategy) -> WriteBuf<B> {
         WriteBuf {
             headers: Cursor::new(Vec::with_capacity(INIT_BUFFER_SIZE)),
             max_buf_size: DEFAULT_MAX_BUFFER_SIZE,
@@ -492,6 +510,8 @@ where
         match self.strategy {
             WriteStrategy::Flatten => {
                 let head = self.headers_mut();
+
+                head.maybe_unshift(buf.remaining());
                 //perf: This is a little faster than <Vec as BufMut>>::put,
                 //but accomplishes the same result.
                 loop {
@@ -804,7 +824,6 @@ mod tests {
         let _ = pretty_env_logger::try_init();
 
         let mock = Mock::new()
-            // Just a single write
             .write(b"hello world, it's hyper!")
             .build();
 
@@ -820,6 +839,41 @@
         buffered.flush().await.expect("flush");
     }
 
+    #[test]
+    fn write_buf_flatten_partially_flushed() {
+        let _ = pretty_env_logger::try_init();
+
+        let b = |s: &str| Cursor::new(s.as_bytes().to_vec());
+
+        let mut write_buf = WriteBuf::<Cursor<Vec<u8>>>::new(WriteStrategy::Flatten);
+
+        write_buf.buffer(b("hello "));
+        write_buf.buffer(b("world, "));
+
+        assert_eq!(write_buf.chunk(), b"hello world, ");
+
+        // advance most of the way, but not all
+        write_buf.advance(11);
+
+        assert_eq!(write_buf.chunk(), b", ");
+        assert_eq!(write_buf.headers.pos, 11);
+        assert_eq!(write_buf.headers.bytes.capacity(), INIT_BUFFER_SIZE);
+
+        // there's still room in the headers buffer, so just push on the end
+        write_buf.buffer(b("it's hyper!"));
+
+        assert_eq!(write_buf.chunk(), b", it's hyper!");
+        assert_eq!(write_buf.headers.pos, 11);
+
+        let rem1 = write_buf.remaining();
+        let cap = write_buf.headers.bytes.capacity();
+
+        // but when this would go over capacity, don't copy the old bytes
+        write_buf.buffer(Cursor::new(vec![b'X'; cap]));
+        assert_eq!(write_buf.remaining(), cap + rem1);
+        assert_eq!(write_buf.headers.pos, 0);
+    }
+
     #[tokio::test]
     async fn write_buf_queue_disable_auto() {
         let _ = pretty_env_logger::try_init();