refactor(files): rename read_mode_threshold fn

Rob Ede 2025-08-29 22:30:47 +01:00
parent 00b0f8f700
commit 4966a54e05
6 changed files with 57 additions and 35 deletions

View File

@@ -9,4 +9,5 @@ words:
 - rustls
 - rustup
 - serde
+- uring
 - zstd

View File

@@ -2,7 +2,7 @@
 ## Unreleased

-- Opt-In filesize threshold for faster synchronus reads that allow for 20x better performance.
+- Add `{Files, NamedFile}::read_mode_threshold()` methods to allow faster synchronous reads of small files.
 - Minimum supported Rust version (MSRV) is now 1.75.

 ## 0.6.6

View File

@@ -14,6 +14,12 @@ use pin_project_lite::pin_project;
 use super::named::File;
 
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum ReadMode {
+    Sync,
+    Async,
+}
+
 pin_project! {
     /// Adapter to read a `std::file::File` in chunks.
     #[doc(hidden)]
@@ -24,7 +30,7 @@ pin_project! {
         state: ChunkedReadFileState<Fut>,
         counter: u64,
         callback: F,
-        read_sync: bool,
+        read_mode: ReadMode,
     }
 }
@@ -58,7 +64,7 @@ pub(crate) fn new_chunked_read(
     size: u64,
     offset: u64,
     file: File,
-    size_threshold: u64,
+    read_mode_threshold: u64,
 ) -> impl Stream<Item = Result<Bytes, Error>> {
     ChunkedReadFile {
         size,
@@ -71,7 +77,11 @@ pub(crate) fn new_chunked_read(
         },
         counter: 0,
         callback: chunked_read_file_callback,
-        read_sync: size < size_threshold,
+        read_mode: if size < read_mode_threshold {
+            ReadMode::Sync
+        } else {
+            ReadMode::Async
+        },
     }
 }
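Note on the selection rule above: the comparison is strictly less-than, so a file exactly at the threshold still takes the async path, and the default threshold of 0 keeps every read async. A standalone sketch of that rule (the free function `select_read_mode` is hypothetical; `ReadMode` mirrors the crate-private enum added in this commit):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ReadMode {
    Sync,
    Async,
}

// Hypothetical helper: the same decision `new_chunked_read` makes inline.
fn select_read_mode(size: u64, read_mode_threshold: u64) -> ReadMode {
    if size < read_mode_threshold {
        ReadMode::Sync
    } else {
        ReadMode::Async
    }
}

fn main() {
    assert_eq!(select_read_mode(4 * 1024, 64 * 1024), ReadMode::Sync);
    // A file exactly at the threshold is still read asynchronously.
    assert_eq!(select_read_mode(64 * 1024, 64 * 1024), ReadMode::Async);
    // The default threshold of 0 means every file takes the async path.
    assert_eq!(select_read_mode(1, 0), ReadMode::Async);
}
```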
@@ -102,13 +112,14 @@ async fn chunked_read_file_callback(
     file: File,
     offset: u64,
     max_bytes: usize,
-    read_sync: bool,
+    read_mode: ReadMode,
 ) -> Result<(File, Bytes), Error> {
-    let res = if read_sync {
-        chunked_read_file_callback_sync(file, offset, max_bytes)?
-    } else {
-        actix_web::web::block(move || chunked_read_file_callback_sync(file, offset, max_bytes))
-            .await??
+    let res = match read_mode {
+        ReadMode::Sync => chunked_read_file_callback_sync(file, offset, max_bytes)?,
+        ReadMode::Async => {
+            actix_web::web::block(move || chunked_read_file_callback_sync(file, offset, max_bytes))
+                .await??
+        }
     };

     Ok(res)
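For context on the two arms above: the sync arm performs the read inline on the worker thread, while the async arm offloads the same closure to actix-web's blocking thread pool via `web::block`, which is why the diff awaits with a double `?` (one for the `BlockingError`, one for the inner I/O result). A minimal sketch of that pattern outside the crate (the `read_chunk` helper is hypothetical, standing in for `chunked_read_file_callback_sync`):

```rust
use std::io;

use actix_web::{web, Error};

// Hypothetical stand-in for the real positioned read.
fn read_chunk() -> io::Result<Vec<u8>> {
    Ok(vec![0u8; 4096])
}

// Sync path: cheap enough for small files to run inline on the worker thread.
fn read_inline() -> Result<Vec<u8>, Error> {
    Ok(read_chunk()?)
}

// Async path: offload the blocking read to the blocking thread pool.
async fn read_offloaded() -> Result<Vec<u8>, Error> {
    // `web::block` yields Result<io::Result<Vec<u8>>, BlockingError>,
    // hence `.await??` exactly as in the hunk above.
    let bytes = web::block(read_chunk).await??;
    Ok(bytes)
}
```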
@@ -187,7 +198,7 @@ where
 #[cfg(not(feature = "experimental-io-uring"))]
 impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
 where
-    F: Fn(File, u64, usize, bool) -> Fut,
+    F: Fn(File, u64, usize, ReadMode) -> Fut,
     Fut: Future<Output = Result<(File, Bytes), Error>>,
 {
     type Item = Result<Bytes, Error>;
@@ -209,7 +220,7 @@ where
                     .take()
                     .expect("ChunkedReadFile polled after completion");

-                let fut = (this.callback)(file, offset, max_bytes, *this.read_sync);
+                let fut = (this.callback)(file, offset, max_bytes, *this.read_mode);

                 this.state
                     .project_replace(ChunkedReadFileState::Future { fut });

View File

@@ -49,7 +49,7 @@ pub struct Files {
     use_guards: Option<Rc<dyn Guard>>,
     guards: Vec<Rc<dyn Guard>>,
     hidden_files: bool,
-    size_threshold: u64,
+    read_mode_threshold: u64,
 }

 impl fmt::Debug for Files {
@@ -74,7 +74,7 @@ impl Clone for Files {
             use_guards: self.use_guards.clone(),
             guards: self.guards.clone(),
             hidden_files: self.hidden_files,
-            size_threshold: self.size_threshold,
+            read_mode_threshold: self.read_mode_threshold,
         }
     }
 }
@@ -121,7 +121,7 @@ impl Files {
             use_guards: None,
             guards: Vec::new(),
             hidden_files: false,
-            size_threshold: 0,
+            read_mode_threshold: 0,
         }
     }
@@ -207,15 +207,20 @@ impl Files {
         self
     }

-    /// Sets the async file-size threshold.
+    /// Sets the size threshold that determines file read mode (sync/async).
     ///
-    /// When a file is larger than the threshold, the reader
-    /// will switch from faster blocking file-reads to slower async reads
-    /// to avoid blocking the main-thread when processing large files.
+    /// When a file is smaller than the threshold (bytes), the reader will switch from synchronous
+    /// (blocking) file-reads to async reads to avoid blocking the main-thread when processing large
+    /// files.
     ///
-    /// Default is 0, meaning all files are read asyncly.
-    pub fn set_size_threshold(mut self, size: u64) -> Self {
-        self.size_threshold = size;
+    /// Tweaking this value according to your expected usage may lead to significant performance
+    /// gains (or losses in other handlers, if `size` is too high).
+    ///
+    /// When the `experimental-io-uring` crate feature is enabled, file reads are always async.
+    ///
+    /// Default is 0, meaning all files are read asynchronously.
+    pub fn read_mode_threshold(mut self, size: u64) -> Self {
+        self.read_mode_threshold = size;
         self
     }
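For reference, a minimal usage sketch of the renamed `Files` builder method (the mount path, directory, and 16 KiB threshold are illustrative values, not taken from the commit):

```rust
use actix_files::Files;
use actix_web::{App, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            // Files smaller than 16 KiB are read synchronously on the worker
            // thread; larger files go through the async (blocking-pool) path.
            Files::new("/static", "./static").read_mode_threshold(16 * 1024),
        )
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```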
@@ -382,7 +387,7 @@ impl ServiceFactory<ServiceRequest> for Files {
             file_flags: self.file_flags,
             guards: self.use_guards.clone(),
             hidden_files: self.hidden_files,
-            size_threshold: self.size_threshold,
+            size_threshold: self.read_mode_threshold,
         };

         if let Some(ref default) = *self.default.borrow() {

View File

@@ -80,7 +80,7 @@ pub struct NamedFile {
    pub(crate) content_type: Mime,
    pub(crate) content_disposition: ContentDisposition,
    pub(crate) encoding: Option<ContentEncoding>,
-    pub(crate) size_threshold: u64,
+    pub(crate) read_mode_threshold: u64,
 }

 #[cfg(not(feature = "experimental-io-uring"))]
@@ -201,7 +201,7 @@ impl NamedFile {
             encoding,
             status_code: StatusCode::OK,
             flags: Flags::default(),
-            size_threshold: 0,
+            read_mode_threshold: 0,
         })
     }
@@ -355,15 +355,20 @@ impl NamedFile {
         self
     }

-    /// Sets the async file-size threshold.
+    /// Sets the size threshold that determines file read mode (sync/async).
     ///
-    /// When a file is larger than the threshold, the reader
-    /// will switch from faster blocking file-reads to slower async reads
-    /// to avoid blocking the main-thread when processing large files.
+    /// When a file is smaller than the threshold (bytes), the reader will switch from synchronous
+    /// (blocking) file-reads to async reads to avoid blocking the main-thread when processing large
+    /// files.
     ///
-    /// Default is 0, meaning all files are read asyncly.
-    pub fn set_size_threshold(mut self, size: u64) -> Self {
-        self.size_threshold = size;
+    /// Tweaking this value according to your expected usage may lead to significant performance
+    /// gains (or losses in other handlers, if `size` is too high).
+    ///
+    /// When the `experimental-io-uring` crate feature is enabled, file reads are always async.
+    ///
+    /// Default is 0, meaning all files are read asynchronously.
+    pub fn read_mode_threshold(mut self, size: u64) -> Self {
+        self.read_mode_threshold = size;
         self
     }
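A corresponding sketch for `NamedFile` (the path and 64 KiB threshold are illustrative):

```rust
use actix_files::NamedFile;
use actix_web::get;

#[get("/report")]
async fn report() -> actix_web::Result<NamedFile> {
    // Read this file synchronously if it is smaller than 64 KiB.
    Ok(NamedFile::open("static/report.pdf")?.read_mode_threshold(64 * 1024))
}
```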
@@ -455,7 +460,7 @@ impl NamedFile {
         }

         let reader =
-            chunked::new_chunked_read(self.md.len(), 0, self.file, self.size_threshold);
+            chunked::new_chunked_read(self.md.len(), 0, self.file, self.read_mode_threshold);

         return res.streaming(reader);
     }
@@ -592,7 +597,7 @@ impl NamedFile {
                 .map_into_boxed_body();
         }

-        let reader = chunked::new_chunked_read(length, offset, self.file, self.size_threshold);
+        let reader = chunked::new_chunked_read(length, offset, self.file, self.read_mode_threshold);

         if offset != 0 || length != self.md.len() {
             res.status(StatusCode::PARTIAL_CONTENT);

View File

@@ -72,7 +72,7 @@ impl FilesService {
         let (req, _) = req.into_parts();
         let res = named_file
-            .set_size_threshold(self.size_threshold)
+            .read_mode_threshold(self.size_threshold)
             .into_response(&req);

         ServiceResponse::new(req, res)
     }