rename parts of ServiceConfig

Rob Ede 2022-01-16 04:15:47 +00:00
parent 5454699bab
commit ab226de475
GPG Key ID: 97C636207D3EF933
5 changed files with 77 additions and 68 deletions

View File

@ -10,6 +10,10 @@
### Changed
- Brotli (de)compression support is now provided by the `brotli` crate. [#2538]
- Rename `ServiceConfig::{client_timer_expire => client_request_deadline}`. [#????]
- Rename `ServiceConfig::{client_timer => client_request_timer}`. [#????]
- Rename `ServiceConfig::{client_disconnect_timer => client_disconnect_deadline}`. [#????]
- Rename `ServiceConfig::{keep_alive_timer => keep_alive_deadline}`. [#????]
### Removed
- `ResponseHead::extensions[_mut]()`. [#2585]
@ -18,6 +22,7 @@
[#2538]: https://github.com/actix/actix-web/pull/2538
[#2585]: https://github.com/actix/actix-web/pull/2585
[#2587]: https://github.com/actix/actix-web/pull/2587
[#????]: https://github.com/actix/actix-web/pull/????
## 3.0.0-beta.18 - 2022-01-04
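
For reference, the renames listed above map to call sites as in the following sketch. This is hypothetical illustration code, not part of the commit; the crate-root import paths and the placeholder constructor values are assumptions.

```rust
use actix_http::{KeepAlive, ServiceConfig};

#[actix_rt::main]
async fn main() {
    // Constructor signature per this commit: keep-alive setting, client request
    // timeout (ms), client disconnect timeout (ms), secure flag, local address.
    let config = ServiceConfig::new(KeepAlive::Timeout(5), 5_000, 3_000, false, None);

    let _ = config.client_request_deadline(); // was: client_timer_expire()
    let _ = config.client_request_timer(); // was: client_timer()
    let _ = config.client_disconnect_deadline(); // was: client_disconnect_timer()
    let _ = config.keep_alive_deadline(); // was: keep_alive_expire()
}
```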

View File

@ -12,19 +12,19 @@ use actix_rt::{
};
use bytes::BytesMut;
/// "Sun, 06 Nov 1994 08:49:37 GMT".len()
/// "Thu, 01 Jan 1970 00:00:00 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[derive(Debug, PartialEq, Clone, Copy)]
/// Server keep-alive setting
pub enum KeepAlive {
/// Keep alive in seconds
/// Keep-alive time in seconds.
Timeout(usize),
/// Rely on OS to shutdown tcp connection
/// Rely on OS to shutdown TCP connection.
Os,
/// Disabled
/// Keep-alive is disabled.
Disabled,
}
@ -44,13 +44,13 @@ impl From<Option<usize>> for KeepAlive {
}
}
/// Http service configuration
/// HTTP service configuration.
pub struct ServiceConfig(Rc<Inner>);
struct Inner {
keep_alive: Option<Duration>,
client_timeout: u64,
client_disconnect: u64,
client_request_timeout: u64,
client_disconnect_timeout: u64,
ka_enabled: bool,
secure: bool,
local_addr: Option<std::net::SocketAddr>,
@ -73,8 +73,8 @@ impl ServiceConfig {
/// Create instance of `ServiceConfig`
pub fn new(
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
client_request_timeout: u64,
client_disconnect_timeout: u64,
secure: bool,
local_addr: Option<net::SocketAddr>,
) -> ServiceConfig {
@ -83,24 +83,22 @@ impl ServiceConfig {
KeepAlive::Os => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
let keep_alive =
(ka_enabled && keep_alive > 0).then(|| Duration::from_secs(keep_alive));
ServiceConfig(Rc::new(Inner {
keep_alive,
ka_enabled,
client_timeout,
client_disconnect,
client_request_timeout,
client_disconnect_timeout,
secure,
local_addr,
date_service: DateService::new(),
}))
}
/// Returns true if connection is secure (HTTPS)
/// Returns `true` if connection is secure (i.e., using TLS / HTTPS).
#[inline]
pub fn secure(&self) -> bool {
self.0.secure
@ -114,32 +112,46 @@ impl ServiceConfig {
self.0.local_addr
}
/// Keep alive duration if configured.
/// Keep-alive duration, if configured.
#[inline]
pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive
}
/// Return state of connection keep-alive functionality
/// Returns `true` if connection is set to use keep-alive functionality.
#[inline]
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
/// Client timeout for first request.
/// Creates a time object representing the deadline for the client to finish sending the head of
/// its first request.
///
/// Returns `None` if this `ServiceConfig` was constructed with `client_request_timeout: 0`.
pub fn client_request_deadline(&self) -> Option<Instant> {
let delay = self.0.client_request_timeout;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
}
/// Creates a timer that resolves at the [client's first request deadline].
///
/// Returns `None` if this `ServiceConfig` was constructed with `client_request_timeout: 0`.
///
/// [client's first request deadline]: Self::client_request_deadline
#[inline]
pub fn client_timer(&self) -> Option<Sleep> {
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(sleep_until(self.now() + Duration::from_millis(delay_time)))
} else {
None
}
pub fn client_request_timer(&self) -> Option<Sleep> {
self.client_request_deadline().map(sleep_until)
}
/// Client timeout for first request.
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
/// Creates a time object representing the deadline for the client to disconnect.
pub fn client_disconnect_deadline(&self) -> Option<Instant> {
let delay = self.0.client_disconnect_timeout;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
@ -147,25 +159,18 @@ impl ServiceConfig {
}
}
/// Client disconnect timer
pub fn client_disconnect_timer(&self) -> Option<Instant> {
let delay = self.0.client_disconnect;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
/// Creates a time object representing the deadline for the connection keep-alive,
/// if configured.
pub fn keep_alive_deadline(&self) -> Option<Instant> {
self.keep_alive().map(|ka| self.now() + ka)
}
/// Return keep-alive timer delay is configured.
/// Creates a timer that resolves at the [keep-alive deadline].
///
/// [keep-alive deadline]: Self::keep_alive_deadline
#[inline]
pub fn keep_alive_timer(&self) -> Option<Sleep> {
self.keep_alive().map(|ka| sleep_until(self.now() + ka))
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
self.keep_alive().map(|ka| self.now() + ka)
self.keep_alive_deadline().map(sleep_until)
}
#[inline]
@ -243,7 +248,7 @@ impl DateService {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task sleep for 500 milli and update current date/timer in a loop.
// spawn an async task that sleeps for 500 millis and updates the current date/timer in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
@ -296,9 +301,8 @@ mod notify_on_drop {
pub(crate) struct NotifyOnDrop;
impl NotifyOnDrop {
/// # Panic:
///
/// When construct multiple instances on any given thread.
/// # Panics
/// Panics when constructing multiple instances on any given thread.
pub(crate) fn new() -> Self {
NOTIFY_DROPPED.with(|bool| {
let mut bool = bool.borrow_mut();
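
As a usage illustration of the deadline/timer pairing defined above: `client_request_timer()` resolves at `client_request_deadline()`, so a caller can race it against request-head parsing. The sketch below is a hedged example; `read_request_head` is a hypothetical stand-in, and the `tokio::select!` race (which assumes tokio's `macros` feature) is not how this commit's dispatcher drives the timer.

```rust
use actix_http::ServiceConfig;

// Hypothetical placeholder for reading/parsing a request head.
async fn read_request_head() -> Result<(), ()> {
    Ok(())
}

// Enforce the first-request timeout using the renamed helpers.
async fn read_head_with_timeout(config: &ServiceConfig) -> Result<(), ()> {
    match config.client_request_timer() {
        // A timeout is configured: race the read against the timer, which
        // resolves at `client_request_deadline()`.
        Some(timer) => tokio::select! {
            res = read_request_head() => res,
            _ = timer => Err(()), // deadline elapsed before the head arrived
        },
        // Constructed with `client_request_timeout: 0`: no deadline, just read.
        None => read_request_head().await,
    }
}
```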

View File

@ -8,7 +8,7 @@ use std::{
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed, FramedParts};
use actix_codec::{AsyncRead, AsyncWrite, Decoder as _, Encoder as _, Framed, FramedParts};
use actix_rt::time::{sleep_until, Instant, Sleep};
use actix_service::Service;
use bitflags::bitflags;
@ -142,7 +142,7 @@ pin_project! {
payload: Option<PayloadSender>,
messages: VecDeque<DispatcherMessage>,
ka_expire: Instant,
ka_deadline: Instant,
#[pin]
ka_timer: Option<Sleep>,
@ -244,7 +244,7 @@ where
payload: None,
messages: VecDeque::new(),
ka_expire,
ka_deadline: ka_expire,
ka_timer,
io: Some(io),
@ -733,8 +733,8 @@ where
}
if updated && this.ka_timer.is_some() {
if let Some(expire) = this.codec.config().keep_alive_expire() {
*this.ka_expire = expire;
if let Some(expire) = this.codec.config().keep_alive_deadline() {
*this.ka_deadline = expire;
}
}
Ok(updated)
@ -753,7 +753,7 @@ where
None => {
// conditionally go into shutdown timeout
if this.flags.contains(Flags::SHUTDOWN) {
if let Some(deadline) = this.codec.config().client_disconnect_timer() {
if let Some(deadline) = this.codec.config().client_disconnect_deadline() {
// write client disconnect time out and poll again to
// go into Some<Pin<&mut Sleep>> branch
this.ka_timer.set(Some(sleep_until(deadline)));
@ -768,16 +768,16 @@ where
if this.flags.contains(Flags::SHUTDOWN) {
return Err(DispatchError::DisconnectTimeout);
// exceed deadline. check for any outstanding tasks
} else if timer.deadline() >= *this.ka_expire {
// have no task at hand.
} else if timer.deadline() >= *this.ka_deadline {
if this.state.is_empty() && this.write_buf.is_empty() {
// have no task at hand
if this.flags.contains(Flags::STARTED) {
trace!("Keep-alive timeout, close connection");
this.flags.insert(Flags::SHUTDOWN);
// start shutdown timeout
if let Some(deadline) =
this.codec.config().client_disconnect_timer()
this.codec.config().client_disconnect_deadline()
{
timer.as_mut().reset(deadline);
let _ = timer.poll(cx);
@ -795,15 +795,16 @@ where
this = self.project();
this.flags.insert(Flags::STARTED | Flags::SHUTDOWN);
}
// still have unfinished task. try to reset and register keep-alive.
} else if let Some(deadline) = this.codec.config().keep_alive_expire() {
} else if let Some(deadline) = this.codec.config().keep_alive_deadline()
{
// still have unfinished task. try to reset and register keep-alive
timer.as_mut().reset(deadline);
let _ = timer.poll(cx);
}
// timer resolved but still have not met the keep-alive expire deadline.
// reset and register for later wakeup.
} else {
timer.as_mut().reset(*this.ka_expire);
// timer resolved but still have not met the keep-alive expire deadline
// reset and register for later wakeup
timer.as_mut().reset(*this.ka_deadline);
let _ = timer.poll(cx);
}
}
@ -851,8 +852,7 @@ where
// Note:
// This is a perf choice to reduce branch on <Request as MessageType>::decode.
//
// A Request head too large to parse is only checked on
// `httparse::Status::Partial` condition.
// A Request head too large to parse is only checked on `httparse::Status::Partial`.
if this.payload.is_none() {
// When the dispatcher has a payload, the responsibility for waking it up is shifted

View File

@ -160,7 +160,7 @@ where
Poll::Ready(_) => {
ping_pong.on_flight = false;
let dead_line = this.config.keep_alive_expire().unwrap();
let dead_line = this.config.keep_alive_deadline().unwrap();
ping_pong.timer.as_mut().reset(dead_line);
}
Poll::Pending => {
@ -174,7 +174,7 @@ where
ping_pong.ping_pong.send_ping(Ping::opaque())?;
let dead_line = this.config.keep_alive_expire().unwrap();
let dead_line = this.config.keep_alive_deadline().unwrap();
ping_pong.timer.as_mut().reset(dead_line);
ping_pong.on_flight = true;

View File

@ -67,7 +67,7 @@ where
{
HandshakeWithTimeout {
handshake: handshake(io),
timer: config.client_timer().map(Box::pin),
timer: config.client_request_timer().map(Box::pin),
}
}