clean up h1 dispatcher. add comments

fakeshadow 2021-02-03 04:32:17 -08:00
parent bfe47b2034
commit 15b8dba2c7
11 changed files with 182 additions and 167 deletions

View File

@@ -75,8 +75,8 @@ required-features = ["rustls"]
 [dependencies]
 actix-codec = "0.4.0-beta.1"
 actix-macros = "0.2.0"
-actix-router = "0.2.4"
-actix-rt = "2.0.0-beta.2"
+actix-router = "0.2.6"
+actix-rt = "2"
 actix-server = "2.0.0-beta.2"
 actix-service = "2.0.0-beta.3"
 actix-utils = "3.0.0-beta.1"
@@ -137,8 +137,7 @@ actix-server = { git = "https://github.com/actix/actix-net.git" }
 actix-tls = { git = "https://github.com/actix/actix-net.git" }
 actix-utils = { git = "https://github.com/actix/actix-net.git" }
 actix-router = { git = "https://github.com/actix/actix-net.git" }
-actix-macros = { git = "https://github.com/actix/actix-net" }
-actix = { git = "https://github.com/actix/actix", branch = "feat/actix-rt-2" }
+actix = { git = "https://github.com/actix/actix.git", branch = "feat/actix-rt-2" }

 [[bench]]
 name = "server"

View File

@@ -31,5 +31,5 @@ percent-encoding = "2.1"
 v_htmlescape = "0.12"

 [dev-dependencies]
-actix-rt = "2.0.0-beta.2"
+actix-rt = "2"
 actix-web = "4.0.0-beta.1"

View File

@@ -33,7 +33,7 @@ actix-service = "2.0.0-beta.3"
 actix-codec = "0.4.0-beta.1"
 actix-tls = "3.0.0-beta.2"
 actix-utils = "3.0.0-beta.1"
-actix-rt = "2.0.0-beta.2"
+actix-rt = "2"
 actix-server = "2.0.0-beta.2"
 awc = "3.0.0-beta.1"

View File

@@ -43,7 +43,7 @@ actors = ["actix"]
 actix-service = "2.0.0-beta.3"
 actix-codec = "0.4.0-beta.1"
 actix-utils = "3.0.0-beta.1"
-actix-rt = "2.0.0-beta.3"
+actix-rt = "2"
 actix-tls = "3.0.0-beta.2"
 actix = { version = "0.11.0-beta.1", optional = true }
@@ -70,6 +70,7 @@ log = "0.4"
 mime = "0.3"
 percent-encoding = "2.1"
 pin-project = "1.0.0"
+pin-project-lite = "0.2"
 rand = "0.8"
 regex = "1.3"
 serde = "1.0"

View File

@@ -14,7 +14,7 @@ use crate::header::HeaderMap;
 use crate::message::{ConnectionType, ResponseHead};
 use crate::request::Request;

-const MAX_BUFFER_SIZE: usize = 131_072;
+pub(crate) const MAX_BUFFER_SIZE: usize = 131_072;
 const MAX_HEADERS: usize = 96;

 /// Incoming message decoder

View File

@@ -45,7 +45,7 @@ bitflags! {
     }
 }

-#[pin_project::pin_project]
+#[pin_project]
 /// Dispatcher for HTTP/1.1 protocol
 pub struct Dispatcher<T, S, B, X, U>
 where
@@ -139,11 +139,8 @@
     fn is_empty(&self) -> bool {
         matches!(self, State::None)
     }
-
-    fn is_call(&self) -> bool {
-        matches!(self, State::ServiceCall(_))
-    }
 }

 enum PollResponse {
     Upgrade(Request),
     DoNothing,
@@ -345,55 +342,81 @@
     ) -> Result<PollResponse, DispatchError> {
         loop {
             let mut this = self.as_mut().project();
-            // state is not changed on Poll::Pending.
-            // other variant and conditions always trigger a state change(or an error).
-            let state_change = match this.state.project() {
+            match this.state.as_mut().project() {
+                // no future is in InnerDispatcher state. pop next message.
                 StateProj::None => match this.messages.pop_front() {
+                    // handle request message.
                     Some(DispatcherMessage::Item(req)) => {
-                        self.as_mut().handle_request(req, cx)?;
-                        true
+                        // Handle `EXPECT: 100-Continue` header
+                        if req.head().expect() {
+                            // set InnerDispatcher state and continue loop to poll it.
+                            let task = this.flow.expect.call(req);
+                            this.state.set(State::ExpectCall(task));
+                        } else {
+                            // the same as expect call.
+                            let task = this.flow.service.call(req);
+                            this.state.set(State::ServiceCall(task));
+                        };
                     }
+                    // handle error message.
                     Some(DispatcherMessage::Error(res)) => {
+                        // send_response would update InnerDispatcher state to SendPayload or
+                        // None (if the response body is empty).
+                        // continue loop to poll it.
                         self.as_mut()
                             .send_response(res, ResponseBody::Other(Body::Empty))?;
-                        true
                     }
+                    // return with upgrade request and poll it exclusively.
                     Some(DispatcherMessage::Upgrade(req)) => {
                         return Ok(PollResponse::Upgrade(req));
                     }
-                    None => false,
+                    // all messages are dealt with.
+                    None => return Ok(PollResponse::DoNothing),
                 },
                 StateProj::ExpectCall(fut) => match fut.poll(cx) {
+                    // expect resolved. write continue to buffer and set InnerDispatcher state
+                    // to service call.
                     Poll::Ready(Ok(req)) => {
                         self.as_mut().send_continue();
                         this = self.as_mut().project();
                         let fut = this.flow.service.call(req);
                         this.state.set(State::ServiceCall(fut));
-                        continue;
                     }
+                    // send expect error as response
                     Poll::Ready(Err(e)) => {
                         let res: Response = e.into().into();
                         let (res, body) = res.replace_body(());
                         self.as_mut().send_response(res, body.into_body())?;
-                        true
                     }
-                    Poll::Pending => false,
+                    // expect must be resolved before progress can be made.
+                    Poll::Pending => return Ok(PollResponse::DoNothing),
                 },
                 StateProj::ServiceCall(fut) => match fut.poll(cx) {
+                    // service call resolved. send response.
                     Poll::Ready(Ok(res)) => {
                         let (res, body) = res.into().replace_body(());
                         self.as_mut().send_response(res, body)?;
-                        continue;
                     }
+                    // send service call error as response
                     Poll::Ready(Err(e)) => {
                         let res: Response = e.into().into();
                         let (res, body) = res.replace_body(());
                         self.as_mut().send_response(res, body.into_body())?;
-                        true
                     }
-                    Poll::Pending => false,
+                    // service call pending and could be waiting for more chunk messages.
+                    // (pipeline message limit and/or payload can_read limit)
+                    Poll::Pending => {
+                        // no new message is decoded and no new payload is fed.
+                        // nothing to do except waiting for new incoming data from the client.
+                        if !self.as_mut().poll_request(cx)? {
+                            return Ok(PollResponse::DoNothing);
+                        }
+                        // otherwise keep looping.
+                    }
                 },
                 StateProj::SendPayload(mut stream) => {
+                    // keep populating the write buffer until the buffer size limit is hit,
+                    // the stream gets blocked, or it finishes.
                     loop {
                         if this.write_buf.len() < HW_BUFFER_SIZE {
                             match stream.as_mut().poll_next(cx) {
@@ -402,50 +425,42 @@
                                         Message::Chunk(Some(item)),
                                         &mut this.write_buf,
                                     )?;
-                                    continue;
                                 }
                                 Poll::Ready(None) => {
                                     this.codec.encode(
                                         Message::Chunk(None),
                                         &mut this.write_buf,
                                     )?;
-                                    this = self.as_mut().project();
-                                    this.state.set(State::None);
+                                    // payload stream finished.
+                                    // break and go out of scope of the borrowed stream.
+                                    break;
                                 }
-                                Poll::Ready(Some(Err(_))) => {
-                                    return Err(DispatchError::Unknown)
+                                Poll::Ready(Some(Err(e))) => {
+                                    return Err(DispatchError::Service(e))
                                 }
+                                // Payload Stream Pending should only be given when the caller
+                                // promises to wake it up properly.
+                                //
+                                // TODO: Think if it's a good idea to mix in a self wake up.
+                                // It would turn the dispatcher into a busy polling style of
+                                // stream handling. (Or another timer as source of scheduled
+                                // wake ups.) As there is no way to know when or how the caller
+                                // would wake up the stream, a self wake up is an overhead that
+                                // would result in double polling (or an extra timer).
                                 Poll::Pending => return Ok(PollResponse::DoNothing),
                             }
                         } else {
+                            // buffer is beyond max size.
+                            // return and write the whole buffer to the io stream.
                             return Ok(PollResponse::DrainWriteBuf);
                         }
-                        break;
                     }
-                    continue;
-                }
-            };
-
-            // state is changed and continue when the state is not Empty
-            if state_change {
-                if !self.state.is_empty() {
-                    continue;
-                }
-            } else {
-                // if read-backpressure is enabled and we consumed some data.
-                // we may read more data and retry
-                if self.state.is_call() {
-                    if self.as_mut().poll_request(cx)? {
-                        continue;
-                    }
-                } else if !self.messages.is_empty() {
-                    continue;
-                }
-            }
-            break;
-        }
-
-        Ok(PollResponse::DoNothing)
+                    // break from Poll::Ready(None) on stream finished.
+                    // this is for re-borrowing InnerDispatcher state and setting it to None.
+                    this.state.set(State::None);
+                }
+            }
+        }
     }

     fn handle_request(
@@ -457,7 +472,7 @@
         // Handle `EXPECT: 100-Continue` header
         if req.head().expect() {
-            // set dispatcher state so the future is pinned.
+            // set InnerDispatcher state so the future is pinned.
             let task = this.flow.expect.call(req);
             this.state.set(State::ExpectCall(task));
         } else {
@@ -468,26 +483,22 @@
         // eagerly poll the future for once(or twice if expect is resolved immediately).
         loop {
-            match this.state.project() {
+            match this.state.as_mut().project() {
                 StateProj::ExpectCall(fut) => {
                     match fut.poll(cx) {
                         // expect is resolved. continue loop and poll the service call branch.
                         Poll::Ready(Ok(req)) => {
-                            self.as_mut().send_continue();
-                            this = self.as_mut().project();
                             let task = this.flow.service.call(req);
-                            this.state.set(State::ServiceCall(task));
-                            continue;
+                            this.state.as_mut().set(State::ServiceCall(task));
                         }
-                        // future is pending. return Ok(()) to notify that a new state is
-                        // set and the outer loop should be continue.
+                        // future is pending. return Ok(()) to notify that a new InnerDispatcher
+                        // state is set and the outer loop should continue.
                         Poll::Pending => return Ok(()),
                         // future is error. send response and return a result. On success
-                        // to notify the dispatcher a new state is set and the outer loop
-                        // should be continue.
+                        // to notify the dispatcher a new InnerDispatcher state is set and
+                        // the outer loop should continue.
                         Poll::Ready(Err(e)) => {
-                            let e = e.into();
-                            let res: Response = e.into();
+                            let res: Response = e.into().into();
                             let (res, body) = res.replace_body(());
                             return self.send_response(res, body.into_body());
                         }
@@ -497,8 +508,8 @@
                     // return no matter the service call future's result.
                     return match fut.poll(cx) {
                         // future is resolved. send response and return a result. On success
-                        // to notify the dispatcher a new state is set and the outer loop
-                        // should be continue.
+                        // to notify the dispatcher a new InnerDispatcher state is set and the
+                        // outer loop should continue.
                         Poll::Ready(Ok(res)) => {
                             let (res, body) = res.into().replace_body(());
                             self.send_response(res, body)
@@ -540,25 +551,28 @@
                     match msg {
                         Message::Item(mut req) => {
-                            let pl = this.codec.message_type();
                             req.head_mut().peer_addr = *this.peer_addr;

                             // merge on_connect_ext data into request extensions
                             this.on_connect_data.merge_into(&mut req);

-                            if pl == MessageType::Stream && this.flow.upgrade.is_some() {
-                                this.messages.push_back(DispatcherMessage::Upgrade(req));
-                                break;
-                            }
-
-                            if pl == MessageType::Payload || pl == MessageType::Stream {
-                                let (ps, pl) = Payload::create(false);
-                                let (req1, _) =
-                                    req.replace_payload(crate::Payload::H1(pl));
-                                req = req1;
-                                *this.payload = Some(ps);
-                            }
+                            match this.codec.message_type() {
+                                MessageType::Stream if this.flow.upgrade.is_some() => {
+                                    this.messages
+                                        .push_back(DispatcherMessage::Upgrade(req));
+                                    break;
+                                }
+                                MessageType::Payload | MessageType::Stream => {
+                                    let (ps, pl) = Payload::create(false);
+                                    let (req1, _) =
+                                        req.replace_payload(crate::Payload::H1(pl));
+                                    req = req1;
+                                    *this.payload = Some(ps);
+                                }
+                                MessageType::None => {}
+                            }

-                            // handle request early
+                            // handle request early if no future lives in InnerDispatcher state.
                             if this.state.is_empty() {
                                 self.as_mut().handle_request(req, cx)?;
                                 this = self.as_mut().project();
@@ -624,6 +638,7 @@
                 *this.ka_expire = expire;
             }
         }
+
         Ok(updated)
     }
@@ -711,6 +726,75 @@
         }
         Ok(())
     }
+
+    /// Returns true when io stream can be disconnected after write to it.
+    ///
+    /// It covers these conditions:
+    ///
+    /// - `Flags::READ_DISCONNECT` flag active.
+    /// - `std::io::ErrorKind::ConnectionReset` after partial read.
+    /// - all data read done.
+    #[inline(always)]
+    fn read_available(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Result<bool, DispatchError> {
+        let this = self.project();
+
+        if this.flags.contains(Flags::READ_DISCONNECT) {
+            return Ok(true);
+        };
+
+        let buf = this.read_buf;
+        let mut io = Pin::new(this.io.as_mut().unwrap());
+        let mut read_some = false;
+
+        loop {
+            // grow buffer if necessary.
+            let remaining = buf.capacity() - buf.len();
+            if remaining < LW_BUFFER_SIZE {
+                buf.reserve(HW_BUFFER_SIZE - remaining);
+            }
+
+            match actix_codec::poll_read_buf(io.as_mut(), cx, buf) {
+                Poll::Pending => return Ok(false),
+                Poll::Ready(Ok(n)) => {
+                    if n == 0 {
+                        return Ok(true);
+                    } else {
+                        // Return early when read buf exceeds decoder's max buffer size.
+                        if buf.len() >= super::decoder::MAX_BUFFER_SIZE {
+                            return Ok(false);
+                        }
+                        read_some = true;
+                    }
+                }
+                Poll::Ready(Err(err)) => {
+                    return if err.kind() == io::ErrorKind::WouldBlock {
+                        Ok(false)
+                    } else if err.kind() == io::ErrorKind::ConnectionReset && read_some {
+                        Ok(true)
+                    } else {
+                        Err(DispatchError::Io(err))
+                    }
+                }
+            }
+        }
+    }
+
+    /// call upgrade service with request.
+    fn upgrade(self: Pin<&mut Self>, req: Request) -> U::Future {
+        let this = self.project();
+        let mut parts = FramedParts::with_read_buf(
+            this.io.take().unwrap(),
+            mem::take(this.codec),
+            mem::take(this.read_buf),
+        );
+        parts.write_buf = mem::take(this.write_buf);
+        let framed = Framed::from_parts(parts);
+        this.flow.upgrade.as_ref().unwrap().call((req, framed))
+    }
 }
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U> impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
@@ -744,9 +828,10 @@
                     if inner.flags.contains(Flags::WRITE_DISCONNECT) {
                         Poll::Ready(Ok(()))
                     } else {
-                        // flush buffer
+                        // flush buffer.
                         inner.as_mut().poll_flush(cx)?;
                         if !inner.write_buf.is_empty() {
+                            // still have unfinished data. wait.
                             Poll::Pending
                         } else {
                             Pin::new(inner.project().io.as_mut().unwrap())
@@ -755,21 +840,14 @@
                         }
                     }
                 } else {
-                    // read socket into a buf
-                    let should_disconnect =
-                        if !inner.flags.contains(Flags::READ_DISCONNECT) {
-                            let mut inner_p = inner.as_mut().project();
-                            read_available(
-                                cx,
-                                inner_p.io.as_mut().unwrap(),
-                                &mut inner_p.read_buf,
-                            )?
-                        } else {
-                            None
-                        };
+                    // read from io stream and fill read buffer.
+                    let should_disconnect = inner.as_mut().read_available(cx)?;

+                    // parse read buffer into http requests and payloads.
                     inner.as_mut().poll_request(cx)?;

-                    if let Some(true) = should_disconnect {
+                    // io stream should be closed.
+                    if should_disconnect {
                         let inner_p = inner.as_mut().project();
                         inner_p.flags.insert(Flags::READ_DISCONNECT);
                         if let Some(mut payload) = inner_p.payload.take() {
@@ -778,6 +856,7 @@
                     };

                     loop {
+                        // grow buffer if necessary.
                         let inner_p = inner.as_mut().project();
                         let remaining =
                             inner_p.write_buf.capacity() - inner_p.write_buf.len();
@@ -785,24 +864,14 @@
                             inner_p.write_buf.reserve(HW_BUFFER_SIZE - remaining);
                         }

+                        // poll_response and populate write buffer.
+                        // drain indicates if write buffer should be emptied before next run.
                         let drain = match inner.as_mut().poll_response(cx)? {
                             PollResponse::DrainWriteBuf => true,
                             PollResponse::DoNothing => false,
+                            // upgrade request and go to the Upgrade variant of DispatcherState.
                             PollResponse::Upgrade(req) => {
-                                let inner_p = inner.as_mut().project();
-                                let mut parts = FramedParts::with_read_buf(
-                                    inner_p.io.take().unwrap(),
-                                    mem::take(inner_p.codec),
-                                    mem::take(inner_p.read_buf),
-                                );
-                                parts.write_buf = mem::take(inner_p.write_buf);
-                                let framed = Framed::from_parts(parts);
-                                let upgrade = inner_p
-                                    .flow
-                                    .upgrade
-                                    .as_ref()
-                                    .unwrap()
-                                    .call((req, framed));
+                                let upgrade = inner.upgrade(req);
                                 self.as_mut()
                                     .project()
                                     .inner
@@ -827,6 +896,7 @@
                             return Poll::Ready(Ok(()));
                         }

+                        // check if there is an unresolved future in InnerDispatcher state.
                         let is_empty = inner.state.is_empty();

                         let inner_p = inner.as_mut().project();
@@ -866,61 +936,6 @@
         }
     }
 }

-/// Returns either:
-/// - `Ok(Some(true))` - data was read and done reading all data.
-/// - `Ok(Some(false))` - data was read but there should be more to read.
-/// - `Ok(None)` - no data was read but there should be more to read later.
-/// - Unhandled Errors
-fn read_available<T>(
-    cx: &mut Context<'_>,
-    io: &mut T,
-    buf: &mut BytesMut,
-) -> Result<Option<bool>, io::Error>
-where
-    T: AsyncRead + Unpin,
-{
-    let mut read_some = false;
-
-    loop {
-        // If buf is full return but do not disconnect since
-        // there is more reading to be done
-        if buf.len() >= HW_BUFFER_SIZE {
-            return Ok(Some(false));
-        }
-
-        let remaining = buf.capacity() - buf.len();
-        if remaining < LW_BUFFER_SIZE {
-            buf.reserve(HW_BUFFER_SIZE - remaining);
-        }
-
-        match actix_codec::poll_read_buf(Pin::new(io), cx, buf) {
-            Poll::Pending => {
-                return if read_some { Ok(Some(false)) } else { Ok(None) };
-            }
-            Poll::Ready(Ok(n)) => {
-                if n == 0 {
-                    return Ok(Some(true));
-                } else {
-                    read_some = true;
-                }
-            }
-            Poll::Ready(Err(err)) => {
-                return if err.kind() == io::ErrorKind::WouldBlock {
-                    if read_some {
-                        Ok(Some(false))
-                    } else {
-                        Ok(None)
-                    }
-                } else if err.kind() == io::ErrorKind::ConnectionReset && read_some {
-                    Ok(Some(true))
-                } else {
-                    Err(err)
-                }
-            }
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use std::str;
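Taken together, the dispatcher hunks above replace the old state_change bookkeeping: every arm of the poll_response state match now either installs the next pinned state and lets the outer loop run again, or returns a PollResponse to the caller. A self-contained toy sketch of that control-flow pattern (illustrative types only, not actix-http code) might look like this:

// Toy model of the loop-and-transition pattern: each arm either updates the
// state and falls through to the next loop iteration, or returns a control
// value to the caller. No separate `state_change` flag is needed.
enum State {
    Idle,
    Working(u32),
    Done,
}

enum Control {
    DoNothing,
    DrainWriteBuf,
}

fn drive(state: &mut State, queue: &mut Vec<u32>) -> Control {
    loop {
        match state {
            State::Idle => match queue.pop() {
                // transition and keep looping, mirroring `this.state.set(..)`.
                Some(n) => *state = State::Working(n),
                // nothing left to do; hand control back to the caller.
                None => return Control::DoNothing,
            },
            State::Working(n) => {
                if *n == 0 {
                    *state = State::Done;
                } else {
                    *n -= 1; // pretend to make progress on a payload chunk.
                }
            }
            State::Done => {
                *state = State::Idle;
                return Control::DrainWriteBuf;
            }
        }
    }
}

fn main() {
    let mut state = State::Idle;
    let mut queue = vec![2, 1];
    while !queue.is_empty() {
        let _ = drive(&mut state, &mut queue);
    }
}

The same shape shows up in the new read_available method, which folds the old free function's Option<bool> result into a plain bool now that the READ_DISCONNECT check happens inside the method itself.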

View File

@@ -126,6 +126,6 @@ impl IntoHeaderValue for Mime {
     #[inline]
     fn try_into_value(self) -> Result<HeaderValue, Self::Error> {
-        HeaderValue::try_from(format!("{}", self))
+        HeaderValue::from_str(self.as_ref())
     }
 }
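For context on the hunk above: Mime implements both Display and AsRef&lt;str&gt;, so the header value can be built from the borrowed string representation instead of formatting into a fresh String first. A small standalone sketch of the difference (assuming the http and mime crates as dependencies, Rust 2021 edition):

use http::header::HeaderValue;

fn main() {
    let mime = mime::APPLICATION_JSON;

    // Old path: render the Mime into an owned String, then validate it.
    let via_string = HeaderValue::try_from(format!("{}", mime)).unwrap();

    // New path: borrow the &str representation directly; no intermediate allocation.
    let via_str = HeaderValue::from_str(mime.as_ref()).unwrap();

    assert_eq!(via_string, via_str);
    assert_eq!(via_str.to_str().unwrap(), "application/json");
}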

View File

@@ -28,5 +28,5 @@ mime = "0.3"
 twoway = "0.2"

 [dev-dependencies]
-actix-rt = "2.0.0-beta.2"
+actix-rt = "2"
 actix-http = "3.0.0-beta.1"

View File

@@ -28,6 +28,6 @@ pin-project = "1.0.0"
 tokio = { version = "1", features = ["sync"] }

 [dev-dependencies]
-actix-rt = "2.0.0-beta.2"
+actix-rt = "2"
 env_logger = "0.7"
 futures-util = { version = "0.3.7", default-features = false }

View File

@@ -19,7 +19,7 @@ syn = { version = "1", features = ["full", "parsing"] }
 proc-macro2 = "1"

 [dev-dependencies]
-actix-rt = "2.0.0-beta.2"
+actix-rt = "2"
 actix-web = "4.0.0-beta.1"
 futures-util = { version = "0.3.7", default-features = false }
 trybuild = "1"

View File

@@ -40,7 +40,7 @@ compress = ["actix-http/compress"]
 actix-codec = "0.4.0-beta.1"
 actix-service = "2.0.0-beta.3"
 actix-http = "3.0.0-beta.1"
-actix-rt = "2.0.0-beta.3"
+actix-rt = "2"
 base64 = "0.13"
 bytes = "1"