From 56b924e155b54706dd981dcc4907fd37ff871b5a Mon Sep 17 00:00:00 2001 From: Damjan Georgievski Date: Sat, 21 Jul 2018 15:15:28 +0200 Subject: [PATCH 001/219] remove the timestamp from the default logger middleware env_logger and other logging systems will (or should) already add their own timestamp. --- src/middleware/logger.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/middleware/logger.rs b/src/middleware/logger.rs index 103cbf373..b7bb1bb80 100644 --- a/src/middleware/logger.rs +++ b/src/middleware/logger.rs @@ -25,7 +25,7 @@ use middleware::{Finished, Middleware, Started}; /// default format: /// /// ```ignore -/// %a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i" %T +/// %a "%r" %s %b "%{Referer}i" "%{User-Agent}i" %T /// ``` /// ```rust /// # extern crate actix_web; @@ -94,7 +94,7 @@ impl Default for Logger { /// Create `Logger` middleware with format: /// /// ```ignore - /// %a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i" %T + /// %a "%r" %s %b "%{Referer}i" "%{User-Agent}i" %T /// ``` fn default() -> Logger { Logger { @@ -143,7 +143,7 @@ struct Format(Vec); impl Default for Format { /// Return the default formatting style for the `Logger`: fn default() -> Format { - Format::new(r#"%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i" %T"#) + Format::new(r#"%a "%r" %s %b "%{Referer}i" "%{User-Agent}i" %T"#) } } From 6a75a3d68339a084567c5bf62267cfd3d0daa2e9 Mon Sep 17 00:00:00 2001 From: Damjan Georgievski Date: Sat, 21 Jul 2018 16:01:42 +0200 Subject: [PATCH 002/219] document the change in the default logger --- CHANGES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.md b/CHANGES.md index d83736eb5..ad06fc03c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,6 +5,7 @@ ### Fixed * Fixed default_resource 'not yet implemented' panic #410 +* removed the timestamp from the default logger middleware ## [0.7.0] - 2018-07-21 From b79a9aaec7a3a44dc6f5766e1db9d90147af657d Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 24 Jul 2018 14:18:04 -0700 Subject: [PATCH 003/219] fix changelog --- CHANGES.md | 11 +++++++++-- Cargo.toml | 2 +- README.md | 2 +- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 1f9688f66..882563302 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.1] - 2018-07-21 +## [0.7.2] - 2018-07-xx ### Added @@ -8,11 +8,18 @@ ### Fixed -* Fixed default_resource 'not yet implemented' panic #410 * removed the timestamp from the default logger middleware * Add `CookieSessionBackend::http_only` method to set `HttpOnly` directive of cookies + +## [0.7.1] - 2018-07-21 + +### Fixed + +* Fixed default_resource 'not yet implemented' panic #410 + + ## [0.7.0] - 2018-07-21 ### Added diff --git a/Cargo.toml b/Cargo.toml index a6b73ee55..6fb2e1a2e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.1" +version = "0.7.2" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md" diff --git a/README.md b/README.md index ec8c439ef..4e396cb91 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ Actix web is a simple, pragmatic and extremely fast web framework for Rust. 
* Multipart streams * Static assets * SSL support with OpenSSL or `native-tls` -* Middlewares ([Logger,Session,CORS,CSRF,etc](https://actix.rs/docs/middleware/)) +* Middlewares ([Logger, Session, CORS, CSRF, etc](https://actix.rs/docs/middleware/)) * Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html) * Built on top of [Actix actor framework](https://github.com/actix/actix) From d6abd2fe22f98e22a6ef7eba422d559d029dbf9d Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 24 Jul 2018 14:51:48 -0700 Subject: [PATCH 004/219] allow to handle empty path for application with prefix --- CHANGES.md | 6 ++++++ src/application.rs | 49 ++++++++++++++++++++++++++++++++++------------ src/router.rs | 2 +- 3 files changed, 43 insertions(+), 14 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 882563302..494ad7a65 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,6 +6,12 @@ * Add implementation of `FromRequest` for `Option` and `Result` + * Allow to handle application prefix, i.e. allow to handle `/app` path + for application with `/app` prefix. + Check [`App::prefix()`](https://actix.rs/actix-web/actix_web/struct.App.html#method.prefix) + api doc. + + ### Fixed * removed the timestamp from the default logger middleware diff --git a/src/application.rs b/src/application.rs index f36adf69e..a5cd3386f 100644 --- a/src/application.rs +++ b/src/application.rs @@ -171,7 +171,9 @@ where /// In the following example only requests with an `/app/` path /// prefix get handled. Requests with path `/app/test/` would be /// handled, while requests with the paths `/application` or - /// `/other/...` would return `NOT FOUND`. + /// `/other/...` would return `NOT FOUND`. It is also possible to + /// handle `/app` path, to do this you can register resource for + /// empty string `""` /// /// ```rust /// # extern crate actix_web; @@ -180,6 +182,8 @@ where /// fn main() { /// let app = App::new() /// .prefix("/app") + /// .resource("", |r| r.f(|_| HttpResponse::Ok())) // <- handle `/app` path + /// .resource("/", |r| r.f(|_| HttpResponse::Ok())) // <- handle `/app/` path /// .resource("/test", |r| { /// r.get().f(|_| HttpResponse::Ok()); /// r.head().f(|_| HttpResponse::MethodNotAllowed()); @@ -822,6 +826,23 @@ mod tests { assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND); } + #[test] + fn test_option_responder() { + let app = App::new() + .resource("/none", |r| r.f(|_| -> Option<&'static str> { None })) + .resource("/some", |r| r.f(|_| Some("some"))) + .finish(); + + let req = TestRequest::with_uri("/none").request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND); + + let req = TestRequest::with_uri("/some").request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::OK); + assert_eq!(resp.as_msg().body(), &Body::Binary(Binary::Slice(b"some"))); + } + #[test] fn test_filter() { let mut srv = TestServer::with_factory(|| { @@ -840,19 +861,21 @@ mod tests { } #[test] - fn test_option_responder() { - let app = App::new() - .resource("/none", |r| r.f(|_| -> Option<&'static str> { None })) - .resource("/some", |r| r.f(|_| Some("some"))) - .finish(); + fn test_prefix_root() { + let mut srv = TestServer::with_factory(|| { + App::new() + .prefix("/test") + .resource("/", |r| r.f(|_| HttpResponse::Ok())) + .resource("", |r| r.f(|_| HttpResponse::Created())) + }); - let req = TestRequest::with_uri("/none").request(); - let resp = app.run(req); - assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND); + let 
request = srv.get().uri(srv.url("/test/")).finish().unwrap(); + let response = srv.execute(request.send()).unwrap(); + assert_eq!(response.status(), StatusCode::OK); - let req = TestRequest::with_uri("/some").request(); - let resp = app.run(req); - assert_eq!(resp.as_msg().status(), StatusCode::OK); - assert_eq!(resp.as_msg().body(), &Body::Binary(Binary::Slice(b"some"))); + let request = srv.get().uri(srv.url("/test")).finish().unwrap(); + let response = srv.execute(request.send()).unwrap(); + assert_eq!(response.status(), StatusCode::CREATED); } + } diff --git a/src/router.rs b/src/router.rs index e79dc93da..f3f657b58 100644 --- a/src/router.rs +++ b/src/router.rs @@ -463,7 +463,7 @@ impl ResourceDef { /// /// Panics if path pattern is wrong. pub fn new(path: &str) -> Self { - ResourceDef::with_prefix(path, "/", false) + ResourceDef::with_prefix(path, if path.is_empty() { "" } else { "/" }, false) } /// Parse path pattern and create new `Resource` instance. From 85b275bb2b896624ed52d86cf7b93655704fc57e Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 24 Jul 2018 14:52:56 -0700 Subject: [PATCH 005/219] fix warnings --- Cargo.toml | 2 +- src/client/connector.rs | 8 ++--- src/client/pipeline.rs | 2 +- src/extractor.rs | 65 ++++++++++++++++++++++++++++------------- 4 files changed, 51 insertions(+), 26 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6fb2e1a2e..89a51c66b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ base64 = "0.9" bitflags = "1.0" h2 = "0.1" htmlescape = "0.3" -http = "^0.1.5" +http = "^0.1.8" httparse = "1.3" log = "0.4" mime = "0.3" diff --git a/src/client/connector.rs b/src/client/connector.rs index 6d391af87..03ad3bd98 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -599,7 +599,7 @@ impl ClientConnector { } Acquire::Available => { // create new connection - self.connect_waiter(key.clone(), waiter, ctx); + self.connect_waiter(&key, waiter, ctx); } } } @@ -608,7 +608,7 @@ impl ClientConnector { self.waiters = Some(act_waiters); } - fn connect_waiter(&mut self, key: Key, waiter: Waiter, ctx: &mut Context) { + fn connect_waiter(&mut self, key: &Key, waiter: Waiter, ctx: &mut Context) { let conn = AcquiredConn(key.clone(), Some(self.acq_tx.clone())); let key2 = key.clone(); @@ -828,7 +828,7 @@ impl Handler for ClientConnector { wait, conn_timeout, }; - self.connect_waiter(key.clone(), waiter, ctx); + self.connect_waiter(&key, waiter, ctx); return ActorResponse::async( rx.map_err(|_| ClientConnectorError::Disconnected) @@ -885,7 +885,7 @@ impl Handler for ClientConnector { wait, conn_timeout, }; - self.connect_waiter(key.clone(), waiter, ctx); + self.connect_waiter(&key, waiter, ctx); ActorResponse::async( rx.map_err(|_| ClientConnectorError::Disconnected) diff --git a/src/client/pipeline.rs b/src/client/pipeline.rs index e5538b060..394b7a6cd 100644 --- a/src/client/pipeline.rs +++ b/src/client/pipeline.rs @@ -216,7 +216,7 @@ impl Future for SendRequest { match pl.parse() { Ok(Async::Ready(mut resp)) => { - if self.req.method() == &Method::HEAD { + if self.req.method() == Method::HEAD { pl.parser.take(); } resp.set_pipeline(pl); diff --git a/src/extractor.rs b/src/extractor.rs index 768edfb76..aa4fdea7a 100644 --- a/src/extractor.rs +++ b/src/extractor.rs @@ -6,7 +6,7 @@ use std::{fmt, str}; use bytes::Bytes; use encoding::all::UTF_8; use encoding::types::{DecoderTrap, Encoding}; -use futures::{Async, Future, Poll, future}; +use futures::{future, Async, Future, Poll}; use mime::Mime; use serde::de::{self, DeserializeOwned}; use 
serde_urlencoded; @@ -504,19 +504,18 @@ impl FromRequest for String { /// }); /// } /// ``` -impl FromRequest for Option where T: FromRequest { +impl FromRequest for Option +where + T: FromRequest, +{ type Config = T::Config; type Result = Box, Error = Error>>; #[inline] fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result { - Box::new(T::from_request(req, cfg).into().then( |r| { - match r { - Ok(v) => future::ok(Some(v)), - Err(e) => { - future::ok(None) - } - } + Box::new(T::from_request(req, cfg).into().then(|r| match r { + Ok(v) => future::ok(Some(v)), + Err(_) => future::ok(None), })) } } @@ -566,13 +565,16 @@ impl FromRequest for Option where T: FromRequest FromRequest for Result where T: FromRequest{ +impl FromRequest for Result +where + T: FromRequest, +{ type Config = T::Config; type Result = Box, Error = Error>>; #[inline] fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result { - Box::new(T::from_request(req, cfg).into().then( |r| { future::ok(r) })) + Box::new(T::from_request(req, cfg).into().then(future::ok)) } } @@ -811,7 +813,10 @@ mod tests { let mut cfg = FormConfig::default(); cfg.limit(4096); - match Option::>::from_request(&req, &cfg).poll().unwrap() { + match Option::>::from_request(&req, &cfg) + .poll() + .unwrap() + { Async::Ready(r) => assert_eq!(r, None), _ => unreachable!(), } @@ -823,8 +828,16 @@ mod tests { .set_payload(Bytes::from_static(b"hello=world")) .finish(); - match Option::>::from_request(&req, &cfg).poll().unwrap() { - Async::Ready(r) => assert_eq!(r, Some(Form(Info { hello: "world".into() }))), + match Option::>::from_request(&req, &cfg) + .poll() + .unwrap() + { + Async::Ready(r) => assert_eq!( + r, + Some(Form(Info { + hello: "world".into() + })) + ), _ => unreachable!(), } @@ -835,7 +848,10 @@ mod tests { .set_payload(Bytes::from_static(b"bye=world")) .finish(); - match Option::>::from_request(&req, &cfg).poll().unwrap() { + match Option::>::from_request(&req, &cfg) + .poll() + .unwrap() + { Async::Ready(r) => assert_eq!(r, None), _ => unreachable!(), } @@ -850,8 +866,16 @@ mod tests { .set_payload(Bytes::from_static(b"hello=world")) .finish(); - match Result::, Error>::from_request(&req, &FormConfig::default()).poll().unwrap() { - Async::Ready(Ok(r)) => assert_eq!(r, Form(Info { hello: "world".into() })), + match Result::, Error>::from_request(&req, &FormConfig::default()) + .poll() + .unwrap() + { + Async::Ready(Ok(r)) => assert_eq!( + r, + Form(Info { + hello: "world".into() + }) + ), _ => unreachable!(), } @@ -862,14 +886,15 @@ mod tests { .set_payload(Bytes::from_static(b"bye=world")) .finish(); - match Result::, Error>::from_request(&req, &FormConfig::default()).poll().unwrap() { + match Result::, Error>::from_request(&req, &FormConfig::default()) + .poll() + .unwrap() + { Async::Ready(r) => assert!(r.is_err()), _ => unreachable!(), } } - - #[test] fn test_payload_config() { let req = TestRequest::default().finish(); From b878613e104a5ae8e958a10c7484401f851bfbee Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 24 Jul 2018 15:49:46 -0700 Subject: [PATCH 006/219] fix warning --- src/client/connector.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 03ad3bd98..c2ff328ea 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -609,6 +609,7 @@ impl ClientConnector { } fn connect_waiter(&mut self, key: &Key, waiter: Waiter, ctx: &mut Context) { + let key = key.clone(); let conn = AcquiredConn(key.clone(), 
Some(self.acq_tx.clone())); let key2 = key.clone(); @@ -635,7 +636,7 @@ impl ClientConnector { act.connector .connect_async(&key.host, stream) .into_actor(act) - .then(move |res, act, _| { + .then(move |res, _, _| { match res { Err(e) => { let _ = waiter.tx.send(Err( From e408b68744a10ae02555ea84a8960712b62affb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Miku=C5=82a?= Date: Wed, 25 Jul 2018 17:01:22 +0200 Subject: [PATCH 007/219] Update cookie dependency (#422) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 89a51c66b..29c2dadbf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ lazy_static = "1.0" lazycell = "1.0.0" parking_lot = "0.6" url = { version="1.7", features=["query_encoding"] } -cookie = { version="0.10", features=["percent-encode"] } +cookie = { version="0.11", features=["percent-encode"] } brotli2 = { version="^0.3.2", optional = true } flate2 = { version="1.0", optional = true, default-features = false } From 6048817ba74f5a916bff72c17ec220656ea49c80 Mon Sep 17 00:00:00 2001 From: Douman Date: Wed, 25 Jul 2018 20:22:18 +0300 Subject: [PATCH 008/219] Correct flate feature names in documentation --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 0ab4a1bef..528eb7b7c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -70,9 +70,9 @@ //! dependency //! * `brotli` - enables `brotli` compression support, requires `c` //! compiler -//! * `flate-c` - enables `gzip`, `deflate` compression support, requires +//! * `flate2-c` - enables `gzip`, `deflate` compression support, requires //! `c` compiler -//! * `flate-rust` - experimental rust based implementation for +//! * `flate2-rust` - experimental rust based implementation for //! `gzip`, `deflate` compression. //! #![cfg_attr(actix_nightly, feature( From f58065082e69f023a73faeed1d646a8ef067e02e Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 25 Jul 2018 10:30:55 -0700 Subject: [PATCH 009/219] fix missing content-encoding header for h2 connections #421 --- CHANGES.md | 15 ++++--- src/server/h2writer.rs | 94 ++++++++++++++++++++++++------------------ 2 files changed, 65 insertions(+), 44 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 494ad7a65..2b13657a0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,19 +4,24 @@ ### Added - * Add implementation of `FromRequest` for `Option` and `Result` +* Add implementation of `FromRequest` for `Option` and `Result` - * Allow to handle application prefix, i.e. allow to handle `/app` path +* Allow to handle application prefix, i.e. allow to handle `/app` path for application with `/app` prefix. Check [`App::prefix()`](https://actix.rs/actix-web/actix_web/struct.App.html#method.prefix) api doc. 
+* Add `CookieSessionBackend::http_only` method to set `HttpOnly` directive of cookies + +### Changed + +* Upgrade to cookie 0.11 + +* Removed the timestamp from the default logger middleware ### Fixed -* removed the timestamp from the default logger middleware - -* Add `CookieSessionBackend::http_only` method to set `HttpOnly` directive of cookies +* Missing response header "content-encoding" #421 ## [0.7.1] - 2018-07-21 diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index c4fc59972..c877250dd 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -8,16 +8,18 @@ use modhttp::Response; use std::rc::Rc; use std::{cmp, io}; -use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING}; use http::{HttpTryFrom, Method, Version}; use super::helpers; use super::message::Request; -use super::output::{Output, ResponseInfo}; +use super::output::{Output, ResponseInfo, ResponseLength}; use super::settings::WorkerSettings; use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE}; use body::{Binary, Body}; use header::ContentEncoding; +use http::header::{ + HeaderValue, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, +}; use httpresponse::HttpResponse; const CHUNK_SIZE: usize = 16_384; @@ -92,50 +94,63 @@ impl Writer for H2Writer { let mut info = ResponseInfo::new(req.inner.method == Method::HEAD); self.buffer.for_server(&mut info, &req.inner, msg, encoding); - // http2 specific - msg.headers_mut().remove(CONNECTION); - msg.headers_mut().remove(TRANSFER_ENCODING); - - // using helpers::date is quite a lot faster - if !msg.headers().contains_key(DATE) { - let mut bytes = BytesMut::with_capacity(29); - self.settings.set_date(&mut bytes, false); - msg.headers_mut() - .insert(DATE, HeaderValue::try_from(bytes.freeze()).unwrap()); - } - - let body = msg.replace_body(Body::Empty); - match body { - Body::Binary(ref bytes) => { - if bytes.is_empty() { - msg.headers_mut() - .insert(CONTENT_LENGTH, HeaderValue::from_static("0")); - self.flags.insert(Flags::EOF); - } else { - let mut val = BytesMut::new(); - helpers::convert_usize(bytes.len(), &mut val); - let l = val.len(); - msg.headers_mut().insert( - CONTENT_LENGTH, - HeaderValue::try_from(val.split_to(l - 2).freeze()).unwrap(), - ); - } - } - Body::Empty => { - self.flags.insert(Flags::EOF); - msg.headers_mut() - .insert(CONTENT_LENGTH, HeaderValue::from_static("0")); - } - _ => (), - } - + let mut has_date = false; let mut resp = Response::new(()); *resp.status_mut() = msg.status(); *resp.version_mut() = Version::HTTP_2; for (key, value) in msg.headers().iter() { + match *key { + // http2 specific + CONNECTION | TRANSFER_ENCODING => continue, + CONTENT_ENCODING => if encoding != ContentEncoding::Identity { + continue; + }, + CONTENT_LENGTH => match info.length { + ResponseLength::None => (), + _ => continue, + }, + DATE => has_date = true, + _ => (), + } resp.headers_mut().insert(key, value.clone()); } + // set date header + if !has_date { + let mut bytes = BytesMut::with_capacity(29); + self.settings.set_date(&mut bytes, false); + resp.headers_mut() + .insert(DATE, HeaderValue::try_from(bytes.freeze()).unwrap()); + } + + // content length + match info.length { + ResponseLength::Zero => { + resp.headers_mut() + .insert(CONTENT_LENGTH, HeaderValue::from_static("0")); + self.flags.insert(Flags::EOF); + } + ResponseLength::Length(len) => { + let mut val = BytesMut::new(); + helpers::convert_usize(len, &mut val); + let l = val.len(); + resp.headers_mut().insert( + CONTENT_LENGTH, + 
HeaderValue::try_from(val.split_to(l - 2).freeze()).unwrap(), + ); + } + ResponseLength::Length64(len) => { + let l = format!("{}", len); + resp.headers_mut() + .insert(CONTENT_LENGTH, HeaderValue::try_from(l.as_str()).unwrap()); + } + _ => (), + } + if let Some(ce) = info.content_encoding { + resp.headers_mut() + .insert(CONTENT_ENCODING, HeaderValue::try_from(ce).unwrap()); + } + match self .respond .send_response(resp, self.flags.contains(Flags::EOF)) @@ -146,6 +161,7 @@ impl Writer for H2Writer { trace!("Response: {:?}", msg); + let body = msg.replace_body(Body::Empty); if let Body::Binary(bytes) = body { if bytes.is_empty() { Ok(WriterState::Done) From 80fbc2e9ec7fb675ba184921714fc924db5d83a8 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 25 Jul 2018 15:38:02 -0700 Subject: [PATCH 010/219] Fix stream draining for http/2 connections #290 --- CHANGES.md | 2 ++ src/pipeline.rs | 2 +- src/server/h2.rs | 4 +++- src/server/h2writer.rs | 8 ++++++-- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2b13657a0..051ab1cc4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -23,6 +23,8 @@ * Missing response header "content-encoding" #421 +* Fix stream draining for http/2 connections #290 + ## [0.7.1] - 2018-07-21 diff --git a/src/pipeline.rs b/src/pipeline.rs index dbe9e58ad..7c277a587 100644 --- a/src/pipeline.rs +++ b/src/pipeline.rs @@ -409,7 +409,7 @@ struct ProcessResponse { _h: PhantomData, } -#[derive(PartialEq)] +#[derive(PartialEq, Debug)] enum RunningState { Running, Paused, diff --git a/src/server/h2.rs b/src/server/h2.rs index 2322f755a..e5355a1fd 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -155,7 +155,9 @@ where } } - if !item.flags.contains(EntryFlags::WRITE_DONE) { + if item.flags.contains(EntryFlags::FINISHED) + && !item.flags.contains(EntryFlags::WRITE_DONE) + { match item.stream.poll_completed(false) { Ok(Async::NotReady) => (), Ok(Async::Ready(_)) => { diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index c877250dd..ff87b693e 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -245,14 +245,18 @@ impl Writer for H2Writer { let cap = cmp::min(self.buffer.len(), CHUNK_SIZE); stream.reserve_capacity(cap); } else { + if eof { + stream.reserve_capacity(0); + continue; + } self.flags.remove(Flags::RESERVED); - return Ok(Async::NotReady); + return Ok(Async::Ready(())); } } Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), } } } - Ok(Async::NotReady) + Ok(Async::Ready(())) } } From b4ed564e5d146cded58ea989c538e29a0968cdb3 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 26 Jul 2018 09:11:50 -0700 Subject: [PATCH 011/219] update changes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 051ab1cc4..d63d60101 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.2] - 2018-07-xx +## [0.7.2] - 2018-07-26 ### Added From 196da6d570b1f93b6892f4d7ba11fdd9d8ef630f Mon Sep 17 00:00:00 2001 From: Marat Safin Date: Sun, 29 Jul 2018 09:43:04 +0300 Subject: [PATCH 012/219] add rustls --- Cargo.toml | 11 ++- src/client/connector.rs | 153 ++++++++++++++++++++++++++++++++++++++-- src/lib.rs | 14 ++++ src/server/mod.rs | 43 +++++++++++ src/server/srv.rs | 51 ++++++++++++++ src/server/worker.rs | 43 ++++++++++- src/test.rs | 45 +++++++++--- tests/cert.pem | 58 +++++++-------- tests/test_ws.rs | 42 +++++++++++ 9 files changed, 413 insertions(+), 47 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 29c2dadbf..54bd0e383 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"] build = "build.rs" [package.metadata.docs.rs] -features = ["tls", "alpn", "session", "brotli", "flate2-c"] +features = ["tls", "alpn", "rust-tls", "session", "brotli", "flate2-c"] [badges] travis-ci = { repository = "actix/actix-web", branch = "master" } @@ -37,6 +37,9 @@ tls = ["native-tls", "tokio-tls"] # openssl alpn = ["openssl", "tokio-openssl"] +# rustls +rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"] + # sessions feature, session require "ring" crate and c compiler session = ["cookie/secure"] @@ -104,6 +107,12 @@ tokio-tls = { version="0.1", optional = true } openssl = { version="0.10", optional = true } tokio-openssl = { version="0.2", optional = true } +#rustls +rustls = { version = "0.13", optional = true } +tokio-rustls = { version = "0.7", optional = true } +webpki = { version = "0.18", optional = true } +webpki-roots = { version = "0.15", optional = true } + # forked url_encoded itoa = "0.4" dtoa = "0.4" diff --git a/src/client/connector.rs b/src/client/connector.rs index c2ff328ea..a00546719 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -22,12 +22,25 @@ use openssl::ssl::{Error as OpensslError, SslConnector, SslMethod}; use tokio_openssl::SslConnectorExt; #[cfg(all(feature = "tls", not(feature = "alpn")))] -use native_tls::{Error as TlsError, TlsConnector}; +use native_tls::{Error as TlsError, TlsConnector, TlsStream}; #[cfg(all(feature = "tls", not(feature = "alpn")))] use tokio_tls::TlsConnectorExt; +#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +use rustls::ClientConfig; +#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +use std::io::Error as TLSError; +#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +use std::sync::Arc; +#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +use tokio_rustls::ClientConfigExt; +#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +use webpki::DNSNameRef; +#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +use webpki_roots; + use server::IoStream; -use {HAS_OPENSSL, HAS_TLS}; +use {HAS_OPENSSL, HAS_TLS, HAS_RUSTLS}; /// Client connector usage stats #[derive(Default, Message)] @@ -139,6 +152,11 @@ pub enum ClientConnectorError { #[fail(display = "{}", _0)] SslError(#[cause] TlsError), + /// SSL error + #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + #[fail(display = "{}", _0)] + SslError(#[cause] TLSError), + /// Resolver error #[fail(display = "{}", _0)] Resolver(#[cause] ResolverError), @@ -193,6 +211,8 @@ pub struct ClientConnector { connector: SslConnector, #[cfg(all(feature = "tls", not(feature = "alpn")))] connector: TlsConnector, + #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + connector: Arc, stats: ClientConnectorStats, subscriber: Option>, @@ -262,8 +282,16 @@ impl Default for ClientConnector { paused: Paused::No, } } + #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + { + let mut config = ClientConfig::new(); + config + .root_store + .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); + ClientConnector::with_connector(Arc::new(config)) + } - #[cfg(not(any(feature = "alpn", feature = "tls")))] + #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] { let (tx, rx) = 
mpsc::unbounded(); ClientConnector { @@ -325,7 +353,7 @@ impl ClientConnector { /// # actix::System::current().stop(); /// Ok(()) /// }) - /// ); + /// }); /// } /// ``` pub fn with_connector(connector: SslConnector) -> ClientConnector { @@ -352,6 +380,75 @@ impl ClientConnector { } } + #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + /// Create `ClientConnector` actor with custom `SslConnector` instance. + /// + /// By default `ClientConnector` uses very a simple SSL configuration. + /// With `with_connector` method it is possible to use a custom + /// `SslConnector` object. + /// + /// ```rust + /// # #![cfg(feature = "rust-tls")] + /// # extern crate actix_web; + /// # extern crate futures; + /// # extern crate tokio; + /// # use futures::{future, Future}; + /// # use std::io::Write; + /// # use std::process; + /// # use actix_web::actix::Actor; + /// extern crate rustls; + /// extern crate webpki_roots; + /// use actix_web::{actix, client::ClientConnector, client::Connect}; + /// + /// use rustls::ClientConfig; + /// use std::sync::Arc; + /// + /// fn main() { + /// actix::run(|| { + /// // Start `ClientConnector` with custom `ClientConfig` + /// let mut config = ClientConfig::new(); + /// config + /// .root_store + /// .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); + /// let conn = ClientConnector::with_connector(Arc::new(config)).start(); + /// + /// conn.send( + /// Connect::new("https://www.rust-lang.org").unwrap()) // <- connect to host + /// .map_err(|_| ()) + /// .and_then(|res| { + /// if let Ok(mut stream) = res { + /// stream.write_all(b"GET / HTTP/1.0\r\n\r\n").unwrap(); + /// } + /// # actix::System::current().stop(); + /// Ok(()) + /// }) + /// }); + /// } + /// ``` + pub fn with_connector(connector: Arc) -> ClientConnector { + let (tx, rx) = mpsc::unbounded(); + + ClientConnector { + connector, + stats: ClientConnectorStats::default(), + subscriber: None, + acq_tx: tx, + acq_rx: Some(rx), + resolver: None, + conn_lifetime: Duration::from_secs(75), + conn_keep_alive: Duration::from_secs(15), + limit: 100, + limit_per_host: 0, + acquired: 0, + acquired_per_host: HashMap::new(), + available: HashMap::new(), + to_close: Vec::new(), + waiters: Some(HashMap::new()), + wait_timeout: None, + paused: Paused::No, + } + } + /// Set total number of simultaneous connections. /// /// If limit is 0, the connector has no limit. 
@@ -709,7 +806,51 @@ impl ClientConnector { } } - #[cfg(not(any(feature = "alpn", feature = "tls")))] + #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + match res { + Err(err) => { + let _ = waiter.tx.send(Err(err.into())); + fut::Either::B(fut::err(())) + } + Ok(stream) => { + act.stats.opened += 1; + if conn.0.ssl { + let host = DNSNameRef::try_from_ascii_str(&key.host).unwrap(); + fut::Either::A( + act.connector + .connect_async(host, stream) + .into_actor(act) + .then(move |res, _, _| { + match res { + Err(e) => { + let _ = waiter.tx.send(Err( + ClientConnectorError::SslError(e), + )); + } + Ok(stream) => { + let _ = + waiter.tx.send(Ok(Connection::new( + conn.0.clone(), + Some(conn), + Box::new(stream), + ))); + } + } + fut::ok(()) + }), + ) + } else { + let _ = waiter.tx.send(Ok(Connection::new( + conn.0.clone(), + Some(conn), + Box::new(stream), + ))); + fut::Either::B(fut::ok(())) + } + } + } + + #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] match res { Err(err) => { let _ = waiter.tx.send(Err(err.into())); @@ -784,7 +925,7 @@ impl Handler for ClientConnector { }; // check ssl availability - if proto.is_secure() && !HAS_OPENSSL && !HAS_TLS { + if proto.is_secure() && !HAS_OPENSSL && !HAS_TLS && !HAS_RUSTLS { return ActorResponse::reply(Err(ClientConnectorError::SslIsNotSupported)); } diff --git a/src/lib.rs b/src/lib.rs index 528eb7b7c..626bb95f8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -151,6 +151,15 @@ extern crate openssl; #[cfg(feature = "openssl")] extern crate tokio_openssl; +#[cfg(feature = "rust-tls")] +extern crate rustls; +#[cfg(feature = "rust-tls")] +extern crate tokio_rustls; +#[cfg(feature = "rust-tls")] +extern crate webpki; +#[cfg(feature = "rust-tls")] +extern crate webpki_roots; + mod application; mod body; mod context; @@ -224,6 +233,11 @@ pub(crate) const HAS_TLS: bool = true; #[cfg(not(feature = "tls"))] pub(crate) const HAS_TLS: bool = false; +#[cfg(feature = "rust-tls")] +pub(crate) const HAS_RUSTLS: bool = true; +#[cfg(not(feature = "rust-tls"))] +pub(crate) const HAS_RUSTLS: bool = false; + pub mod dev { //! The `actix-web` prelude for library developers //! 
diff --git a/src/server/mod.rs b/src/server/mod.rs index a302f5e73..dc8ecd810 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -310,3 +310,46 @@ impl IoStream for TlsStream { self.get_mut().get_mut().set_linger(dur) } } + +#[cfg(feature = "rust-tls")] +use rustls::{ClientSession, ServerSession}; +#[cfg(feature = "rust-tls")] +use tokio_rustls::TlsStream; + +#[cfg(feature = "rust-tls")] +impl IoStream for TlsStream { + #[inline] + fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { + let _ = ::shutdown(self); + Ok(()) + } + + #[inline] + fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { + self.get_mut().0.set_nodelay(nodelay) + } + + #[inline] + fn set_linger(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().0.set_linger(dur) + } +} + +#[cfg(feature = "rust-tls")] +impl IoStream for TlsStream { + #[inline] + fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { + let _ = ::shutdown(self); + Ok(()) + } + + #[inline] + fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { + self.get_mut().0.set_nodelay(nodelay) + } + + #[inline] + fn set_linger(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().0.set_linger(dur) + } +} diff --git a/src/server/srv.rs b/src/server/srv.rs index 02580d015..d6f5cf4d9 100644 --- a/src/server/srv.rs +++ b/src/server/srv.rs @@ -22,6 +22,9 @@ use native_tls::TlsAcceptor; #[cfg(feature = "alpn")] use openssl::ssl::{AlpnError, SslAcceptorBuilder}; +#[cfg(feature = "rust-tls")] +use rustls::ServerConfig; + use super::channel::{HttpChannel, WrapperStream}; use super::settings::{ServerSettings, WorkerSettings}; use super::worker::{Conn, SocketInfo, StopWorker, StreamHandlerType, Worker}; @@ -42,6 +45,14 @@ fn configure_alpn(builder: &mut SslAcceptorBuilder) -> io::Result<()> { Ok(()) } +#[cfg(all(feature = "rust-tls", not(feature = "alpn")))] +fn configure_alpn(builder: &mut Arc) -> io::Result<()> { + Arc::::get_mut(builder) + .unwrap() + .set_protocols(&vec!["h2".to_string(), "http/1.1".to_string()]); + Ok(()) +} + /// An HTTP Server pub struct HttpServer where @@ -265,6 +276,26 @@ where Ok(self) } + #[cfg(all(feature = "rust-tls", not(feature = "alpn")))] + /// Use listener for accepting incoming tls connection requests + /// + /// This method sets alpn protocols to "h2" and "http/1.1" + pub fn listen_ssl( + mut self, lst: net::TcpListener, mut builder: Arc, + ) -> io::Result { + // alpn support + if !self.no_http2 { + configure_alpn(&mut builder)?; + } + let addr = lst.local_addr().unwrap(); + self.sockets.push(Socket { + addr, + lst, + tp: StreamHandlerType::Rustls(builder.clone()), + }); + Ok(self) + } + fn bind2(&mut self, addr: S) -> io::Result> { let mut err = None; let mut succ = false; @@ -343,6 +374,26 @@ where Ok(self) } + #[cfg(all(feature = "rust-tls", not(feature = "alpn")))] + /// Start listening for incoming tls connections. 
+ /// + /// This method sets alpn protocols to "h2" and "http/1.1" + pub fn bind_ssl( + mut self, addr: S, mut builder: Arc, + ) -> io::Result { + // alpn support + if !self.no_http2 { + configure_alpn(&mut builder)?; + } + + let sockets = self.bind2(addr)?; + self.sockets.extend(sockets.into_iter().map(|mut s| { + s.tp = StreamHandlerType::Rustls(builder.clone()); + s + })); + Ok(self) + } + fn start_workers( &mut self, settings: &ServerSettings, sockets: &Slab, ) -> Vec<(usize, mpsc::UnboundedSender>)> { diff --git a/src/server/worker.rs b/src/server/worker.rs index 8fd3fe601..5e753ce58 100644 --- a/src/server/worker.rs +++ b/src/server/worker.rs @@ -8,7 +8,7 @@ use tokio::executor::current_thread; use tokio_reactor::Handle; use tokio_tcp::TcpStream; -#[cfg(any(feature = "tls", feature = "alpn"))] +#[cfg(any(feature = "tls", feature = "alpn", feature = "rust-tls"))] use futures::future; #[cfg(feature = "tls")] @@ -21,6 +21,13 @@ use openssl::ssl::SslAcceptor; #[cfg(feature = "alpn")] use tokio_openssl::SslAcceptorExt; +#[cfg(feature = "rust-tls")] +use rustls::{ServerConfig, Session}; +#[cfg(feature = "rust-tls")] +use std::sync::Arc; +#[cfg(feature = "rust-tls")] +use tokio_rustls::ServerConfigExt; + use actix::msgs::StopArbiter; use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message, Response}; @@ -170,6 +177,8 @@ pub(crate) enum StreamHandlerType { Tls(TlsAcceptor), #[cfg(feature = "alpn")] Alpn(SslAcceptor), + #[cfg(feature = "rust-tls")] + Rustls(Arc), } impl StreamHandlerType { @@ -237,6 +246,36 @@ impl StreamHandlerType { }, )); } + #[cfg(feature = "rust-tls")] + StreamHandlerType::Rustls(ref acceptor) => { + let Conn { io, peer, .. } = msg; + let _ = io.set_nodelay(true); + let io = TcpStream::from_std(io, &Handle::default()) + .expect("failed to associate TCP stream"); + + current_thread::spawn(ServerConfigExt::accept_async(acceptor, io).then( + move |res| { + match res { + Ok(io) => { + let http2 = if let Some(p) = + io.get_ref().1.get_alpn_protocol() + { + p.len() == 2 && &p == &"h2" + } else { + false + }; + current_thread::spawn(HttpChannel::new( + h, io, peer, http2, + )); + } + Err(err) => { + trace!("Error during handling tls connection: {}", err) + } + }; + future::result(Ok(())) + }, + )); + } } } @@ -247,6 +286,8 @@ impl StreamHandlerType { StreamHandlerType::Tls(_) => "https", #[cfg(feature = "alpn")] StreamHandlerType::Alpn(_) => "https", + #[cfg(feature = "rust-tls")] + StreamHandlerType::Rustls(_) => "https", } } } diff --git a/src/test.rs b/src/test.rs index c2e5c7569..f466db2d5 100644 --- a/src/test.rs +++ b/src/test.rs @@ -15,6 +15,10 @@ use tokio::runtime::current_thread::Runtime; #[cfg(feature = "alpn")] use openssl::ssl::SslAcceptorBuilder; +#[cfg(feature = "rust-tls")] +use rustls::ServerConfig; +#[cfg(feature = "rust-tls")] +use std::sync::Arc; use application::{App, HttpApplication}; use body::Binary; @@ -140,7 +144,19 @@ impl TestServer { builder.set_verify(SslVerifyMode::NONE); ClientConnector::with_connector(builder.build()).start() } - #[cfg(not(feature = "alpn"))] + #[cfg(feature = "rust-tls")] + { + use rustls::ClientConfig; + use std::io::BufReader; + use std::fs::File; + let mut config = ClientConfig::new(); + let pem_file = &mut BufReader::new(File::open("tests/cert.pem").unwrap()); + config + .root_store + .add_pem_file(pem_file).unwrap(); + ClientConnector::with_connector(Arc::new(config)).start() + } + #[cfg(not(any(feature = "alpn", feature = "rust-tls")))] { ClientConnector::default().start() } @@ -165,16 +181,16 @@ impl 
TestServer { pub fn url(&self, uri: &str) -> String { if uri.starts_with('/') { format!( - "{}://{}{}", + "{}://localhost:{}{}", if self.ssl { "https" } else { "http" }, - self.addr, + self.addr.port(), uri ) } else { format!( - "{}://{}/{}", + "{}://localhost:{}/{}", if self.ssl { "https" } else { "http" }, - self.addr, + self.addr.port(), uri ) } @@ -241,6 +257,8 @@ pub struct TestServerBuilder { state: Box S + Sync + Send + 'static>, #[cfg(feature = "alpn")] ssl: Option, + #[cfg(feature = "rust-tls")] + ssl: Option>, } impl TestServerBuilder { @@ -251,7 +269,7 @@ impl TestServerBuilder { { TestServerBuilder { state: Box::new(state), - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "rust-tls"))] ssl: None, } } @@ -263,6 +281,13 @@ impl TestServerBuilder { self } + #[cfg(feature = "rust-tls")] + /// Create ssl server + pub fn ssl(mut self, ssl: Arc) -> Self { + self.ssl = Some(ssl); + self + } + #[allow(unused_mut)] /// Configure test application and run test server pub fn start(mut self, config: F) -> TestServer @@ -271,9 +296,9 @@ impl TestServerBuilder { { let (tx, rx) = mpsc::channel(); - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "rust-tls"))] let ssl = self.ssl.is_some(); - #[cfg(not(feature = "alpn"))] + #[cfg(not(any(feature = "alpn", feature = "rust-tls")))] let ssl = false; // run server in separate thread @@ -293,7 +318,7 @@ impl TestServerBuilder { tx.send((System::current(), local_addr, TestServer::get_conn())) .unwrap(); - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "rust-tls"))] { let ssl = self.ssl.take(); if let Some(ssl) = ssl { @@ -302,7 +327,7 @@ impl TestServerBuilder { srv.listen(tcp).start(); } } - #[cfg(not(feature = "alpn"))] + #[cfg(not(any(feature = "alpn", feature = "rust-tls")))] { srv.listen(tcp).start(); } diff --git a/tests/cert.pem b/tests/cert.pem index 159aacea2..db04fbfae 100644 --- a/tests/cert.pem +++ b/tests/cert.pem @@ -1,31 +1,31 @@ -----BEGIN CERTIFICATE----- -MIIFPjCCAyYCCQDvLYiYD+jqeTANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQGEwJV -UzELMAkGA1UECAwCQ0ExCzAJBgNVBAcMAlNGMRAwDgYDVQQKDAdDb21wYW55MQww -CgYDVQQLDANPcmcxGDAWBgNVBAMMD3d3dy5leGFtcGxlLmNvbTAeFw0xODAxMjUx -NzQ2MDFaFw0xOTAxMjUxNzQ2MDFaMGExCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJD -QTELMAkGA1UEBwwCU0YxEDAOBgNVBAoMB0NvbXBhbnkxDDAKBgNVBAsMA09yZzEY -MBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A -MIICCgKCAgEA2WzIA2IpVR9Tb9EFhITlxuhE5rY2a3S6qzYNzQVgSFggxXEPn8k1 -sQEcer5BfAP986Sck3H0FvB4Bt/I8PwOtUCmhwcc8KtB5TcGPR4fjXnrpC+MIK5U -NLkwuyBDKziYzTdBj8kUFX1WxmvEHEgqToPOZfBgsS71cJAR/zOWraDLSRM54jXy -voLZN4Ti9rQagQrvTQ44Vz5ycDQy7UxtbUGh1CVv69vNVr7/SOOh/Nw5FNOZWLWr -odGyoec5wh9iqRZgRqiTUc6Lt7V2RWc2X2gjwST2UfI+U46Ip3oaQ7ZD4eAkoqND -xdniBZAykVG3c/99ux4BAESTF8fsNch6UticBxYMuTu+ouvP0psfI9wwwNliJDmA -CRUTB9AgRynbL1AzhqQoDfsb98IZfjfNOpwnwuLwpMAPhbgd5KNdZaIJ4Hb6/stI -yFElOExxd3TAxF2Gshd/lq1JcNHAZ1DSXV5MvOWT/NWgXwbIzUgQ8eIi+HuDYX2U -UuaB6R8tbd52H7rbUv6HrfinuSlKWqjSYLkiKHkwUpoMw8y9UycRSzs1E9nPwPTO -vRXb0mNCQeBCV9FvStNVXdCUTT8LGPv87xSD2pmt7LijlE6mHLG8McfcWkzA69un -CEHIFAFDimTuN7EBljc119xWFTcHMyoZAfFF+oTqwSbBGImruCxnaJECAwEAATAN -BgkqhkiG9w0BAQsFAAOCAgEApavsgsn7SpPHfhDSN5iZs1ILZQRewJg0Bty0xPfk -3tynSW6bNH3nSaKbpsdmxxomthNSQgD2heOq1By9YzeOoNR+7Pk3s4FkASnf3ToI -JNTUasBFFfaCG96s4Yvs8KiWS/k84yaWuU8c3Wb1jXs5Rv1qE1Uvuwat1DSGXSoD -JNluuIkCsC4kWkyq5pWCGQrabWPRTWsHwC3PTcwSRBaFgYLJaR72SloHB1ot02zL -d2age9dmFRFLLCBzP+D7RojBvL37qS/HR+rQ4SoQwiVc/JzaeqSe7ZbvEH9sZYEu -ALowJzgbwro7oZflwTWunSeSGDSltkqKjvWvZI61pwfHKDahUTmZ5h2y67FuGEaC 
-CIOUI8dSVSPKITxaq3JL4ze2e9/0Lt7hj19YK2uUmtMAW5Tirz4Yx5lyGH9U8Wur -y/X8VPxTc4A9TMlJgkyz0hqvhbPOT/zSWB10zXh0glKAsSBryAOEDxV1UygmSir7 -YV8Qaq+oyKUTMc1MFq5vZ07M51EPaietn85t8V2Y+k/8XYltRp32NxsypxAJuyxh -g/ko6RVTrWa1sMvz/F9LFqAdKiK5eM96lh9IU4xiLg4ob8aS/GRAA8oIFkZFhLrt -tOwjIUPmEPyHWFi8dLpNuQKYalLYhuwZftG/9xV+wqhKGZO9iPrpHSYBRTap8w2y -1QU= +MIIFXTCCA0WgAwIBAgIJAJ3tqfd0MLLNMA0GCSqGSIb3DQEBCwUAMGExCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDRjELMAkGA1UEBwwCU0YxEDAOBgNVBAoMB0NvbXBh +bnkxDDAKBgNVBAsMA09yZzEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMB4XDTE4 +MDcyOTE4MDgzNFoXDTE5MDcyOTE4MDgzNFowYTELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNGMQswCQYDVQQHDAJTRjEQMA4GA1UECgwHQ29tcGFueTEMMAoGA1UECwwD +T3JnMRgwFgYDVQQDDA93d3cuZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDZbMgDYilVH1Nv0QWEhOXG6ETmtjZrdLqrNg3NBWBIWCDF +cQ+fyTWxARx6vkF8A/3zpJyTcfQW8HgG38jw/A61QKaHBxzwq0HlNwY9Hh+Neeuk +L4wgrlQ0uTC7IEMrOJjNN0GPyRQVfVbGa8QcSCpOg85l8GCxLvVwkBH/M5atoMtJ +EzniNfK+gtk3hOL2tBqBCu9NDjhXPnJwNDLtTG1tQaHUJW/r281Wvv9I46H83DkU +05lYtauh0bKh5znCH2KpFmBGqJNRzou3tXZFZzZfaCPBJPZR8j5TjoinehpDtkPh +4CSio0PF2eIFkDKRUbdz/327HgEARJMXx+w1yHpS2JwHFgy5O76i68/Smx8j3DDA +2WIkOYAJFRMH0CBHKdsvUDOGpCgN+xv3whl+N806nCfC4vCkwA+FuB3ko11logng +dvr+y0jIUSU4THF3dMDEXYayF3+WrUlw0cBnUNJdXky85ZP81aBfBsjNSBDx4iL4 +e4NhfZRS5oHpHy1t3nYfuttS/oet+Ke5KUpaqNJguSIoeTBSmgzDzL1TJxFLOzUT +2c/A9M69FdvSY0JB4EJX0W9K01Vd0JRNPwsY+/zvFIPama3suKOUTqYcsbwxx9xa +TMDr26cIQcgUAUOKZO43sQGWNzXX3FYVNwczKhkB8UX6hOrBJsEYiau4LGdokQID +AQABoxgwFjAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggIB +AIX+Qb4QRBxHl5X2UjRyLfWVkimtGlwI8P+eJZL3DrHBH/TpqAaCvTf0EbRC32nm +ASDMwIghaMvyrW40QN6V/CWRRi25cXUfsIZr1iHAHK0eZJV8SWooYtt4iNrcUs3g +4OTvDxhNmDyNwV9AXhJsBKf80dCW6/84jItqVAj20/OO4Rkd2tEeI8NomiYBc6a1 +hgwvv02myYF5hG/xZ9YSqeroBCZHwGYoJJnSpMPqJsxbCVnx2/U9FzGwcRmNHFCe +0g7EJZd3//8Plza6nkTBjJ/V7JnLqMU+ltx4mAgZO8rfzIr84qZdt0YN33VJQhYq +seuMySxrsuaAoxAmm8IoK9cW4IPzx1JveBQiroNlq5YJGf2UW7BTc3gz6c2tINZi +7ailBVdhlMnDXAf3/9xiiVlRAHOxgZh/7sRrKU7kDEHM4fGoc0YyZBTQKndPYMwO +3Bd82rlQ4sd46XYutTrB+mBYClVrJs+OzbNedTsR61DVNKKsRG4mNPyKSAIgOfM5 +XmSvCMPN5JK9U0DsNIV2/SnVsmcklQczT35FLTxl9ntx8ys7ZYK+SppD7XuLfWMq +GT9YMWhlpw0aRDg/aayeeOcnsNBhzAFMcOpQj1t6Fgv4+zbS9BM2bT0hbX86xjkr +E6wWgkuCslMgQlEJ+TM5RhYrI5/rVZQhvmgcob/9gPZv -----END CERTIFICATE----- diff --git a/tests/test_ws.rs b/tests/test_ws.rs index 66a9153dc..1ed80bf77 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -12,6 +12,8 @@ use rand::Rng; #[cfg(feature = "alpn")] extern crate openssl; +#[cfg(feature = "rust-tls")] +extern crate rustls; use actix::prelude::*; use actix_web::*; @@ -272,3 +274,43 @@ fn test_ws_server_ssl() { assert_eq!(item, data); } } + +#[test] +#[cfg(feature = "rust-tls")] +fn test_ws_server_ssl() { + extern crate rustls; + use rustls::{ServerConfig, NoClientAuth}; + use rustls::internal::pemfile::{certs, rsa_private_keys}; + use std::io::BufReader; + use std::sync::Arc; + use std::fs::File; + + // load ssl keys + let mut config = ServerConfig::new(NoClientAuth::new()); + let cert_file = &mut BufReader::new(File::open("tests/cert.pem").unwrap()); + let key_file = &mut BufReader::new(File::open("tests/key.pem").unwrap()); + let cert_chain = certs(cert_file).unwrap(); + let mut keys = rsa_private_keys(key_file).unwrap(); + config.set_single_cert(cert_chain, keys.remove(0)).unwrap(); + + let mut srv = test::TestServer::build().ssl(Arc::new(config)).start(|app| { + app.handler(|req| { + ws::start( + req, + Ws2 { + count: 0, + bin: false, + }, + ) + }) + }); + + let (mut reader, _writer) = srv.ws().unwrap(); + + let data = 
Some(ws::Message::Text("0".repeat(65_536))); + for _ in 0..10_000 { + let (item, r) = srv.execute(reader.into_future()).unwrap(); + reader = r; + assert_eq!(item, data); + } +} From 4c4d0d2745f1c49ea2d1a2ac50521684d31c846c Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 30 Jul 2018 10:23:28 -0700 Subject: [PATCH 013/219] update changes --- CHANGES.md | 7 +++++++ Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index d63d60101..c5c0499dd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.7.3] - 2018-07-xx + +### Added + +* Support HTTP/2 with rustls #36 + + ## [0.7.2] - 2018-07-26 ### Added diff --git a/Cargo.toml b/Cargo.toml index 54bd0e383..139c647af 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.2" +version = "0.7.3" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md" From 7bc0ace52d5045f6dc17a084e452dba4631a1d63 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 30 Jul 2018 13:42:42 -0700 Subject: [PATCH 014/219] move server accept impl to seprate module --- src/server/accept.rs | 207 ++++++++++++++++++++++++++++++++++++++++++ src/server/mod.rs | 1 + src/server/srv.rs | 212 +++---------------------------------------- 3 files changed, 223 insertions(+), 197 deletions(-) create mode 100644 src/server/accept.rs diff --git a/src/server/accept.rs b/src/server/accept.rs new file mode 100644 index 000000000..a91ca8141 --- /dev/null +++ b/src/server/accept.rs @@ -0,0 +1,207 @@ +use std::sync::mpsc as sync_mpsc; +use std::time::Duration; +use std::{io, net, thread}; + +use futures::sync::mpsc; +use mio; +use slab::Slab; + +#[cfg(feature = "tls")] +use native_tls::TlsAcceptor; + +#[cfg(feature = "alpn")] +use openssl::ssl::{AlpnError, SslAcceptorBuilder}; + +#[cfg(feature = "rust-tls")] +use rustls::ServerConfig; + +use super::srv::{ServerCommand, Socket}; +use super::worker::{Conn, SocketInfo}; + +pub(crate) enum Command { + Pause, + Resume, + Stop, + Worker(usize, mpsc::UnboundedSender>), +} + +pub(crate) fn start_accept_thread( + token: usize, sock: Socket, srv: mpsc::UnboundedSender, + socks: Slab, + mut workers: Vec<(usize, mpsc::UnboundedSender>)>, +) -> (mio::SetReadiness, sync_mpsc::Sender) { + let (tx, rx) = sync_mpsc::channel(); + let (reg, readiness) = mio::Registration::new2(); + + // start accept thread + #[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))] + let _ = thread::Builder::new() + .name(format!("Accept on {}", sock.addr)) + .spawn(move || { + const SRV: mio::Token = mio::Token(0); + const CMD: mio::Token = mio::Token(1); + + let addr = sock.addr; + let mut server = Some( + mio::net::TcpListener::from_std(sock.lst) + .expect("Can not create mio::net::TcpListener"), + ); + + // Create a poll instance + let poll = match mio::Poll::new() { + Ok(poll) => poll, + Err(err) => panic!("Can not create mio::Poll: {}", err), + }; + + // Start listening for incoming connections + if let Some(ref srv) = server { + if let Err(err) = + poll.register(srv, SRV, mio::Ready::readable(), mio::PollOpt::edge()) + { + panic!("Can not register io: {}", err); + } + } + + // Start listening for incoming commands + if let Err(err) = + poll.register(®, CMD, mio::Ready::readable(), mio::PollOpt::edge()) + { + panic!("Can not register Registration: {}", err); + } + + // Create storage for events + let mut events = mio::Events::with_capacity(128); + + // Sleep on 
error + let sleep = Duration::from_millis(100); + + let mut next = 0; + loop { + if let Err(err) = poll.poll(&mut events, None) { + panic!("Poll error: {}", err); + } + + for event in events.iter() { + match event.token() { + SRV => if let Some(ref server) = server { + loop { + match server.accept_std() { + Ok((io, addr)) => { + let mut msg = Conn { + io, + token, + peer: Some(addr), + http2: false, + }; + while !workers.is_empty() { + match workers[next].1.unbounded_send(msg) { + Ok(_) => (), + Err(err) => { + let _ = srv.unbounded_send( + ServerCommand::WorkerDied( + workers[next].0, + socks.clone(), + ), + ); + msg = err.into_inner(); + workers.swap_remove(next); + if workers.is_empty() { + error!("No workers"); + thread::sleep(sleep); + break; + } else if workers.len() <= next { + next = 0; + } + continue; + } + } + next = (next + 1) % workers.len(); + break; + } + } + Err(ref e) + if e.kind() == io::ErrorKind::WouldBlock => + { + break + } + Err(ref e) if connection_error(e) => continue, + Err(e) => { + error!("Error accepting connection: {}", e); + // sleep after error + thread::sleep(sleep); + break; + } + } + } + }, + CMD => match rx.try_recv() { + Ok(cmd) => match cmd { + Command::Pause => if let Some(ref server) = server { + if let Err(err) = poll.deregister(server) { + error!( + "Can not deregister server socket {}", + err + ); + } else { + info!( + "Paused accepting connections on {}", + addr + ); + } + }, + Command::Resume => { + if let Some(ref server) = server { + if let Err(err) = poll.register( + server, + SRV, + mio::Ready::readable(), + mio::PollOpt::edge(), + ) { + error!("Can not resume socket accept process: {}", err); + } else { + info!("Accepting connections on {} has been resumed", + addr); + } + } + } + Command::Stop => { + if let Some(server) = server.take() { + let _ = poll.deregister(&server); + } + return; + } + Command::Worker(idx, addr) => { + workers.push((idx, addr)); + } + }, + Err(err) => match err { + sync_mpsc::TryRecvError::Empty => (), + sync_mpsc::TryRecvError::Disconnected => { + if let Some(server) = server.take() { + let _ = poll.deregister(&server); + } + return; + } + }, + }, + _ => unreachable!(), + } + } + } + }); + + (readiness, tx) +} + +/// This function defines errors that are per-connection. Which basically +/// means that if we get this error from `accept()` system call it means +/// next connection might be ready to be accepted. +/// +/// All other errors will incur a timeout before next `accept()` is performed. +/// The timeout is useful to handle resource exhaustion errors like ENFILE +/// and EMFILE. Otherwise, could enter into tight loop. 
+fn connection_error(e: &io::Error) -> bool { + e.kind() == io::ErrorKind::ConnectionRefused + || e.kind() == io::ErrorKind::ConnectionAborted + || e.kind() == io::ErrorKind::ConnectionReset +} diff --git a/src/server/mod.rs b/src/server/mod.rs index dc8ecd810..a4f5e87d7 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -7,6 +7,7 @@ use futures::{Async, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_tcp::TcpStream; +pub(crate) mod accept; mod channel; mod error; pub(crate) mod h1; diff --git a/src/server/srv.rs b/src/server/srv.rs index d6f5cf4d9..a054d5a70 100644 --- a/src/server/srv.rs +++ b/src/server/srv.rs @@ -1,7 +1,7 @@ use std::rc::Rc; use std::sync::{mpsc as sync_mpsc, Arc}; use std::time::Duration; -use std::{io, net, thread}; +use std::{io, net}; use actix::{ fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, Context, Handler, @@ -25,6 +25,7 @@ use openssl::ssl::{AlpnError, SslAcceptorBuilder}; #[cfg(feature = "rust-tls")] use rustls::ServerConfig; +use super::accept::{start_accept_thread, Command}; use super::channel::{HttpChannel, WrapperStream}; use super::settings::{ServerSettings, WorkerSettings}; use super::worker::{Conn, SocketInfo, StopWorker, StreamHandlerType, Worker}; @@ -75,7 +76,7 @@ where no_signals: bool, } -enum ServerCommand { +pub(crate) enum ServerCommand { WorkerDied(usize, Slab), } @@ -86,10 +87,10 @@ where type Context = Context; } -struct Socket { - lst: net::TcpListener, - addr: net::SocketAddr, - tp: StreamHandlerType, +pub(crate) struct Socket { + pub lst: net::TcpListener, + pub addr: net::SocketAddr, + pub tp: StreamHandlerType, } impl HttpServer @@ -132,7 +133,10 @@ where } #[doc(hidden)] - #[deprecated(since = "0.6.0", note = "please use `HttpServer::workers()` instead")] + #[deprecated( + since = "0.6.0", + note = "please use `HttpServer::workers()` instead" + )] pub fn threads(self, num: usize) -> Self { self.workers(num) } @@ -538,7 +542,8 @@ impl HttpServer { #[doc(hidden)] #[cfg(feature = "tls")] #[deprecated( - since = "0.6.0", note = "please use `actix_web::HttpServer::bind_tls` instead" + since = "0.6.0", + note = "please use `actix_web::HttpServer::bind_tls` instead" )] impl HttpServer { /// Start listening for incoming tls connections. @@ -557,7 +562,8 @@ impl HttpServer { #[doc(hidden)] #[cfg(feature = "alpn")] #[deprecated( - since = "0.6.0", note = "please use `actix_web::HttpServer::bind_ssl` instead" + since = "0.6.0", + note = "please use `actix_web::HttpServer::bind_ssl` instead" )] impl HttpServer { /// Start listening for incoming tls connections. 
@@ -810,181 +816,6 @@ impl Handler for HttpServer { } } -enum Command { - Pause, - Resume, - Stop, - Worker(usize, mpsc::UnboundedSender>), -} - -fn start_accept_thread( - token: usize, sock: Socket, srv: mpsc::UnboundedSender, - socks: Slab, - mut workers: Vec<(usize, mpsc::UnboundedSender>)>, -) -> (mio::SetReadiness, sync_mpsc::Sender) { - let (tx, rx) = sync_mpsc::channel(); - let (reg, readiness) = mio::Registration::new2(); - - // start accept thread - #[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))] - let _ = thread::Builder::new() - .name(format!("Accept on {}", sock.addr)) - .spawn(move || { - const SRV: mio::Token = mio::Token(0); - const CMD: mio::Token = mio::Token(1); - - let addr = sock.addr; - let mut server = Some( - mio::net::TcpListener::from_std(sock.lst) - .expect("Can not create mio::net::TcpListener"), - ); - - // Create a poll instance - let poll = match mio::Poll::new() { - Ok(poll) => poll, - Err(err) => panic!("Can not create mio::Poll: {}", err), - }; - - // Start listening for incoming connections - if let Some(ref srv) = server { - if let Err(err) = - poll.register(srv, SRV, mio::Ready::readable(), mio::PollOpt::edge()) - { - panic!("Can not register io: {}", err); - } - } - - // Start listening for incoming commands - if let Err(err) = - poll.register(®, CMD, mio::Ready::readable(), mio::PollOpt::edge()) - { - panic!("Can not register Registration: {}", err); - } - - // Create storage for events - let mut events = mio::Events::with_capacity(128); - - // Sleep on error - let sleep = Duration::from_millis(100); - - let mut next = 0; - loop { - if let Err(err) = poll.poll(&mut events, None) { - panic!("Poll error: {}", err); - } - - for event in events.iter() { - match event.token() { - SRV => if let Some(ref server) = server { - loop { - match server.accept_std() { - Ok((io, addr)) => { - let mut msg = Conn { - io, - token, - peer: Some(addr), - http2: false, - }; - while !workers.is_empty() { - match workers[next].1.unbounded_send(msg) { - Ok(_) => (), - Err(err) => { - let _ = srv.unbounded_send( - ServerCommand::WorkerDied( - workers[next].0, - socks.clone(), - ), - ); - msg = err.into_inner(); - workers.swap_remove(next); - if workers.is_empty() { - error!("No workers"); - thread::sleep(sleep); - break; - } else if workers.len() <= next { - next = 0; - } - continue; - } - } - next = (next + 1) % workers.len(); - break; - } - } - Err(ref e) - if e.kind() == io::ErrorKind::WouldBlock => - { - break - } - Err(ref e) if connection_error(e) => continue, - Err(e) => { - error!("Error accepting connection: {}", e); - // sleep after error - thread::sleep(sleep); - break; - } - } - } - }, - CMD => match rx.try_recv() { - Ok(cmd) => match cmd { - Command::Pause => if let Some(ref server) = server { - if let Err(err) = poll.deregister(server) { - error!( - "Can not deregister server socket {}", - err - ); - } else { - info!( - "Paused accepting connections on {}", - addr - ); - } - }, - Command::Resume => { - if let Some(ref server) = server { - if let Err(err) = poll.register( - server, - SRV, - mio::Ready::readable(), - mio::PollOpt::edge(), - ) { - error!("Can not resume socket accept process: {}", err); - } else { - info!("Accepting connections on {} has been resumed", - addr); - } - } - } - Command::Stop => { - if let Some(server) = server.take() { - let _ = poll.deregister(&server); - } - return; - } - Command::Worker(idx, addr) => { - workers.push((idx, addr)); - } - }, - Err(err) => match err { - sync_mpsc::TryRecvError::Empty => (), - 
sync_mpsc::TryRecvError::Disconnected => { - if let Some(server) = server.take() { - let _ = poll.deregister(&server); - } - return; - } - }, - }, - _ => unreachable!(), - } - } - } - }); - - (readiness, tx) -} - fn create_tcp_listener( addr: net::SocketAddr, backlog: i32, ) -> io::Result { @@ -996,16 +827,3 @@ fn create_tcp_listener( builder.bind(addr)?; Ok(builder.listen(backlog)?) } - -/// This function defines errors that are per-connection. Which basically -/// means that if we get this error from `accept()` system call it means -/// next connection might be ready to be accepted. -/// -/// All other errors will incur a timeout before next `accept()` is performed. -/// The timeout is useful to handle resource exhaustion errors like ENFILE -/// and EMFILE. Otherwise, could enter into tight loop. -fn connection_error(e: &io::Error) -> bool { - e.kind() == io::ErrorKind::ConnectionRefused - || e.kind() == io::ErrorKind::ConnectionAborted - || e.kind() == io::ErrorKind::ConnectionReset -} From 2072c933ba6448966c50ad50887af51f95ee39c2 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 30 Jul 2018 15:04:52 -0700 Subject: [PATCH 015/219] handle error during request creation --- src/client/request.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/client/request.rs b/src/client/request.rs index 650f0eeaa..72aab259d 100644 --- a/src/client/request.rs +++ b/src/client/request.rs @@ -316,8 +316,7 @@ impl ClientRequestBuilder { /// Set HTTP method of this request. #[inline] pub fn get_method(&mut self) -> &Method { - let parts = - parts(&mut self.request, &self.err).expect("cannot reuse request builder"); + let parts = self.request.as_ref().expect("cannot reuse request builder"); &parts.method } From 4dba531bf91fc68aa4a3625e4cc205f2ec266f8a Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 31 Jul 2018 08:51:24 -0700 Subject: [PATCH 016/219] do not override HOST header for client request #428 --- CHANGES.md | 4 ++++ src/client/request.rs | 21 ++++++++++++++++----- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index c5c0499dd..8ba3ef566 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,6 +6,10 @@ * Support HTTP/2 with rustls #36 +### Fixed + +* Do not override HOST header for client request #428 + ## [0.7.2] - 2018-07-26 diff --git a/src/client/request.rs b/src/client/request.rs index 72aab259d..4d506c3fa 100644 --- a/src/client/request.rs +++ b/src/client/request.rs @@ -291,10 +291,6 @@ impl ClientRequestBuilder { fn _uri(&mut self, url: &str) -> &mut Self { match Uri::try_from(url) { Ok(uri) => { - // set request host header - if let Some(host) = uri.host() { - self.set_header(header::HOST, host); - } if let Some(parts) = parts(&mut self.request, &self.err) { parts.uri = uri; } @@ -629,9 +625,24 @@ impl ClientRequestBuilder { self.set_header_if_none(header::ACCEPT_ENCODING, "gzip, deflate"); } + // set request host header + if let Some(parts) = parts(&mut self.request, &self.err) { + if let Some(host) = parts.uri.host() { + if !parts.headers.contains_key(header::HOST) { + match host.try_into() { + Ok(value) => { + parts.headers.insert(header::HOST, value); + } + Err(e) => self.err = Some(e.into()), + } + } + } + } + + // user agent self.set_header_if_none( header::USER_AGENT, - concat!("Actix-web/", env!("CARGO_PKG_VERSION")), + concat!("actix-web/", env!("CARGO_PKG_VERSION")), ); } From 3bd43090fb7ec452251840c1bd813d6745bf6916 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 31 Jul 2018 09:06:05 -0700 Subject: [PATCH 
017/219] use new gzdecoder, fixes gz streaming #228 --- CHANGES.md | 2 + Cargo.toml | 2 +- src/server/input.rs | 125 ++++++++++--------------------------------- tests/test_client.rs | 2 +- 4 files changed, 32 insertions(+), 99 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8ba3ef566..95144ce19 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,6 +10,8 @@ * Do not override HOST header for client request #428 +* Gz streaming, use `flate2::write::GzDecoder` #228 + ## [0.7.2] - 2018-07-26 diff --git a/Cargo.toml b/Cargo.toml index 139c647af..695b2e31f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,7 +81,7 @@ parking_lot = "0.6" url = { version="1.7", features=["query_encoding"] } cookie = { version="0.11", features=["percent-encode"] } brotli2 = { version="^0.3.2", optional = true } -flate2 = { version="1.0", optional = true, default-features = false } +flate2 = { version="^1.0.2", optional = true, default-features = false } failure = "=0.1.1" diff --git a/src/server/input.rs b/src/server/input.rs index 8c11c2463..fe62e760a 100644 --- a/src/server/input.rs +++ b/src/server/input.rs @@ -1,14 +1,11 @@ -use std::io::{Read, Write}; -use std::{cmp, io}; +use std::io::{self, Write}; #[cfg(feature = "brotli")] use brotli2::write::BrotliDecoder; -use bytes::{BufMut, Bytes, BytesMut}; +use bytes::{Bytes, BytesMut}; use error::PayloadError; #[cfg(feature = "flate2")] -use flate2::read::GzDecoder; -#[cfg(feature = "flate2")] -use flate2::write::DeflateDecoder; +use flate2::write::{DeflateDecoder, GzDecoder}; use header::ContentEncoding; use http::header::{HeaderMap, CONTENT_ENCODING}; use payload::{PayloadSender, PayloadStatus, PayloadWriter}; @@ -144,46 +141,12 @@ pub(crate) enum Decoder { #[cfg(feature = "flate2")] Deflate(Box>), #[cfg(feature = "flate2")] - Gzip(Option>>), + Gzip(Box>), #[cfg(feature = "brotli")] Br(Box>), Identity, } -// should go after write::GzDecoder get implemented -#[derive(Debug)] -pub(crate) struct Wrapper { - pub buf: BytesMut, - pub eof: bool, -} - -impl io::Read for Wrapper { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let len = cmp::min(buf.len(), self.buf.len()); - buf[..len].copy_from_slice(&self.buf[..len]); - self.buf.split_to(len); - if len == 0 { - if self.eof { - Ok(0) - } else { - Err(io::Error::new(io::ErrorKind::WouldBlock, "")) - } - } else { - Ok(len) - } - } -} - -impl io::Write for Wrapper { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.buf.extend_from_slice(buf); - Ok(buf.len()) - } - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - pub(crate) struct Writer { buf: BytesMut, } @@ -212,12 +175,11 @@ impl io::Write for Writer { /// Payload stream with decompression support pub(crate) struct PayloadStream { decoder: Decoder, - dst: BytesMut, } impl PayloadStream { pub fn new(enc: ContentEncoding) -> PayloadStream { - let dec = match enc { + let decoder = match enc { #[cfg(feature = "brotli")] ContentEncoding::Br => { Decoder::Br(Box::new(BrotliDecoder::new(Writer::new()))) @@ -227,13 +189,12 @@ impl PayloadStream { Decoder::Deflate(Box::new(DeflateDecoder::new(Writer::new()))) } #[cfg(feature = "flate2")] - ContentEncoding::Gzip => Decoder::Gzip(None), + ContentEncoding::Gzip => { + Decoder::Gzip(Box::new(GzDecoder::new(Writer::new()))) + } _ => Decoder::Identity, }; - PayloadStream { - decoder: dec, - dst: BytesMut::new(), - } + PayloadStream { decoder } } } @@ -253,22 +214,17 @@ impl PayloadStream { Err(e) => Err(e), }, #[cfg(feature = "flate2")] - Decoder::Gzip(ref mut decoder) => { - if let Some(ref mut decoder) = 
*decoder { - decoder.as_mut().get_mut().eof = true; - - self.dst.reserve(8192); - match decoder.read(unsafe { self.dst.bytes_mut() }) { - Ok(n) => { - unsafe { self.dst.advance_mut(n) }; - return Ok(Some(self.dst.take().freeze())); - } - Err(e) => return Err(e), + Decoder::Gzip(ref mut decoder) => match decoder.try_finish() { + Ok(_) => { + let b = decoder.get_mut().take(); + if !b.is_empty() { + Ok(Some(b)) + } else { + Ok(None) } - } else { - Ok(None) } - } + Err(e) => Err(e), + }, #[cfg(feature = "flate2")] Decoder::Deflate(ref mut decoder) => match decoder.try_finish() { Ok(_) => { @@ -301,43 +257,18 @@ impl PayloadStream { Err(e) => Err(e), }, #[cfg(feature = "flate2")] - Decoder::Gzip(ref mut decoder) => { - if decoder.is_none() { - *decoder = Some(Box::new(GzDecoder::new(Wrapper { - buf: BytesMut::from(data), - eof: false, - }))); - } else { - let _ = decoder.as_mut().unwrap().write(&data); - } - - loop { - self.dst.reserve(8192); - match decoder - .as_mut() - .as_mut() - .unwrap() - .read(unsafe { self.dst.bytes_mut() }) - { - Ok(n) => { - if n != 0 { - unsafe { self.dst.advance_mut(n) }; - } - if n == 0 { - return Ok(Some(self.dst.take().freeze())); - } - } - Err(e) => { - if e.kind() == io::ErrorKind::WouldBlock - && !self.dst.is_empty() - { - return Ok(Some(self.dst.take().freeze())); - } - return Err(e); - } + Decoder::Gzip(ref mut decoder) => match decoder.write_all(&data) { + Ok(_) => { + decoder.flush()?; + let b = decoder.get_mut().take(); + if !b.is_empty() { + Ok(Some(b)) + } else { + Ok(None) } } - } + Err(e) => Err(e), + }, #[cfg(feature = "flate2")] Decoder::Deflate(ref mut decoder) => match decoder.write_all(&data) { Ok(_) => { diff --git a/tests/test_client.rs b/tests/test_client.rs index cf20fb8b8..5e6856998 100644 --- a/tests/test_client.rs +++ b/tests/test_client.rs @@ -438,7 +438,7 @@ fn test_default_headers() { let repr = format!("{:?}", request); assert!(repr.contains("\"accept-encoding\": \"gzip, deflate\"")); assert!(repr.contains(concat!( - "\"user-agent\": \"Actix-web/", + "\"user-agent\": \"actix-web/", env!("CARGO_PKG_VERSION"), "\"" ))); From 2071ea053293e1f1bfde4e43bfab9137ac62ba48 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 31 Jul 2018 15:40:52 -0700 Subject: [PATCH 018/219] HttpRequest::url_for is not working with scopes #429 --- CHANGES.md | 2 + src/application.rs | 3 +- src/extractor.rs | 8 +- src/httprequest.rs | 12 +- src/router.rs | 297 ++++++++++++++++++++++++++++++++++----------- src/scope.rs | 23 ++-- src/test.rs | 8 +- 7 files changed, 257 insertions(+), 96 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 95144ce19..237b4bfbc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -12,6 +12,8 @@ * Gz streaming, use `flate2::write::GzDecoder` #228 +* HttpRequest::url_for is not working with scopes #429 + ## [0.7.2] - 2018-07-26 diff --git a/src/application.rs b/src/application.rs index a5cd3386f..6885185f2 100644 --- a/src/application.rs +++ b/src/application.rs @@ -140,7 +140,7 @@ where parts: Some(ApplicationParts { state, prefix: "".to_owned(), - router: Router::new(), + router: Router::new(ResourceDef::prefix("")), middlewares: Vec::new(), filters: Vec::new(), encoding: ContentEncoding::Auto, @@ -198,6 +198,7 @@ where if !prefix.starts_with('/') { prefix.insert(0, '/') } + parts.router.set_prefix(&prefix); parts.prefix = prefix; } self diff --git a/src/extractor.rs b/src/extractor.rs index aa4fdea7a..5c2c7f600 100644 --- a/src/extractor.rs +++ b/src/extractor.rs @@ -934,7 +934,7 @@ mod tests { fn test_request_extract() { let req = 
TestRequest::with_uri("/name/user1/?id=test").finish(); - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/"))); let info = router.recognize(&req, &(), 0); let req = req.with_route_info(info); @@ -950,7 +950,7 @@ mod tests { let s = Query::::from_request(&req, &()).unwrap(); assert_eq!(s.id, "test"); - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/"))); let req = TestRequest::with_uri("/name/32/").finish(); let info = router.recognize(&req, &(), 0); @@ -971,7 +971,7 @@ mod tests { #[test] fn test_extract_path_single() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_resource(Resource::new(ResourceDef::new("/{value}/"))); let req = TestRequest::with_uri("/32/").finish(); @@ -982,7 +982,7 @@ mod tests { #[test] fn test_tuple_extract() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/"))); let req = TestRequest::with_uri("/name/user1/?id=test").finish(); diff --git a/src/httprequest.rs b/src/httprequest.rs index 83017dfa0..6f3bfe13e 100644 --- a/src/httprequest.rs +++ b/src/httprequest.rs @@ -420,7 +420,7 @@ mod tests { #[test] fn test_request_match_info() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_resource(Resource::new(ResourceDef::new("/{key}/"))); let req = TestRequest::with_uri("/value/?id=test").finish(); @@ -430,7 +430,7 @@ mod tests { #[test] fn test_url_for() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); let mut resource = Resource::new(ResourceDef::new("/user/{name}.{ext}")); resource.name("index"); router.register_resource(resource); @@ -464,7 +464,8 @@ mod tests { fn test_url_for_with_prefix() { let mut resource = Resource::new(ResourceDef::new("/user/{name}.html")); resource.name("index"); - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); + router.set_prefix("/prefix"); router.register_resource(resource); let mut info = router.default_route_info(); @@ -490,7 +491,8 @@ mod tests { fn test_url_for_static() { let mut resource = Resource::new(ResourceDef::new("/index.html")); resource.name("index"); - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); + router.set_prefix("/prefix"); router.register_resource(resource); let mut info = router.default_route_info(); @@ -513,7 +515,7 @@ mod tests { #[test] fn test_url_for_external() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_external( "youtube", ResourceDef::external("https://youtube.com/watch/{video_id}"), diff --git a/src/router.rs b/src/router.rs index f3f657b58..3d112bf60 100644 --- a/src/router.rs +++ b/src/router.rs @@ -1,3 +1,4 @@ +use std::cell::RefCell; use std::cmp::min; use std::collections::HashMap; use std::hash::{Hash, Hasher}; @@ -111,9 +112,14 @@ impl ResourceInfo { U: IntoIterator, I: AsRef, { - if let Some(pattern) = self.rmap.named.get(name) { - let path = - pattern.resource_path(elements, &req.path()[..(self.prefix as usize)])?; + let mut path = String::new(); + let mut elements = elements.into_iter(); + + if self + .rmap + .patterns_for(name, &mut path, &mut elements)? 
+ .is_some() + { if path.starts_with('/') { let conn = req.connection_info(); Ok(Url::parse(&format!( @@ -160,12 +166,15 @@ impl ResourceInfo { } pub(crate) struct ResourceMap { + root: ResourceDef, + parent: RefCell>>, named: HashMap, patterns: Vec<(ResourceDef, Option>)>, + nested: Vec>, } impl ResourceMap { - pub fn has_resource(&self, path: &str) -> bool { + fn has_resource(&self, path: &str) -> bool { let path = if path.is_empty() { "/" } else { path }; for (pattern, rmap) in &self.patterns { @@ -179,20 +188,91 @@ impl ResourceMap { } false } + + fn patterns_for( + &self, name: &str, path: &mut String, elements: &mut U, + ) -> Result, UrlGenerationError> + where + U: Iterator, + I: AsRef, + { + if self.pattern_for(name, path, elements)?.is_some() { + Ok(Some(())) + } else { + self.parent_pattern_for(name, path, elements) + } + } + + fn pattern_for( + &self, name: &str, path: &mut String, elements: &mut U, + ) -> Result, UrlGenerationError> + where + U: Iterator, + I: AsRef, + { + if let Some(pattern) = self.named.get(name) { + self.fill_root(path, elements)?; + pattern.resource_path(path, elements)?; + Ok(Some(())) + } else { + for rmap in &self.nested { + if rmap.pattern_for(name, path, elements)?.is_some() { + return Ok(Some(())); + } + } + Ok(None) + } + } + + fn fill_root( + &self, path: &mut String, elements: &mut U, + ) -> Result<(), UrlGenerationError> + where + U: Iterator, + I: AsRef, + { + if let Some(ref parent) = *self.parent.borrow() { + parent.fill_root(path, elements)?; + } + self.root.resource_path(path, elements) + } + + fn parent_pattern_for( + &self, name: &str, path: &mut String, elements: &mut U, + ) -> Result, UrlGenerationError> + where + U: Iterator, + I: AsRef, + { + if let Some(ref parent) = *self.parent.borrow() { + if let Some(pattern) = parent.named.get(name) { + self.fill_root(path, elements)?; + pattern.resource_path(path, elements)?; + Ok(Some(())) + } else { + parent.parent_pattern_for(name, path, elements) + } + } else { + Ok(None) + } + } } impl Default for Router { fn default() -> Self { - Router::new() + Router::new(ResourceDef::new("")) } } impl Router { - pub(crate) fn new() -> Self { + pub(crate) fn new(root: ResourceDef) -> Self { Router { rmap: Rc::new(ResourceMap { + root, + parent: RefCell::new(None), named: HashMap::new(), patterns: Vec::new(), + nested: Vec::new(), }), resources: Vec::new(), patterns: Vec::new(), @@ -233,6 +313,10 @@ impl Router { } } + pub(crate) fn set_prefix(&mut self, path: &str) { + Rc::get_mut(&mut self.rmap).unwrap().root = ResourceDef::new(path); + } + pub(crate) fn register_resource(&mut self, resource: Resource) { { let rmap = Rc::get_mut(&mut self.rmap).unwrap(); @@ -258,6 +342,11 @@ impl Router { .unwrap() .patterns .push((scope.rdef().clone(), Some(scope.router().rmap.clone()))); + Rc::get_mut(&mut self.rmap) + .unwrap() + .nested + .push(scope.router().rmap.clone()); + let filters = scope.take_filters(); self.patterns .push(ResourcePattern::Scope(scope.rdef().clone(), filters)); @@ -286,22 +375,25 @@ impl Router { } pub(crate) fn finish(&mut self) { - if let Some(ref default) = self.default { - for resource in &mut self.resources { - match resource { - ResourceItem::Resource(_) => (), - ResourceItem::Scope(scope) => { - if !scope.has_default_resource() { + for resource in &mut self.resources { + match resource { + ResourceItem::Resource(_) => (), + ResourceItem::Scope(scope) => { + if !scope.has_default_resource() { + if let Some(ref default) = self.default { scope.default_resource(default.clone()); } - 
scope.finish() } - ResourceItem::Handler(hnd) => { - if !hnd.has_default_resource() { + *scope.router().rmap.parent.borrow_mut() = Some(self.rmap.clone()); + scope.finish(); + } + ResourceItem::Handler(hnd) => { + if !hnd.has_default_resource() { + if let Some(ref default) = self.default { hnd.default_resource(default.clone()); } - hnd.finish() } + hnd.finish() } } } @@ -459,35 +551,38 @@ pub struct ResourceDef { } impl ResourceDef { - /// Parse path pattern and create new `Resource` instance. + /// Parse path pattern and create new `ResourceDef` instance. /// /// Panics if path pattern is wrong. pub fn new(path: &str) -> Self { - ResourceDef::with_prefix(path, if path.is_empty() { "" } else { "/" }, false) + ResourceDef::with_prefix(path, false, !path.is_empty()) } - /// Parse path pattern and create new `Resource` instance. + /// Parse path pattern and create new `ResourceDef` instance. /// /// Use `prefix` type instead of `static`. /// /// Panics if path regex pattern is wrong. pub fn prefix(path: &str) -> Self { - ResourceDef::with_prefix(path, "/", true) + ResourceDef::with_prefix(path, true, !path.is_empty()) } - /// Construct external resource + /// Construct external resource def /// /// Panics if path pattern is wrong. pub fn external(path: &str) -> Self { - let mut resource = ResourceDef::with_prefix(path, "/", false); + let mut resource = ResourceDef::with_prefix(path, false, false); resource.rtp = ResourceType::External; resource } - /// Parse path pattern and create new `Resource` instance with custom prefix - pub fn with_prefix(path: &str, prefix: &str, for_prefix: bool) -> Self { - let (pattern, elements, is_dynamic, len) = - ResourceDef::parse(path, prefix, for_prefix); + /// Parse path pattern and create new `ResourceDef` instance with custom prefix + pub fn with_prefix(path: &str, for_prefix: bool, slash: bool) -> Self { + let mut path = path.to_owned(); + if slash && !path.starts_with('/') { + path.insert(0, '/'); + } + let (pattern, elements, is_dynamic, len) = ResourceDef::parse(&path, for_prefix); let tp = if is_dynamic { let re = match Regex::new(&pattern) { @@ -705,23 +800,21 @@ impl ResourceDef { /// Build resource path. pub fn resource_path( - &self, elements: U, prefix: &str, - ) -> Result + &self, path: &mut String, elements: &mut U, + ) -> Result<(), UrlGenerationError> where - U: IntoIterator, + U: Iterator, I: AsRef, { - let mut path = match self.tp { - PatternType::Prefix(ref p) => p.to_owned(), - PatternType::Static(ref p) => p.to_owned(), + match self.tp { + PatternType::Prefix(ref p) => path.push_str(p), + PatternType::Static(ref p) => path.push_str(p), PatternType::Dynamic(..) 
=> { - let mut path = String::new(); - let mut iter = elements.into_iter(); for el in &self.elements { match *el { PatternElement::Str(ref s) => path.push_str(s), PatternElement::Var(_) => { - if let Some(val) = iter.next() { + if let Some(val) = elements.next() { path.push_str(val.as_ref()) } else { return Err(UrlGenerationError::NotEnoughElements); @@ -729,34 +822,18 @@ impl ResourceDef { } } } - path } }; - - if self.rtp != ResourceType::External { - if prefix.ends_with('/') { - if path.starts_with('/') { - path.insert_str(0, &prefix[..prefix.len() - 1]); - } else { - path.insert_str(0, prefix); - } - } else { - if !path.starts_with('/') { - path.insert(0, '/'); - } - path.insert_str(0, prefix); - } - } - Ok(path) + Ok(()) } fn parse( - pattern: &str, prefix: &str, for_prefix: bool, + pattern: &str, for_prefix: bool, ) -> (String, Vec, bool, usize) { const DEFAULT_PATTERN: &str = "[^/]+"; - let mut re1 = String::from("^") + prefix; - let mut re2 = String::from(prefix); + let mut re1 = String::from("^"); + let mut re2 = String::new(); let mut el = String::new(); let mut in_param = false; let mut in_param_pattern = false; @@ -766,12 +843,7 @@ impl ResourceDef { let mut elems = Vec::new(); let mut len = 0; - for (index, ch) in pattern.chars().enumerate() { - // All routes must have a leading slash so its optional to have one - if index == 0 && ch == '/' { - continue; - } - + for ch in pattern.chars() { if in_param { // In parameter segment: `{....}` if ch == '}' { @@ -846,7 +918,7 @@ mod tests { #[test] fn test_recognizer10() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_resource(Resource::new(ResourceDef::new("/name"))); router.register_resource(Resource::new(ResourceDef::new("/name/{val}"))); router.register_resource(Resource::new(ResourceDef::new( @@ -858,7 +930,7 @@ mod tests { ))); router.register_resource(Resource::new(ResourceDef::new("/v/{tail:.*}"))); router.register_resource(Resource::new(ResourceDef::new("/test2/{test}.html"))); - router.register_resource(Resource::new(ResourceDef::new("{test}/index.html"))); + router.register_resource(Resource::new(ResourceDef::new("/{test}/index.html"))); let req = TestRequest::with_uri("/name").finish(); let info = router.recognize(&req, &(), 0); @@ -909,7 +981,7 @@ mod tests { #[test] fn test_recognizer_2() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_resource(Resource::new(ResourceDef::new("/index.json"))); router.register_resource(Resource::new(ResourceDef::new("/{source}.json"))); @@ -924,7 +996,7 @@ mod tests { #[test] fn test_recognizer_with_prefix() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_resource(Resource::new(ResourceDef::new("/name"))); router.register_resource(Resource::new(ResourceDef::new("/name/{val}"))); @@ -943,7 +1015,7 @@ mod tests { assert_eq!(&info.match_info()["val"], "value"); // same patterns - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); router.register_resource(Resource::new(ResourceDef::new("/name"))); router.register_resource(Resource::new(ResourceDef::new("/name/{val}"))); @@ -1049,7 +1121,7 @@ mod tests { #[test] fn test_request_resource() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); let mut resource = Resource::new(ResourceDef::new("/index.json")); resource.name("r1"); router.register_resource(resource); @@ -1071,7 +1143,7 @@ mod tests { #[test] fn 
test_has_resource() { - let mut router = Router::<()>::new(); + let mut router = Router::<()>::default(); let scope = Scope::new("/test").resource("/name", |_| "done"); router.register_scope(scope); @@ -1088,4 +1160,93 @@ mod tests { let info = router.default_route_info(); assert!(info.has_resource("/test2/test10/name")); } + + #[test] + fn test_url_for() { + let mut router = Router::<()>::new(ResourceDef::prefix("")); + + let mut resource = Resource::new(ResourceDef::new("/tttt")); + resource.name("r0"); + router.register_resource(resource); + + let scope = Scope::new("/test").resource("/name", |r| { + r.name("r1"); + }); + router.register_scope(scope); + + let scope = Scope::new("/test2") + .nested("/test10", |s| s.resource("/name", |r| r.name("r2"))); + router.register_scope(scope); + router.finish(); + + let req = TestRequest::with_uri("/test").request(); + { + let info = router.default_route_info(); + + let res = info + .url_for(&req, "r0", Vec::<&'static str>::new()) + .unwrap(); + assert_eq!(res.as_str(), "http://localhost:8080/tttt"); + + let res = info + .url_for(&req, "r1", Vec::<&'static str>::new()) + .unwrap(); + assert_eq!(res.as_str(), "http://localhost:8080/test/name"); + + let res = info + .url_for(&req, "r2", Vec::<&'static str>::new()) + .unwrap(); + assert_eq!(res.as_str(), "http://localhost:8080/test2/test10/name"); + } + + let req = TestRequest::with_uri("/test/name").request(); + let info = router.recognize(&req, &(), 0); + assert_eq!(info.resource, ResourceId::Normal(1)); + + let res = info + .url_for(&req, "r0", Vec::<&'static str>::new()) + .unwrap(); + assert_eq!(res.as_str(), "http://localhost:8080/tttt"); + + let res = info + .url_for(&req, "r1", Vec::<&'static str>::new()) + .unwrap(); + assert_eq!(res.as_str(), "http://localhost:8080/test/name"); + + let res = info + .url_for(&req, "r2", Vec::<&'static str>::new()) + .unwrap(); + assert_eq!(res.as_str(), "http://localhost:8080/test2/test10/name"); + } + + #[test] + fn test_url_for_dynamic() { + let mut router = Router::<()>::new(ResourceDef::prefix("")); + + let mut resource = Resource::new(ResourceDef::new("/{name}/test/index.{ext}")); + resource.name("r0"); + router.register_resource(resource); + + let scope = Scope::new("/{name1}").nested("/{name2}", |s| { + s.resource("/{name3}/test/index.{ext}", |r| r.name("r2")) + }); + router.register_scope(scope); + router.finish(); + + let req = TestRequest::with_uri("/test").request(); + { + let info = router.default_route_info(); + + let res = info.url_for(&req, "r0", vec!["sec1", "html"]).unwrap(); + assert_eq!(res.as_str(), "http://localhost:8080/sec1/test/index.html"); + + let res = info + .url_for(&req, "r2", vec!["sec1", "sec2", "sec3", "html"]) + .unwrap(); + assert_eq!( + res.as_str(), + "http://localhost:8080/sec1/sec2/sec3/test/index.html" + ); + } + } } diff --git a/src/scope.rs b/src/scope.rs index 43d078529..d8a0a81ad 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -58,11 +58,11 @@ pub struct Scope { #[cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))] impl Scope { /// Create a new scope - // TODO: Why is this not exactly the default impl? 
pub fn new(path: &str) -> Scope { + let rdef = ResourceDef::prefix(path); Scope { - rdef: ResourceDef::prefix(path), - router: Rc::new(Router::new()), + rdef: rdef.clone(), + router: Rc::new(Router::new(rdef)), filters: Vec::new(), middlewares: Rc::new(Vec::new()), } @@ -132,10 +132,11 @@ impl Scope { where F: FnOnce(Scope) -> Scope, { + let rdef = ResourceDef::prefix(path); let scope = Scope { - rdef: ResourceDef::prefix(path), + rdef: rdef.clone(), filters: Vec::new(), - router: Rc::new(Router::new()), + router: Rc::new(Router::new(rdef)), middlewares: Rc::new(Vec::new()), }; let mut scope = f(scope); @@ -178,10 +179,11 @@ impl Scope { where F: FnOnce(Scope) -> Scope, { + let rdef = ResourceDef::prefix(&path); let scope = Scope { - rdef: ResourceDef::prefix(&path), + rdef: rdef.clone(), filters: Vec::new(), - router: Rc::new(Router::new()), + router: Rc::new(Router::new(rdef)), middlewares: Rc::new(Vec::new()), }; Rc::get_mut(&mut self.router) @@ -258,12 +260,7 @@ impl Scope { F: FnOnce(&mut Resource) -> R + 'static, { // add resource - let pattern = ResourceDef::with_prefix( - path, - if path.is_empty() { "" } else { "/" }, - false, - ); - let mut resource = Resource::new(pattern); + let mut resource = Resource::new(ResourceDef::new(path)); f(&mut resource); Rc::get_mut(&mut self.router) diff --git a/src/test.rs b/src/test.rs index f466db2d5..f94732dd7 100644 --- a/src/test.rs +++ b/src/test.rs @@ -147,13 +147,11 @@ impl TestServer { #[cfg(feature = "rust-tls")] { use rustls::ClientConfig; - use std::io::BufReader; use std::fs::File; + use std::io::BufReader; let mut config = ClientConfig::new(); let pem_file = &mut BufReader::new(File::open("tests/cert.pem").unwrap()); - config - .root_store - .add_pem_file(pem_file).unwrap(); + config.root_store.add_pem_file(pem_file).unwrap(); ClientConnector::with_connector(Arc::new(config)).start() } #[cfg(not(any(feature = "alpn", feature = "rust-tls")))] @@ -574,7 +572,7 @@ impl TestRequest { payload, prefix, } = self; - let router = Router::<()>::new(); + let router = Router::<()>::default(); let pool = RequestPool::pool(ServerSettings::default()); let mut req = RequestPool::get(pool); From aa1e75f071e0c729b217e42a93b742ace0ba6b39 Mon Sep 17 00:00:00 2001 From: jrconlin Date: Tue, 31 Jul 2018 16:21:18 -0700 Subject: [PATCH 019/219] feature: allow TestServer to open a websocket on any URL * added `TestServer::ws_at(uri_str)` * modified `TestServer::ws()` to call `self.ws_at("/")` to preserve behavior Closes #432 --- src/test.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/test.rs b/src/test.rs index f94732dd7..2ec7a98d8 100644 --- a/src/test.rs +++ b/src/test.rs @@ -207,15 +207,23 @@ impl TestServer { self.rt.block_on(fut) } - /// Connect to websocket server - pub fn ws( + /// Connect to websocket server at a given path + pub fn ws_at( &mut self, + path: &str, ) -> Result<(ws::ClientReader, ws::ClientWriter), ws::ClientError> { - let url = self.url("/"); + let url = self.url(path); self.rt .block_on(ws::Client::with_connector(url, self.conn.clone()).connect()) } + /// Connect to a websocket server + pub fn ws( + &mut self, + ) -> Result<(ws::ClientReader, ws::ClientWriter), ws::ClientError> { + self.ws_at("/") + } + /// Create `GET` request pub fn get(&self) -> ClientRequestBuilder { ClientRequest::get(self.url("/").as_str()) From 58230b15b9cd67a98e65a074652bd384e24757f6 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 31 Jul 2018 19:51:26 -0700 Subject: [PATCH 020/219] use one thread for accept loop; 
refactor rust-tls support --- .travis.yml | 6 +- src/server/accept.rs | 439 +++++++++++++++++++++++++++---------------- src/server/mod.rs | 6 +- src/server/srv.rs | 57 +++--- src/test.rs | 65 ++++--- tests/test_server.rs | 56 ++++++ tests/test_ws.rs | 9 +- 7 files changed, 406 insertions(+), 232 deletions(-) diff --git a/.travis.yml b/.travis.yml index 54a86aa7a..f03c95238 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,12 +32,12 @@ script: - | if [[ "$TRAVIS_RUST_VERSION" != "stable" ]]; then cargo clean - cargo test --features="alpn,tls" -- --nocapture + cargo test --features="alpn,tls,rust-tls" -- --nocapture fi - | if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin - cargo tarpaulin --features="alpn,tls" --out Xml --no-count + cargo tarpaulin --features="alpn,tls,rust-tls" --out Xml --no-count bash <(curl -s https://codecov.io/bash) echo "Uploaded code coverage" fi @@ -46,7 +46,7 @@ script: after_success: - | if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then - cargo doc --features "alpn, tls, session" --no-deps && + cargo doc --features "alpn, tls, rust-tls, session" --no-deps && echo "" > target/doc/index.html && git clone https://github.com/davisp/ghp-import.git && ./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc && diff --git a/src/server/accept.rs b/src/server/accept.rs index a91ca8141..752805600 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -1,22 +1,16 @@ use std::sync::mpsc as sync_mpsc; -use std::time::Duration; +use std::time::{Duration, Instant}; use std::{io, net, thread}; -use futures::sync::mpsc; +use futures::{sync::mpsc, Future}; use mio; use slab::Slab; +use tokio_timer::Delay; -#[cfg(feature = "tls")] -use native_tls::TlsAcceptor; - -#[cfg(feature = "alpn")] -use openssl::ssl::{AlpnError, SslAcceptorBuilder}; - -#[cfg(feature = "rust-tls")] -use rustls::ServerConfig; +use actix::{msgs::Execute, Arbiter, System}; use super::srv::{ServerCommand, Socket}; -use super::worker::{Conn, SocketInfo}; +use super::worker::Conn; pub(crate) enum Command { Pause, @@ -25,169 +19,43 @@ pub(crate) enum Command { Worker(usize, mpsc::UnboundedSender>), } +struct ServerSocketInfo { + addr: net::SocketAddr, + token: usize, + sock: mio::net::TcpListener, + timeout: Option, +} + +struct Accept { + poll: mio::Poll, + rx: sync_mpsc::Receiver, + sockets: Slab, + workers: Vec<(usize, mpsc::UnboundedSender>)>, + _reg: mio::Registration, + next: usize, + srv: mpsc::UnboundedSender, + timer: (mio::Registration, mio::SetReadiness), +} + +const CMD: mio::Token = mio::Token(0); +const TIMER: mio::Token = mio::Token(1); + pub(crate) fn start_accept_thread( - token: usize, sock: Socket, srv: mpsc::UnboundedSender, - socks: Slab, - mut workers: Vec<(usize, mpsc::UnboundedSender>)>, + socks: Vec<(usize, Socket)>, srv: mpsc::UnboundedSender, + workers: Vec<(usize, mpsc::UnboundedSender>)>, ) -> (mio::SetReadiness, sync_mpsc::Sender) { let (tx, rx) = sync_mpsc::channel(); let (reg, readiness) = mio::Registration::new2(); + let sys = System::current(); + // start accept thread #[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))] let _ = thread::Builder::new() - .name(format!("Accept on {}", sock.addr)) + .name("actix-web accept loop".to_owned()) .spawn(move || { - const SRV: mio::Token = mio::Token(0); - const CMD: 
mio::Token = mio::Token(1); - - let addr = sock.addr; - let mut server = Some( - mio::net::TcpListener::from_std(sock.lst) - .expect("Can not create mio::net::TcpListener"), - ); - - // Create a poll instance - let poll = match mio::Poll::new() { - Ok(poll) => poll, - Err(err) => panic!("Can not create mio::Poll: {}", err), - }; - - // Start listening for incoming connections - if let Some(ref srv) = server { - if let Err(err) = - poll.register(srv, SRV, mio::Ready::readable(), mio::PollOpt::edge()) - { - panic!("Can not register io: {}", err); - } - } - - // Start listening for incoming commands - if let Err(err) = - poll.register(®, CMD, mio::Ready::readable(), mio::PollOpt::edge()) - { - panic!("Can not register Registration: {}", err); - } - - // Create storage for events - let mut events = mio::Events::with_capacity(128); - - // Sleep on error - let sleep = Duration::from_millis(100); - - let mut next = 0; - loop { - if let Err(err) = poll.poll(&mut events, None) { - panic!("Poll error: {}", err); - } - - for event in events.iter() { - match event.token() { - SRV => if let Some(ref server) = server { - loop { - match server.accept_std() { - Ok((io, addr)) => { - let mut msg = Conn { - io, - token, - peer: Some(addr), - http2: false, - }; - while !workers.is_empty() { - match workers[next].1.unbounded_send(msg) { - Ok(_) => (), - Err(err) => { - let _ = srv.unbounded_send( - ServerCommand::WorkerDied( - workers[next].0, - socks.clone(), - ), - ); - msg = err.into_inner(); - workers.swap_remove(next); - if workers.is_empty() { - error!("No workers"); - thread::sleep(sleep); - break; - } else if workers.len() <= next { - next = 0; - } - continue; - } - } - next = (next + 1) % workers.len(); - break; - } - } - Err(ref e) - if e.kind() == io::ErrorKind::WouldBlock => - { - break - } - Err(ref e) if connection_error(e) => continue, - Err(e) => { - error!("Error accepting connection: {}", e); - // sleep after error - thread::sleep(sleep); - break; - } - } - } - }, - CMD => match rx.try_recv() { - Ok(cmd) => match cmd { - Command::Pause => if let Some(ref server) = server { - if let Err(err) = poll.deregister(server) { - error!( - "Can not deregister server socket {}", - err - ); - } else { - info!( - "Paused accepting connections on {}", - addr - ); - } - }, - Command::Resume => { - if let Some(ref server) = server { - if let Err(err) = poll.register( - server, - SRV, - mio::Ready::readable(), - mio::PollOpt::edge(), - ) { - error!("Can not resume socket accept process: {}", err); - } else { - info!("Accepting connections on {} has been resumed", - addr); - } - } - } - Command::Stop => { - if let Some(server) = server.take() { - let _ = poll.deregister(&server); - } - return; - } - Command::Worker(idx, addr) => { - workers.push((idx, addr)); - } - }, - Err(err) => match err { - sync_mpsc::TryRecvError::Empty => (), - sync_mpsc::TryRecvError::Disconnected => { - if let Some(server) = server.take() { - let _ = poll.deregister(&server); - } - return; - } - }, - }, - _ => unreachable!(), - } - } - } + System::set_current(sys); + Accept::new(reg, rx, socks, workers, srv).poll(); }); (readiness, tx) @@ -205,3 +73,244 @@ fn connection_error(e: &io::Error) -> bool { || e.kind() == io::ErrorKind::ConnectionAborted || e.kind() == io::ErrorKind::ConnectionReset } + +impl Accept { + fn new( + _reg: mio::Registration, rx: sync_mpsc::Receiver, + socks: Vec<(usize, Socket)>, + workers: Vec<(usize, mpsc::UnboundedSender>)>, + srv: mpsc::UnboundedSender, + ) -> Accept { + // Create a poll instance + let poll 
= match mio::Poll::new() { + Ok(poll) => poll, + Err(err) => panic!("Can not create mio::Poll: {}", err), + }; + + // Start listening for incoming commands + if let Err(err) = + poll.register(&_reg, CMD, mio::Ready::readable(), mio::PollOpt::edge()) + { + panic!("Can not register Registration: {}", err); + } + + // Start accept + let mut sockets = Slab::new(); + for (stoken, sock) in socks { + let server = mio::net::TcpListener::from_std(sock.lst) + .expect("Can not create mio::net::TcpListener"); + + let entry = sockets.vacant_entry(); + let token = entry.key(); + + // Start listening for incoming connections + if let Err(err) = poll.register( + &server, + mio::Token(token + 1000), + mio::Ready::readable(), + mio::PollOpt::edge(), + ) { + panic!("Can not register io: {}", err); + } + + entry.insert(ServerSocketInfo { + token: stoken, + addr: sock.addr, + sock: server, + timeout: None, + }); + } + + // Timer + let (tm, tmr) = mio::Registration::new2(); + if let Err(err) = + poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge()) + { + panic!("Can not register Registration: {}", err); + } + + Accept { + poll, + rx, + _reg, + sockets, + workers, + srv, + next: 0, + timer: (tm, tmr), + } + } + + fn poll(&mut self) { + // Create storage for events + let mut events = mio::Events::with_capacity(128); + + loop { + if let Err(err) = self.poll.poll(&mut events, None) { + panic!("Poll error: {}", err); + } + + for event in events.iter() { + let token = event.token(); + match token { + CMD => if !self.process_cmd() { + return; + }, + TIMER => self.process_timer(), + _ => self.accept(token), + } + } + } + } + + fn process_timer(&mut self) { + let now = Instant::now(); + for (token, info) in self.sockets.iter_mut() { + if let Some(inst) = info.timeout.take() { + if now > inst { + if let Err(err) = self.poll.register( + &info.sock, + mio::Token(token + 1000), + mio::Ready::readable(), + mio::PollOpt::edge(), + ) { + error!("Can not register server socket {}", err); + } else { + info!("Resume accepting connections on {}", info.addr); + } + } else { + info.timeout = Some(inst); + } + } + } + } + + fn process_cmd(&mut self) -> bool { + loop { + match self.rx.try_recv() { + Ok(cmd) => match cmd { + Command::Pause => { + for (_, info) in self.sockets.iter_mut() { + if let Err(err) = self.poll.deregister(&info.sock) { + error!("Can not deregister server socket {}", err); + } else { + info!("Paused accepting connections on {}", info.addr); + } + } + } + Command::Resume => { + for (token, info) in self.sockets.iter() { + if let Err(err) = self.poll.register( + &info.sock, + mio::Token(token + 1000), + mio::Ready::readable(), + mio::PollOpt::edge(), + ) { + error!("Can not resume socket accept process: {}", err); + } else { + info!( + "Accepting connections on {} has been resumed", + info.addr + ); + } + } + } + Command::Stop => { + for (_, info) in self.sockets.iter() { + let _ = self.poll.deregister(&info.sock); + } + return false; + } + Command::Worker(idx, addr) => { + self.workers.push((idx, addr)); + } + }, + Err(err) => match err { + sync_mpsc::TryRecvError::Empty => break, + sync_mpsc::TryRecvError::Disconnected => { + for (_, info) in self.sockets.iter() { + let _ = self.poll.deregister(&info.sock); + } + return false; + } + }, + } + } + true + } + + fn accept(&mut self, token: mio::Token) { + let token = usize::from(token); + if token < 1000 { + return; + } + + if let Some(info) = self.sockets.get_mut(token - 1000) { + loop { + match info.sock.accept_std() { + Ok((io, addr)) => { + let 
mut msg = Conn { + io, + token: info.token, + peer: Some(addr), + http2: false, + }; + while !self.workers.is_empty() { + match self.workers[self.next].1.unbounded_send(msg) { + Ok(_) => (), + Err(err) => { + let _ = self.srv.unbounded_send( + ServerCommand::WorkerDied( + self.workers[self.next].0, + ), + ); + msg = err.into_inner(); + self.workers.swap_remove(self.next); + if self.workers.is_empty() { + error!("No workers"); + thread::sleep(Duration::from_millis(100)); + break; + } else if self.workers.len() <= self.next { + self.next = 0; + } + continue; + } + } + self.next = (self.next + 1) % self.workers.len(); + break; + } + } + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break, + Err(ref e) if connection_error(e) => continue, + Err(e) => { + error!("Error accepting connection: {}", e); + if let Err(err) = self.poll.deregister(&info.sock) { + error!("Can not deregister server socket {}", err); + } + + // sleep after error + info.timeout = Some(Instant::now() + Duration::from_millis(500)); + + let r = self.timer.1.clone(); + System::current().arbiter().do_send(Execute::new( + move || -> Result<(), ()> { + Arbiter::spawn( + Delay::new( + Instant::now() + Duration::from_millis(510), + ).map_err(|_| ()) + .and_then(move |_| { + let _ = + r.set_readiness(mio::Ready::readable()); + Ok(()) + }), + ); + Ok(()) + }, + )); + break; + } + } + } + } + } +} diff --git a/src/server/mod.rs b/src/server/mod.rs index a4f5e87d7..429e293f2 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -315,10 +315,10 @@ impl IoStream for TlsStream { #[cfg(feature = "rust-tls")] use rustls::{ClientSession, ServerSession}; #[cfg(feature = "rust-tls")] -use tokio_rustls::TlsStream; +use tokio_rustls::TlsStream as RustlsStream; #[cfg(feature = "rust-tls")] -impl IoStream for TlsStream { +impl IoStream for RustlsStream { #[inline] fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { let _ = ::shutdown(self); @@ -337,7 +337,7 @@ impl IoStream for TlsStream { } #[cfg(feature = "rust-tls")] -impl IoStream for TlsStream { +impl IoStream for RustlsStream { #[inline] fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { let _ = ::shutdown(self); diff --git a/src/server/srv.rs b/src/server/srv.rs index a054d5a70..e776f7422 100644 --- a/src/server/srv.rs +++ b/src/server/srv.rs @@ -46,14 +46,6 @@ fn configure_alpn(builder: &mut SslAcceptorBuilder) -> io::Result<()> { Ok(()) } -#[cfg(all(feature = "rust-tls", not(feature = "alpn")))] -fn configure_alpn(builder: &mut Arc) -> io::Result<()> { - Arc::::get_mut(builder) - .unwrap() - .set_protocols(&vec!["h2".to_string(), "http/1.1".to_string()]); - Ok(()) -} - /// An HTTP Server pub struct HttpServer where @@ -68,7 +60,11 @@ where #[cfg_attr(feature = "cargo-clippy", allow(type_complexity))] workers: Vec<(usize, Addr>)>, sockets: Vec, - accept: Vec<(mio::SetReadiness, sync_mpsc::Sender)>, + accept: Option<( + mio::SetReadiness, + sync_mpsc::Sender, + Slab, + )>, exit: bool, shutdown_timeout: u16, signals: Option>, @@ -77,7 +73,7 @@ where } pub(crate) enum ServerCommand { - WorkerDied(usize, Slab), + WorkerDied(usize), } impl Actor for HttpServer @@ -114,7 +110,7 @@ where factory: Arc::new(f), workers: Vec::new(), sockets: Vec::new(), - accept: Vec::new(), + accept: None, exit: false, shutdown_timeout: 30, signals: None, @@ -280,22 +276,22 @@ where Ok(self) } - #[cfg(all(feature = "rust-tls", not(feature = "alpn")))] + #[cfg(feature = "rust-tls")] /// Use listener for accepting incoming tls connection requests /// /// This method sets alpn protocols to 
"h2" and "http/1.1" - pub fn listen_ssl( - mut self, lst: net::TcpListener, mut builder: Arc, + pub fn listen_rustls( + mut self, lst: net::TcpListener, mut builder: ServerConfig, ) -> io::Result { // alpn support if !self.no_http2 { - configure_alpn(&mut builder)?; + builder.set_protocols(&vec!["h2".to_string(), "http/1.1".to_string()]); } let addr = lst.local_addr().unwrap(); self.sockets.push(Socket { addr, lst, - tp: StreamHandlerType::Rustls(builder.clone()), + tp: StreamHandlerType::Rustls(Arc::new(builder)), }); Ok(self) } @@ -378,20 +374,21 @@ where Ok(self) } - #[cfg(all(feature = "rust-tls", not(feature = "alpn")))] + #[cfg(feature = "rust-tls")] /// Start listening for incoming tls connections. /// /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn bind_ssl( - mut self, addr: S, mut builder: Arc, + pub fn bind_rustls( + mut self, addr: S, mut builder: ServerConfig, ) -> io::Result { // alpn support if !self.no_http2 { - configure_alpn(&mut builder)?; + builder.set_protocols(&vec!["h2".to_string(), "http/1.1".to_string()]); } + let builder = Arc::new(builder); let sockets = self.bind2(addr)?; - self.sockets.extend(sockets.into_iter().map(|mut s| { + self.sockets.extend(sockets.into_iter().map(move |mut s| { s.tp = StreamHandlerType::Rustls(builder.clone()); s })); @@ -487,17 +484,12 @@ impl HttpServer { let settings = ServerSettings::new(Some(addrs[0].1.addr), &self.host, false); let workers = self.start_workers(&settings, &socks); - // start acceptors threads - for (token, sock) in addrs { + // start accept thread + for (_, sock) in &addrs { info!("Starting server on http://{}", sock.addr); - self.accept.push(start_accept_thread( - token, - sock, - tx.clone(), - socks.clone(), - workers.clone(), - )); } + let (r, cmd) = start_accept_thread(addrs, tx.clone(), workers.clone()); + self.accept = Some((r, cmd, socks)); // start http server actor let signals = self.subscribe_to_signals(); @@ -672,7 +664,7 @@ impl StreamHandler for HttpServer { fn handle(&mut self, msg: ServerCommand, _: &mut Context) { match msg { - ServerCommand::WorkerDied(idx, socks) => { + ServerCommand::WorkerDied(idx) => { let mut found = false; for i in 0..self.workers.len() { if self.workers[i].0 == idx { @@ -700,6 +692,7 @@ impl StreamHandler for HttpServer { let ka = self.keep_alive; let factory = Arc::clone(&self.factory); let host = self.host.clone(); + let socks = self.accept.as_ref().unwrap().2.clone(); let addr = socks[0].addr; let addr = Arbiter::start(move |ctx: &mut Context<_>| { @@ -709,7 +702,7 @@ impl StreamHandler for HttpServer { ctx.add_message_stream(rx); Worker::new(apps, socks, ka, settings) }); - for item in &self.accept { + if let Some(ref item) = &self.accept { let _ = item.1.send(Command::Worker(new_idx, tx.clone())); let _ = item.0.set_readiness(mio::Ready::readable()); } diff --git a/src/test.rs b/src/test.rs index f94732dd7..5c520a75a 100644 --- a/src/test.rs +++ b/src/test.rs @@ -15,10 +15,10 @@ use tokio::runtime::current_thread::Runtime; #[cfg(feature = "alpn")] use openssl::ssl::SslAcceptorBuilder; -#[cfg(feature = "rust-tls")] +#[cfg(all(feature = "rust-tls"))] use rustls::ServerConfig; -#[cfg(feature = "rust-tls")] -use std::sync::Arc; +//#[cfg(all(feature = "rust-tls"))] +//use std::sync::Arc; use application::{App, HttpApplication}; use body::Binary; @@ -144,7 +144,7 @@ impl TestServer { builder.set_verify(SslVerifyMode::NONE); ClientConnector::with_connector(builder.build()).start() } - #[cfg(feature = "rust-tls")] + #[cfg(all(feature = "rust-tls", 
not(feature = "alpn")))] { use rustls::ClientConfig; use std::fs::File; @@ -256,7 +256,7 @@ pub struct TestServerBuilder { #[cfg(feature = "alpn")] ssl: Option, #[cfg(feature = "rust-tls")] - ssl: Option>, + rust_ssl: Option, } impl TestServerBuilder { @@ -267,8 +267,10 @@ impl TestServerBuilder { { TestServerBuilder { state: Box::new(state), - #[cfg(any(feature = "alpn", feature = "rust-tls"))] + #[cfg(feature = "alpn")] ssl: None, + #[cfg(feature = "rust-tls")] + rust_ssl: None, } } @@ -280,9 +282,9 @@ impl TestServerBuilder { } #[cfg(feature = "rust-tls")] - /// Create ssl server - pub fn ssl(mut self, ssl: Arc) -> Self { - self.ssl = Some(ssl); + /// Create rust tls server + pub fn rustls(mut self, ssl: ServerConfig) -> Self { + self.rust_ssl = Some(ssl); self } @@ -294,41 +296,56 @@ impl TestServerBuilder { { let (tx, rx) = mpsc::channel(); - #[cfg(any(feature = "alpn", feature = "rust-tls"))] - let ssl = self.ssl.is_some(); - #[cfg(not(any(feature = "alpn", feature = "rust-tls")))] - let ssl = false; + let mut has_ssl = false; + + #[cfg(feature = "alpn")] + { + has_ssl = has_ssl || self.ssl.is_some(); + } + + #[cfg(feature = "rust-tls")] + { + has_ssl = has_ssl || self.rust_ssl.is_some(); + } // run server in separate thread thread::spawn(move || { - let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap(); - let local_addr = tcp.local_addr().unwrap(); + let addr = TestServer::unused_addr(); let sys = System::new("actix-test-server"); let state = self.state; - let srv = HttpServer::new(move || { + let mut srv = HttpServer::new(move || { let mut app = TestApp::new(state()); config(&mut app); vec![app] }).workers(1) .disable_signals(); - tx.send((System::current(), local_addr, TestServer::get_conn())) + tx.send((System::current(), addr, TestServer::get_conn())) .unwrap(); - #[cfg(any(feature = "alpn", feature = "rust-tls"))] + #[cfg(feature = "alpn")] { let ssl = self.ssl.take(); if let Some(ssl) = ssl { - srv.listen_ssl(tcp, ssl).unwrap().start(); - } else { - srv.listen(tcp).start(); + let tcp = net::TcpListener::bind(addr).unwrap(); + srv = srv.listen_ssl(tcp, ssl).unwrap(); } } - #[cfg(not(any(feature = "alpn", feature = "rust-tls")))] + #[cfg(feature = "rust-tls")] { - srv.listen(tcp).start(); + let ssl = self.rust_ssl.take(); + if let Some(ssl) = ssl { + let tcp = net::TcpListener::bind(addr).unwrap(); + srv = srv.listen_rustls(tcp, ssl).unwrap(); + } } + if !has_ssl { + let tcp = net::TcpListener::bind(addr).unwrap(); + srv = srv.listen(tcp); + } + srv.start(); + sys.run(); }); @@ -336,8 +353,8 @@ impl TestServerBuilder { System::set_current(system); TestServer { addr, - ssl, conn, + ssl: has_ssl, rt: Runtime::new().unwrap(), } } diff --git a/tests/test_server.rs b/tests/test_server.rs index 82a318e59..3a8259283 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -153,6 +153,62 @@ fn test_shutdown() { let _ = sys.stop(); } +#[test] +#[cfg(unix)] +fn test_panic() { + let _ = test::TestServer::unused_addr(); + let (tx, rx) = mpsc::channel(); + + thread::spawn(|| { + System::run(move || { + let srv = server::new(|| { + App::new() + .resource("/panic", |r| { + r.method(http::Method::GET).f(|_| -> &'static str { + panic!("error"); + }); + }) + .resource("/", |r| { + r.method(http::Method::GET).f(|_| HttpResponse::Ok()) + }) + }).workers(1); + + let srv = srv.bind("127.0.0.1:0").unwrap(); + let addr = srv.addrs()[0]; + srv.start(); + let _ = tx.send((addr, System::current())); + }); + }); + let (addr, sys) = rx.recv().unwrap(); + System::set_current(sys.clone()); + + let 
mut rt = Runtime::new().unwrap(); + { + let req = client::ClientRequest::get(format!("http://{}/panic", addr).as_str()) + .finish() + .unwrap(); + let response = rt.block_on(req.send()); + assert!(response.is_err()); + } + + { + let req = client::ClientRequest::get(format!("http://{}/", addr).as_str()) + .finish() + .unwrap(); + let response = rt.block_on(req.send()); + assert!(response.is_err()); + } + { + let req = client::ClientRequest::get(format!("http://{}/", addr).as_str()) + .finish() + .unwrap(); + let response = rt.block_on(req.send()).unwrap(); + assert!(response.status().is_success()); + } + + let _ = sys.stop(); +} + #[test] fn test_simple() { let mut srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok())); diff --git a/tests/test_ws.rs b/tests/test_ws.rs index 1ed80bf77..94f389781 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -277,13 +277,12 @@ fn test_ws_server_ssl() { #[test] #[cfg(feature = "rust-tls")] -fn test_ws_server_ssl() { +fn test_ws_server_rust_tls() { extern crate rustls; - use rustls::{ServerConfig, NoClientAuth}; use rustls::internal::pemfile::{certs, rsa_private_keys}; - use std::io::BufReader; - use std::sync::Arc; + use rustls::{NoClientAuth, ServerConfig}; use std::fs::File; + use std::io::BufReader; // load ssl keys let mut config = ServerConfig::new(NoClientAuth::new()); @@ -293,7 +292,7 @@ fn test_ws_server_ssl() { let mut keys = rsa_private_keys(key_file).unwrap(); config.set_single_cert(cert_chain, keys.remove(0)).unwrap(); - let mut srv = test::TestServer::build().ssl(Arc::new(config)).start(|app| { + let mut srv = test::TestServer::build().rustls(config).start(|app| { app.handler(|req| { ws::start( req, From dca4c110dd0634bd864e624b475f8e7f6e3a5b36 Mon Sep 17 00:00:00 2001 From: jrconlin Date: Tue, 31 Jul 2018 16:21:18 -0700 Subject: [PATCH 021/219] feature: allow TestServer to open a websocket on any URL * added `TestServer::ws_at(uri_str)` * modified `TestServer::ws()` to call `self.ws_at("/")` to preserve behavior Closes #432 --- CHANGES.md | 4 +++- src/test.rs | 14 +++++++++++--- tests/test_ws.rs | 40 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 237b4bfbc..9cb883a3d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,6 +6,8 @@ * Support HTTP/2 with rustls #36 +* Allow TestServer to open a websocket on any URL # 433 + ### Fixed * Do not override HOST header for client request #428 @@ -22,7 +24,7 @@ * Add implementation of `FromRequest` for `Option` and `Result` * Allow to handle application prefix, i.e. allow to handle `/app` path - for application with `/app` prefix. + for application with `/app` prefix. Check [`App::prefix()`](https://actix.rs/actix-web/actix_web/struct.App.html#method.prefix) api doc. 
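For the "Do not override HOST header for client request #428" entry listed above, a minimal sketch (not from this patch series, assuming the 0.7.x `actix_web::client` builder API) of the resulting behaviour: a `Host` header set explicitly on the builder is kept, rather than being replaced by the host taken from the request URL.

```rust
extern crate actix_web;

use actix_web::client::ClientRequest;

fn main() {
    // Build a client request with an explicit Host header; after #428 the
    // builder only fills in Host from the URI when none was set by the caller.
    let req = ClientRequest::get("http://127.0.0.1:8080/")
        .header("Host", "example.com")
        .finish()
        .unwrap();

    assert_eq!(
        req.headers().get("host").unwrap().to_str().unwrap(),
        "example.com"
    );
}
```

Without an explicit header, the builder still derives `Host` from the request URI, as the `src/client/request.rs` hunk earlier in this series shows.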
diff --git a/src/test.rs b/src/test.rs index f94732dd7..2ec7a98d8 100644 --- a/src/test.rs +++ b/src/test.rs @@ -207,15 +207,23 @@ impl TestServer { self.rt.block_on(fut) } - /// Connect to websocket server - pub fn ws( + /// Connect to websocket server at a given path + pub fn ws_at( &mut self, + path: &str, ) -> Result<(ws::ClientReader, ws::ClientWriter), ws::ClientError> { - let url = self.url("/"); + let url = self.url(path); self.rt .block_on(ws::Client::with_connector(url, self.conn.clone()).connect()) } + /// Connect to a websocket server + pub fn ws( + &mut self, + ) -> Result<(ws::ClientReader, ws::ClientWriter), ws::ClientError> { + self.ws_at("/") + } + /// Create `GET` request pub fn get(&self) -> ClientRequestBuilder { ClientRequest::get(self.url("/").as_str()) diff --git a/tests/test_ws.rs b/tests/test_ws.rs index 1ed80bf77..86717272c 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -64,6 +64,46 @@ fn test_simple() { ); } +// websocket resource helper function +fn start_ws_resource(req: &HttpRequest) -> Result { + ws::start(req, Ws) +} + +#[test] +fn test_simple_path() { + const PATH:&str = "/v1/ws/"; + + // Create a websocket at a specific path. + let mut srv = test::TestServer::new(|app| { + app.resource(PATH, |r| r.route().f(start_ws_resource)); + }); + // fetch the sockets for the resource at a given path. + let (reader, mut writer) = srv.ws_at(PATH).unwrap(); + + writer.text("text"); + let (item, reader) = srv.execute(reader.into_future()).unwrap(); + assert_eq!(item, Some(ws::Message::Text("text".to_owned()))); + + writer.binary(b"text".as_ref()); + let (item, reader) = srv.execute(reader.into_future()).unwrap(); + assert_eq!( + item, + Some(ws::Message::Binary(Bytes::from_static(b"text").into())) + ); + + writer.ping("ping"); + let (item, reader) = srv.execute(reader.into_future()).unwrap(); + assert_eq!(item, Some(ws::Message::Pong("ping".to_owned()))); + + writer.close(Some(ws::CloseCode::Normal.into())); + let (item, _) = srv.execute(reader.into_future()).unwrap(); + assert_eq!( + item, + Some(ws::Message::Close(Some(ws::CloseCode::Normal.into()))) + ); +} + + #[test] fn test_empty_close_code() { let mut srv = test::TestServer::new(|app| app.handler(|req| ws::start(req, Ws))); From 972b008a6e15defd9d7d8dfb9073091b341b716a Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 1 Aug 2018 09:42:12 -0700 Subject: [PATCH 022/219] remove unsafe error transmute, upgrade failure to 0.1.2 #434 --- Cargo.toml | 2 +- src/error.rs | 17 +++-------------- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 695b2e31f..31440eb37 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,7 +83,7 @@ cookie = { version="0.11", features=["percent-encode"] } brotli2 = { version="^0.3.2", optional = true } flate2 = { version="^1.0.2", optional = true, default-features = false } -failure = "=0.1.1" +failure = "^0.1.2" # io mio = "^0.6.13" diff --git a/src/error.rs b/src/error.rs index 461b23e20..76c8e79ec 100644 --- a/src/error.rs +++ b/src/error.rs @@ -52,7 +52,8 @@ pub struct Error { impl Error { /// Deprecated way to reference the underlying response error. 
#[deprecated( - since = "0.6.0", note = "please use `Error::as_response_error()` instead" + since = "0.6.0", + note = "please use `Error::as_response_error()` instead" )] pub fn cause(&self) -> &ResponseError { self.cause.as_ref() @@ -97,21 +98,9 @@ impl Error { // // So we first downcast into that compat, to then further downcast through // the failure's Error downcasting system into the original failure. - // - // This currently requires a transmute. This could be avoided if failure - // provides a deref: https://github.com/rust-lang-nursery/failure/pull/213 let compat: Option<&failure::Compat> = Fail::downcast_ref(self.cause.as_fail()); - if let Some(compat) = compat { - pub struct CompatWrappedError { - error: failure::Error, - } - let compat: &CompatWrappedError = - unsafe { &*(compat as *const _ as *const CompatWrappedError) }; - compat.error.downcast_ref() - } else { - None - } + compat.and_then(|e| e.get_ref().downcast_ref()) } } From a5f80a25ffa057fd2ec78c0cd32d4dc39af1c417 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 1 Aug 2018 10:51:47 -0700 Subject: [PATCH 023/219] update changes --- CHANGES.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9cb883a3d..d86de70f0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,15 +1,18 @@ # Changes -## [0.7.3] - 2018-07-xx +## [0.7.3] - 2018-08-01 ### Added * Support HTTP/2 with rustls #36 -* Allow TestServer to open a websocket on any URL # 433 +* Allow TestServer to open a websocket on any URL (TestServer::ws_at()) #433 + ### Fixed +* Fixed failure 0.1.2 compatibility + * Do not override HOST header for client request #428 * Gz streaming, use `flate2::write::GzDecoder` #228 From 0da3fdcb09973954bb155ee8b3d8c265d37d5de4 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 1 Aug 2018 10:59:00 -0700 Subject: [PATCH 024/219] do not use Arc for rustls config --- src/client/connector.rs | 88 +++++++++++++++++++++++++++++++++-------- src/test.rs | 9 ++--- 2 files changed, 75 insertions(+), 22 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index a00546719..ef66cd734 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -26,21 +26,51 @@ use native_tls::{Error as TlsError, TlsConnector, TlsStream}; #[cfg(all(feature = "tls", not(feature = "alpn")))] use tokio_tls::TlsConnectorExt; -#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +#[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) +)] use rustls::ClientConfig; -#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +#[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) +)] use std::io::Error as TLSError; -#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +#[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) +)] use std::sync::Arc; -#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +#[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) +)] use tokio_rustls::ClientConfigExt; -#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +#[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) +)] use webpki::DNSNameRef; -#[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] +#[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) +)] use 
webpki_roots; use server::IoStream; -use {HAS_OPENSSL, HAS_TLS, HAS_RUSTLS}; +use {HAS_OPENSSL, HAS_RUSTLS, HAS_TLS}; /// Client connector usage stats #[derive(Default, Message)] @@ -153,7 +183,12 @@ pub enum ClientConnectorError { SslError(#[cause] TlsError), /// SSL error - #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + #[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) + )] #[fail(display = "{}", _0)] SslError(#[cause] TLSError), @@ -211,7 +246,12 @@ pub struct ClientConnector { connector: SslConnector, #[cfg(all(feature = "tls", not(feature = "alpn")))] connector: TlsConnector, - #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + #[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) + )] connector: Arc, stats: ClientConnectorStats, @@ -282,13 +322,18 @@ impl Default for ClientConnector { paused: Paused::No, } } - #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + #[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) + )] { let mut config = ClientConfig::new(); config .root_store .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); - ClientConnector::with_connector(Arc::new(config)) + ClientConnector::with_connector(config) } #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] @@ -380,7 +425,12 @@ impl ClientConnector { } } - #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + #[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) + )] /// Create `ClientConnector` actor with custom `SslConnector` instance. /// /// By default `ClientConnector` uses very a simple SSL configuration. 
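The next hunk changes `ClientConnector::with_connector()` to take a rustls `ClientConfig` by value and wrap it in an `Arc` internally. A call-site sketch under the `rust-tls` feature, mirroring the `TestServer` change later in this patch; the certificate path is illustrative:

```rust
// Sketch only (rust-tls feature): the caller no longer builds the Arc itself.
extern crate actix;
extern crate actix_web;
extern crate rustls;

use std::fs::File;
use std::io::BufReader;

use actix::Actor;
use actix_web::client::ClientConnector;
use rustls::ClientConfig;

fn start_connector() -> actix::Addr<ClientConnector> {
    let mut config = ClientConfig::new();
    // Trust roots are illustrative; the test suite uses the bundled tests/cert.pem.
    let pem = &mut BufReader::new(File::open("tests/cert.pem").unwrap());
    config.root_store.add_pem_file(pem).unwrap();

    // Previously: ClientConnector::with_connector(Arc::new(config))
    ClientConnector::with_connector(config).start()
}
```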
@@ -425,11 +475,11 @@ impl ClientConnector { /// }); /// } /// ``` - pub fn with_connector(connector: Arc) -> ClientConnector { + pub fn with_connector(connector: ClientConfig) -> ClientConnector { let (tx, rx) = mpsc::unbounded(); ClientConnector { - connector, + connector: Arc::new(connector), stats: ClientConnectorStats::default(), subscriber: None, acq_tx: tx, @@ -806,7 +856,12 @@ impl ClientConnector { } } - #[cfg(all(feature = "rust-tls", not(any(feature = "alpn", feature = "tls"))))] + #[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) + )] match res { Err(err) => { let _ = waiter.tx.send(Err(err.into())); @@ -815,7 +870,8 @@ impl ClientConnector { Ok(stream) => { act.stats.opened += 1; if conn.0.ssl { - let host = DNSNameRef::try_from_ascii_str(&key.host).unwrap(); + let host = + DNSNameRef::try_from_ascii_str(&key.host).unwrap(); fut::Either::A( act.connector .connect_async(host, stream) diff --git a/src/test.rs b/src/test.rs index 4e23e64a3..244c079a7 100644 --- a/src/test.rs +++ b/src/test.rs @@ -17,8 +17,6 @@ use tokio::runtime::current_thread::Runtime; use openssl::ssl::SslAcceptorBuilder; #[cfg(all(feature = "rust-tls"))] use rustls::ServerConfig; -//#[cfg(all(feature = "rust-tls"))] -//use std::sync::Arc; use application::{App, HttpApplication}; use body::Binary; @@ -152,7 +150,7 @@ impl TestServer { let mut config = ClientConfig::new(); let pem_file = &mut BufReader::new(File::open("tests/cert.pem").unwrap()); config.root_store.add_pem_file(pem_file).unwrap(); - ClientConnector::with_connector(Arc::new(config)).start() + ClientConnector::with_connector(config).start() } #[cfg(not(any(feature = "alpn", feature = "rust-tls")))] { @@ -209,8 +207,7 @@ impl TestServer { /// Connect to websocket server at a given path pub fn ws_at( - &mut self, - path: &str, + &mut self, path: &str, ) -> Result<(ws::ClientReader, ws::ClientWriter), ws::ClientError> { let url = self.url(path); self.rt @@ -223,7 +220,7 @@ impl TestServer { ) -> Result<(ws::ClientReader, ws::ClientWriter), ws::ClientError> { self.ws_at("/") } - + /// Create `GET` request pub fn get(&self) -> ClientRequestBuilder { ClientRequest::get(self.url("/").as_str()) From e9c1889df46394c4c6e8fdda2e956a1077b628cf Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 1 Aug 2018 16:41:24 -0700 Subject: [PATCH 025/219] test timing --- tests/test_server.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_server.rs b/tests/test_server.rs index 3a8259283..842d685f0 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -887,6 +887,7 @@ fn test_brotli_encoding_large() { fn test_h2() { let srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR))); let addr = srv.addr(); + thread::sleep(time::Duration::from_millis(500)); let mut core = Runtime::new().unwrap(); let tcp = TcpStream::connect(&addr); From 8c89c90c50f64bb411db1a95aeec6b2a1cc9d9e1 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 2 Aug 2018 23:17:10 -0700 Subject: [PATCH 026/219] add accept backpressure #250 --- CHANGES.md | 7 + Cargo.toml | 2 +- src/server/accept.rs | 358 +++++++++++++++++++++++++++++++---------- src/server/h1.rs | 110 ++++--------- src/server/settings.rs | 53 +++--- src/server/srv.rs | 150 +++++++---------- src/server/worker.rs | 132 +++++++++++++-- 7 files changed, 516 insertions(+), 296 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index d86de70f0..f7e663d63 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.7.4] - 2018-08-xx + +### Added + +* 
Added `HttpServer::max_connections()` and `HttpServer::max_sslrate()`, accept backpressure #250 + + ## [0.7.3] - 2018-08-01 ### Added diff --git a/Cargo.toml b/Cargo.toml index 31440eb37..86cb53d10 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.3" +version = "0.7.4" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md" diff --git a/src/server/accept.rs b/src/server/accept.rs index 752805600..f846e4a40 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -10,13 +10,13 @@ use tokio_timer::Delay; use actix::{msgs::Execute, Arbiter, System}; use super::srv::{ServerCommand, Socket}; -use super::worker::Conn; +use super::worker::{Conn, WorkerClient}; pub(crate) enum Command { Pause, Resume, Stop, - Worker(usize, mpsc::UnboundedSender>), + Worker(WorkerClient), } struct ServerSocketInfo { @@ -26,40 +26,133 @@ struct ServerSocketInfo { timeout: Option, } +#[derive(Clone)] +pub(crate) struct AcceptNotify { + ready: mio::SetReadiness, + maxconn: usize, + maxconn_low: usize, + maxsslrate: usize, + maxsslrate_low: usize, +} + +impl AcceptNotify { + pub fn new(ready: mio::SetReadiness, maxconn: usize, maxsslrate: usize) -> Self { + let maxconn_low = if maxconn > 10 { maxconn - 10 } else { 0 }; + let maxsslrate_low = if maxsslrate > 10 { maxsslrate - 10 } else { 0 }; + AcceptNotify { + ready, + maxconn, + maxconn_low, + maxsslrate, + maxsslrate_low, + } + } + + pub fn notify_maxconn(&self, maxconn: usize) { + if maxconn > self.maxconn_low && maxconn <= self.maxconn { + let _ = self.ready.set_readiness(mio::Ready::readable()); + } + } + pub fn notify_maxsslrate(&self, sslrate: usize) { + if sslrate > self.maxsslrate_low && sslrate <= self.maxsslrate { + let _ = self.ready.set_readiness(mio::Ready::readable()); + } + } +} + +impl Default for AcceptNotify { + fn default() -> Self { + AcceptNotify::new(mio::Registration::new2().1, 0, 0) + } +} + +pub(crate) struct AcceptLoop { + cmd_reg: Option, + cmd_ready: mio::SetReadiness, + notify_reg: Option, + notify_ready: mio::SetReadiness, + tx: sync_mpsc::Sender, + rx: Option>, + srv: Option<( + mpsc::UnboundedSender, + mpsc::UnboundedReceiver, + )>, + maxconn: usize, + maxsslrate: usize, +} + +impl AcceptLoop { + pub fn new() -> AcceptLoop { + let (tx, rx) = sync_mpsc::channel(); + let (cmd_reg, cmd_ready) = mio::Registration::new2(); + let (notify_reg, notify_ready) = mio::Registration::new2(); + + AcceptLoop { + tx, + cmd_ready, + cmd_reg: Some(cmd_reg), + notify_ready, + notify_reg: Some(notify_reg), + maxconn: 102_400, + maxsslrate: 256, + rx: Some(rx), + srv: Some(mpsc::unbounded()), + } + } + + pub fn send(&self, msg: Command) { + let _ = self.tx.send(msg); + let _ = self.cmd_ready.set_readiness(mio::Ready::readable()); + } + + pub fn get_notify(&self) -> AcceptNotify { + AcceptNotify::new(self.notify_ready.clone(), self.maxconn, self.maxsslrate) + } + + pub fn max_connections(&mut self, num: usize) { + self.maxconn = num; + } + + pub fn max_sslrate(&mut self, num: usize) { + self.maxsslrate = num; + } + + pub(crate) fn start( + &mut self, socks: Vec<(usize, Socket)>, workers: Vec, + ) -> mpsc::UnboundedReceiver { + let (tx, rx) = self.srv.take().expect("Can not re-use AcceptInfo"); + + Accept::start( + self.rx.take().expect("Can not re-use AcceptInfo"), + self.cmd_reg.take().expect("Can not re-use AcceptInfo"), + self.notify_reg.take().expect("Can not re-use AcceptInfo"), + self.maxconn, + self.maxsslrate, 
+ socks, + tx, + workers, + ); + rx + } +} + struct Accept { poll: mio::Poll, rx: sync_mpsc::Receiver, sockets: Slab, - workers: Vec<(usize, mpsc::UnboundedSender>)>, - _reg: mio::Registration, - next: usize, + workers: Vec, srv: mpsc::UnboundedSender, timer: (mio::Registration, mio::SetReadiness), + next: usize, + maxconn: usize, + maxsslrate: usize, + backpressure: bool, } +const DELTA: usize = 100; const CMD: mio::Token = mio::Token(0); const TIMER: mio::Token = mio::Token(1); - -pub(crate) fn start_accept_thread( - socks: Vec<(usize, Socket)>, srv: mpsc::UnboundedSender, - workers: Vec<(usize, mpsc::UnboundedSender>)>, -) -> (mio::SetReadiness, sync_mpsc::Sender) { - let (tx, rx) = sync_mpsc::channel(); - let (reg, readiness) = mio::Registration::new2(); - - let sys = System::current(); - - // start accept thread - #[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))] - let _ = thread::Builder::new() - .name("actix-web accept loop".to_owned()) - .spawn(move || { - System::set_current(sys); - Accept::new(reg, rx, socks, workers, srv).poll(); - }); - - (readiness, tx) -} +const NOTIFY: mio::Token = mio::Token(2); /// This function defines errors that are per-connection. Which basically /// means that if we get this error from `accept()` system call it means @@ -75,11 +168,51 @@ fn connection_error(e: &io::Error) -> bool { } impl Accept { + #![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] + pub(crate) fn start( + rx: sync_mpsc::Receiver, cmd_reg: mio::Registration, + notify_reg: mio::Registration, maxconn: usize, maxsslrate: usize, + socks: Vec<(usize, Socket)>, srv: mpsc::UnboundedSender, + workers: Vec, + ) { + let sys = System::current(); + + // start accept thread + let _ = thread::Builder::new() + .name("actix-web accept loop".to_owned()) + .spawn(move || { + System::set_current(sys); + let mut accept = Accept::new(rx, socks, workers, srv); + accept.maxconn = maxconn; + accept.maxsslrate = maxsslrate; + + // Start listening for incoming commands + if let Err(err) = accept.poll.register( + &cmd_reg, + CMD, + mio::Ready::readable(), + mio::PollOpt::edge(), + ) { + panic!("Can not register Registration: {}", err); + } + + // Start listening for notify updates + if let Err(err) = accept.poll.register( + ¬ify_reg, + NOTIFY, + mio::Ready::readable(), + mio::PollOpt::edge(), + ) { + panic!("Can not register Registration: {}", err); + } + + accept.poll(); + }); + } + fn new( - _reg: mio::Registration, rx: sync_mpsc::Receiver, - socks: Vec<(usize, Socket)>, - workers: Vec<(usize, mpsc::UnboundedSender>)>, - srv: mpsc::UnboundedSender, + rx: sync_mpsc::Receiver, socks: Vec<(usize, Socket)>, + workers: Vec, srv: mpsc::UnboundedSender, ) -> Accept { // Create a poll instance let poll = match mio::Poll::new() { @@ -87,13 +220,6 @@ impl Accept { Err(err) => panic!("Can not create mio::Poll: {}", err), }; - // Start listening for incoming commands - if let Err(err) = - poll.register(&_reg, CMD, mio::Ready::readable(), mio::PollOpt::edge()) - { - panic!("Can not register Registration: {}", err); - } - // Start accept let mut sockets = Slab::new(); for (stoken, sock) in socks { @@ -106,7 +232,7 @@ impl Accept { // Start listening for incoming connections if let Err(err) = poll.register( &server, - mio::Token(token + 1000), + mio::Token(token + DELTA), mio::Ready::readable(), mio::PollOpt::edge(), ) { @@ -132,12 +258,14 @@ impl Accept { Accept { poll, rx, - _reg, sockets, workers, srv, next: 0, timer: (tm, tmr), + maxconn: 102_400, + maxsslrate: 256, + backpressure: 
false, } } @@ -157,7 +285,14 @@ impl Accept { return; }, TIMER => self.process_timer(), - _ => self.accept(token), + NOTIFY => self.backpressure(false), + _ => { + let token = usize::from(token); + if token < DELTA { + continue; + } + self.accept(token - DELTA); + } } } } @@ -170,7 +305,7 @@ impl Accept { if now > inst { if let Err(err) = self.poll.register( &info.sock, - mio::Token(token + 1000), + mio::Token(token + DELTA), mio::Ready::readable(), mio::PollOpt::edge(), ) { @@ -202,7 +337,7 @@ impl Accept { for (token, info) in self.sockets.iter() { if let Err(err) = self.poll.register( &info.sock, - mio::Token(token + 1000), + mio::Token(token + DELTA), mio::Ready::readable(), mio::PollOpt::edge(), ) { @@ -221,8 +356,9 @@ impl Accept { } return false; } - Command::Worker(idx, addr) => { - self.workers.push((idx, addr)); + Command::Worker(worker) => { + self.backpressure(false); + self.workers.push(worker); } }, Err(err) => match err { @@ -239,48 +375,100 @@ impl Accept { true } - fn accept(&mut self, token: mio::Token) { - let token = usize::from(token); - if token < 1000 { - return; + fn backpressure(&mut self, on: bool) { + if self.backpressure { + if !on { + self.backpressure = false; + for (token, info) in self.sockets.iter() { + if let Err(err) = self.poll.register( + &info.sock, + mio::Token(token + DELTA), + mio::Ready::readable(), + mio::PollOpt::edge(), + ) { + error!("Can not resume socket accept process: {}", err); + } else { + info!("Accepting connections on {} has been resumed", info.addr); + } + } + } + } else if on { + self.backpressure = true; + for (_, info) in self.sockets.iter() { + let _ = self.poll.deregister(&info.sock); + } } + } - if let Some(info) = self.sockets.get_mut(token - 1000) { - loop { - match info.sock.accept_std() { - Ok((io, addr)) => { - let mut msg = Conn { - io, - token: info.token, - peer: Some(addr), - http2: false, - }; - while !self.workers.is_empty() { - match self.workers[self.next].1.unbounded_send(msg) { - Ok(_) => (), - Err(err) => { - let _ = self.srv.unbounded_send( - ServerCommand::WorkerDied( - self.workers[self.next].0, - ), - ); - msg = err.into_inner(); - self.workers.swap_remove(self.next); - if self.workers.is_empty() { - error!("No workers"); - thread::sleep(Duration::from_millis(100)); - break; - } else if self.workers.len() <= self.next { - self.next = 0; - } - continue; - } - } + fn accept_one(&mut self, mut msg: Conn) { + if self.backpressure { + while !self.workers.is_empty() { + match self.workers[self.next].send(msg) { + Ok(_) => (), + Err(err) => { + let _ = self.srv.unbounded_send(ServerCommand::WorkerDied( + self.workers[self.next].idx, + )); + msg = err.into_inner(); + self.workers.swap_remove(self.next); + if self.workers.is_empty() { + error!("No workers"); + return; + } else if self.workers.len() <= self.next { + self.next = 0; + } + continue; + } + } + self.next = (self.next + 1) % self.workers.len(); + break; + } + } else { + let mut idx = 0; + while idx < self.workers.len() { + idx += 1; + if self.workers[self.next].available(self.maxconn, self.maxsslrate) { + match self.workers[self.next].send(msg) { + Ok(_) => { self.next = (self.next + 1) % self.workers.len(); - break; + return; + } + Err(err) => { + let _ = self.srv.unbounded_send(ServerCommand::WorkerDied( + self.workers[self.next].idx, + )); + msg = err.into_inner(); + self.workers.swap_remove(self.next); + if self.workers.is_empty() { + error!("No workers"); + self.backpressure(true); + return; + } else if self.workers.len() <= self.next { + self.next = 0; 
+ } + continue; } } - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break, + } + self.next = (self.next + 1) % self.workers.len(); + } + // enable backpressure + self.backpressure(true); + self.accept_one(msg); + } + } + + fn accept(&mut self, token: usize) { + loop { + let msg = if let Some(info) = self.sockets.get_mut(token) { + match info.sock.accept_std() { + Ok((io, addr)) => Conn { + io, + token: info.token, + peer: Some(addr), + http2: false, + }, + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return, Err(ref e) if connection_error(e) => continue, Err(e) => { error!("Error accepting connection: {}", e); @@ -307,10 +495,14 @@ impl Accept { Ok(()) }, )); - break; + return; } } - } + } else { + return; + }; + + self.accept_one(msg); } } } diff --git a/src/server/h1.rs b/src/server/h1.rs index 511b32bce..9f3bda28f 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -464,6 +464,7 @@ where #[cfg(test)] mod tests { use std::net::Shutdown; + use std::sync::{atomic::AtomicUsize, Arc}; use std::{cmp, io, time}; use bytes::{Buf, Bytes, BytesMut}; @@ -473,10 +474,22 @@ mod tests { use super::*; use application::HttpApplication; use httpmessage::HttpMessage; + use server::accept::AcceptNotify; use server::h1decoder::Message; use server::settings::{ServerSettings, WorkerSettings}; use server::{KeepAlive, Request}; + fn wrk_settings() -> WorkerSettings { + WorkerSettings::::new( + Vec::new(), + KeepAlive::Os, + ServerSettings::default(), + AcceptNotify::default(), + Arc::new(AtomicUsize::new(0)), + Arc::new(AtomicUsize::new(0)), + ) + } + impl Message { fn message(self) -> Request { match self { @@ -506,8 +519,7 @@ mod tests { macro_rules! parse_ready { ($e:expr) => {{ - let settings: WorkerSettings = - WorkerSettings::new(Vec::new(), KeepAlive::Os, ServerSettings::default()); + let settings = wrk_settings(); match H1Decoder::new().decode($e, &settings) { Ok(Some(msg)) => msg.message(), Ok(_) => unreachable!("Eof during parsing http request"), @@ -518,8 +530,7 @@ mod tests { macro_rules! 
expect_parse_err { ($e:expr) => {{ - let settings: WorkerSettings = - WorkerSettings::new(Vec::new(), KeepAlive::Os, ServerSettings::default()); + let settings = wrk_settings(); match H1Decoder::new().decode($e, &settings) { Err(err) => match err { @@ -595,11 +606,7 @@ mod tests { fn test_req_parse() { let buf = Buffer::new("GET /test HTTP/1.1\r\n\r\n"); let readbuf = BytesMut::new(); - let settings = Rc::new(WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - )); + let settings = Rc::new(wrk_settings()); let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf); h1.poll_io(); @@ -611,11 +618,7 @@ mod tests { fn test_req_parse_err() { let buf = Buffer::new("GET /test HTTP/1\r\n\r\n"); let readbuf = BytesMut::new(); - let settings = Rc::new(WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - )); + let settings = Rc::new(wrk_settings()); let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf); h1.poll_io(); @@ -626,11 +629,7 @@ mod tests { #[test] fn test_parse() { let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n\r\n"); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); match reader.decode(&mut buf, &settings) { @@ -647,11 +646,7 @@ mod tests { #[test] fn test_parse_partial() { let mut buf = BytesMut::from("PUT /test HTTP/1"); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); match reader.decode(&mut buf, &settings) { @@ -674,11 +669,7 @@ mod tests { #[test] fn test_parse_post() { let mut buf = BytesMut::from("POST /test2 HTTP/1.0\r\n\r\n"); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); match reader.decode(&mut buf, &settings) { @@ -696,11 +687,7 @@ mod tests { fn test_parse_body() { let mut buf = BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody"); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); match reader.decode(&mut buf, &settings) { @@ -727,11 +714,7 @@ mod tests { fn test_parse_body_crlf() { let mut buf = BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody"); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); match reader.decode(&mut buf, &settings) { @@ -757,11 +740,7 @@ mod tests { #[test] fn test_parse_partial_eof() { let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n"); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); assert!(reader.decode(&mut buf, &settings).unwrap().is_none()); @@ -780,11 +759,7 @@ mod tests { #[test] fn test_headers_split_field() { let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n"); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); assert!{ reader.decode(&mut buf, &settings).unwrap().is_none() } @@ -815,11 +790,7 @@ mod tests { Set-Cookie: c1=cookie1\r\n\ Set-Cookie: 
c2=cookie2\r\n\r\n", ); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); let msg = reader.decode(&mut buf, &settings).unwrap().unwrap(); let req = msg.message(); @@ -1015,11 +986,7 @@ mod tests { #[test] fn test_http_request_upgrade() { - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut buf = BytesMut::from( "GET /test HTTP/1.1\r\n\ connection: upgrade\r\n\ @@ -1085,12 +1052,7 @@ mod tests { "GET /test HTTP/1.1\r\n\ transfer-encoding: chunked\r\n\r\n", ); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); - + let settings = wrk_settings(); let mut reader = H1Decoder::new(); let msg = reader.decode(&mut buf, &settings).unwrap().unwrap(); assert!(msg.is_payload()); @@ -1125,11 +1087,7 @@ mod tests { "GET /test HTTP/1.1\r\n\ transfer-encoding: chunked\r\n\r\n", ); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); let msg = reader.decode(&mut buf, &settings).unwrap().unwrap(); assert!(msg.is_payload()); @@ -1163,11 +1121,7 @@ mod tests { "GET /test HTTP/1.1\r\n\ transfer-encoding: chunked\r\n\r\n", ); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); let msg = reader.decode(&mut buf, &settings).unwrap().unwrap(); @@ -1214,11 +1168,7 @@ mod tests { &"GET /test HTTP/1.1\r\n\ transfer-encoding: chunked\r\n\r\n"[..], ); - let settings = WorkerSettings::::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = wrk_settings(); let mut reader = H1Decoder::new(); let msg = reader.decode(&mut buf, &settings).unwrap().unwrap(); diff --git a/src/server/settings.rs b/src/server/settings.rs index cc2e1c06e..8e30646d9 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -1,7 +1,8 @@ -use std::cell::{Cell, RefCell, RefMut, UnsafeCell}; +use std::cell::{RefCell, RefMut, UnsafeCell}; use std::collections::VecDeque; use std::fmt::Write; use std::rc::Rc; +use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; use std::{env, fmt, net}; use bytes::BytesMut; @@ -11,6 +12,7 @@ use lazycell::LazyCell; use parking_lot::Mutex; use time; +use super::accept::AcceptNotify; use super::channel::Node; use super::message::{Request, RequestPool}; use super::KeepAlive; @@ -93,21 +95,6 @@ impl ServerSettings { } } - pub(crate) fn parts(&self) -> (Option, String, bool) { - (self.addr, self.host.clone(), self.secure) - } - - pub(crate) fn from_parts(parts: (Option, String, bool)) -> Self { - let (addr, host, secure) = parts; - ServerSettings { - addr, - host, - secure, - cpu_pool: LazyCell::new(), - responses: HttpResponsePool::get_pool(), - } - } - /// Returns the socket address of the local half of this TCP connection pub fn local_addr(&self) -> Option { self.addr @@ -150,14 +137,17 @@ pub(crate) struct WorkerSettings { ka_enabled: bool, bytes: Rc, messages: &'static RequestPool, - channels: Cell, + channels: Arc, node: RefCell>, date: UnsafeCell, + sslrate: Arc, + notify: AcceptNotify, } impl WorkerSettings { pub(crate) fn new( h: Vec, keep_alive: KeepAlive, settings: ServerSettings, + notify: AcceptNotify, channels: Arc, sslrate: Arc, ) -> WorkerSettings { let 
(keep_alive, ka_enabled) = match keep_alive { KeepAlive::Timeout(val) => (val as u64, true), @@ -169,16 +159,18 @@ impl WorkerSettings { h: RefCell::new(h), bytes: Rc::new(SharedBytesPool::new()), messages: RequestPool::pool(settings), - channels: Cell::new(0), node: RefCell::new(Node::head()), date: UnsafeCell::new(Date::new()), keep_alive, ka_enabled, + channels, + sslrate, + notify, } } pub fn num_channels(&self) -> usize { - self.channels.get() + self.channels.load(Ordering::Relaxed) } pub fn head(&self) -> RefMut> { @@ -210,16 +202,12 @@ impl WorkerSettings { } pub fn add_channel(&self) { - self.channels.set(self.channels.get() + 1); + self.channels.fetch_add(1, Ordering::Relaxed); } pub fn remove_channel(&self) { - let num = self.channels.get(); - if num > 0 { - self.channels.set(num - 1); - } else { - error!("Number of removed channels is bigger than added channel. Bug in actix-web"); - } + let val = self.channels.fetch_sub(1, Ordering::Relaxed); + self.notify.notify_maxconn(val); } pub fn update_date(&self) { @@ -240,6 +228,16 @@ impl WorkerSettings { dst.extend_from_slice(date_bytes); } } + + #[allow(dead_code)] + pub(crate) fn ssl_conn_add(&self) { + self.sslrate.fetch_add(1, Ordering::Relaxed); + } + #[allow(dead_code)] + pub(crate) fn ssl_conn_del(&self) { + let val = self.sslrate.fetch_sub(1, Ordering::Relaxed); + self.notify.notify_maxsslrate(val); + } } struct Date { @@ -311,6 +309,9 @@ mod tests { Vec::new(), KeepAlive::Os, ServerSettings::default(), + AcceptNotify::default(), + Arc::new(AtomicUsize::new(0)), + Arc::new(AtomicUsize::new(0)), ); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); settings.set_date(&mut buf1, true); diff --git a/src/server/srv.rs b/src/server/srv.rs index e776f7422..b6bd21967 100644 --- a/src/server/srv.rs +++ b/src/server/srv.rs @@ -1,5 +1,5 @@ use std::rc::Rc; -use std::sync::{mpsc as sync_mpsc, Arc}; +use std::sync::{atomic::AtomicUsize, Arc}; use std::time::Duration; use std::{io, net}; @@ -10,10 +10,8 @@ use actix::{ use futures::sync::mpsc; use futures::{Future, Sink, Stream}; -use mio; use net2::TcpBuilder; use num_cpus; -use slab::Slab; use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "tls")] @@ -25,10 +23,12 @@ use openssl::ssl::{AlpnError, SslAcceptorBuilder}; #[cfg(feature = "rust-tls")] use rustls::ServerConfig; -use super::accept::{start_accept_thread, Command}; +use super::accept::{AcceptLoop, AcceptNotify, Command}; use super::channel::{HttpChannel, WrapperStream}; use super::settings::{ServerSettings, WorkerSettings}; -use super::worker::{Conn, SocketInfo, StopWorker, StreamHandlerType, Worker}; +use super::worker::{ + Conn, StopWorker, StreamHandlerType, Worker, WorkerClient, WorkersPool, +}; use super::{IntoHttpHandler, IoStream, KeepAlive}; use super::{PauseServer, ResumeServer, StopServer}; @@ -54,17 +54,10 @@ where h: Option>>, threads: usize, backlog: i32, - host: Option, - keep_alive: KeepAlive, - factory: Arc Vec + Send + Sync>, - #[cfg_attr(feature = "cargo-clippy", allow(type_complexity))] - workers: Vec<(usize, Addr>)>, sockets: Vec, - accept: Option<( - mio::SetReadiness, - sync_mpsc::Sender, - Slab, - )>, + pool: WorkersPool, + workers: Vec<(usize, Addr>)>, + accept: AcceptLoop, exit: bool, shutdown_timeout: u16, signals: Option>, @@ -105,12 +98,10 @@ where h: None, threads: num_cpus::get(), backlog: 2048, - host: None, - keep_alive: KeepAlive::Os, - factory: Arc::new(f), + pool: WorkersPool::new(f), workers: Vec::new(), sockets: Vec::new(), - accept: None, + accept: AcceptLoop::new(), exit: 
false, shutdown_timeout: 30, signals: None, @@ -128,15 +119,6 @@ where self } - #[doc(hidden)] - #[deprecated( - since = "0.6.0", - note = "please use `HttpServer::workers()` instead" - )] - pub fn threads(self, num: usize) -> Self { - self.workers(num) - } - /// Set the maximum number of pending connections. /// /// This refers to the number of clients that can be waiting to be served. @@ -152,11 +134,34 @@ where self } + /// Sets the maximum per-worker number of concurrent connections. + /// + /// All socket listeners will stop accepting connections when this limit is reached + /// for each worker. + /// + /// By default max connections is set to a 100k. + pub fn max_connections(mut self, num: usize) -> Self { + self.accept.max_connections(num); + self + } + + /// Sets the maximum concurrent per-worker number of SSL handshakes. + /// + /// All listeners will stop accepting connections when this limit is reached. It + /// can be used to limit the global SSL CPU usage regardless of each worker + /// capacity. + /// + /// By default max connections is set to a 256. + pub fn max_sslrate(mut self, num: usize) -> Self { + self.accept.max_sslrate(num); + self + } + /// Set server keep-alive setting. /// /// By default keep alive is set to a `Os`. pub fn keep_alive>(mut self, val: T) -> Self { - self.keep_alive = val.into(); + self.pool.keep_alive = val.into(); self } @@ -166,7 +171,7 @@ where /// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo. /// html#method.host) documentation for more information. pub fn server_hostname(mut self, val: String) -> Self { - self.host = Some(val); + self.pool.host = Some(val); self } @@ -395,27 +400,12 @@ where Ok(self) } - fn start_workers( - &mut self, settings: &ServerSettings, sockets: &Slab, - ) -> Vec<(usize, mpsc::UnboundedSender>)> { + fn start_workers(&mut self, notify: &AcceptNotify) -> Vec { // start workers let mut workers = Vec::new(); for idx in 0..self.threads { - let (tx, rx) = mpsc::unbounded::>(); - - let ka = self.keep_alive; - let socks = sockets.clone(); - let factory = Arc::clone(&self.factory); - let parts = settings.parts(); - - let addr = Arbiter::start(move |ctx: &mut Context<_>| { - let s = ServerSettings::from_parts(parts); - let apps: Vec<_> = - (*factory)().into_iter().map(|h| h.into_handler()).collect(); - ctx.add_message_stream(rx); - Worker::new(apps, socks, ka, s) - }); - workers.push((idx, tx)); + let (worker, addr) = self.pool.start(idx, notify.clone()); + workers.push(worker); self.workers.push((idx, addr)); } info!("Starting {} http workers", self.threads); @@ -466,30 +456,20 @@ impl HttpServer { if self.sockets.is_empty() { panic!("HttpServer::bind() has to be called before start()"); } else { - let (tx, rx) = mpsc::unbounded(); - - let mut socks = Slab::new(); let mut addrs: Vec<(usize, Socket)> = Vec::new(); for socket in self.sockets.drain(..) 
{ - let entry = socks.vacant_entry(); - let token = entry.key(); - entry.insert(SocketInfo { - addr: socket.addr, - htype: socket.tp.clone(), - }); + let token = self.pool.insert(socket.addr, socket.tp.clone()); addrs.push((token, socket)); } - - let settings = ServerSettings::new(Some(addrs[0].1.addr), &self.host, false); - let workers = self.start_workers(&settings, &socks); + let notify = self.accept.get_notify(); + let workers = self.start_workers(¬ify); // start accept thread for (_, sock) in &addrs { info!("Starting server on http://{}", sock.addr); } - let (r, cmd) = start_accept_thread(addrs, tx.clone(), workers.clone()); - self.accept = Some((r, cmd, socks)); + let rx = self.accept.start(addrs, workers.clone()); // start http server actor let signals = self.subscribe_to_signals(); @@ -600,15 +580,18 @@ impl HttpServer { { // set server settings let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); - let settings = ServerSettings::new(Some(addr), &self.host, secure); - let apps: Vec<_> = (*self.factory)() + let settings = ServerSettings::new(Some(addr), &self.pool.host, secure); + let apps: Vec<_> = (*self.pool.factory)() .into_iter() .map(|h| h.into_handler()) .collect(); self.h = Some(Rc::new(WorkerSettings::new( apps, - self.keep_alive, + self.pool.keep_alive, settings, + AcceptNotify::default(), + Arc::new(AtomicUsize::new(0)), + Arc::new(AtomicUsize::new(0)), ))); // start server @@ -676,7 +659,6 @@ impl StreamHandler for HttpServer { if found { error!("Worker has died {:?}, restarting", idx); - let (tx, rx) = mpsc::unbounded::>(); let mut new_idx = self.workers.len(); 'found: loop { @@ -689,25 +671,10 @@ impl StreamHandler for HttpServer { break; } - let ka = self.keep_alive; - let factory = Arc::clone(&self.factory); - let host = self.host.clone(); - let socks = self.accept.as_ref().unwrap().2.clone(); - let addr = socks[0].addr; - - let addr = Arbiter::start(move |ctx: &mut Context<_>| { - let settings = ServerSettings::new(Some(addr), &host, false); - let apps: Vec<_> = - (*factory)().into_iter().map(|h| h.into_handler()).collect(); - ctx.add_message_stream(rx); - Worker::new(apps, socks, ka, settings) - }); - if let Some(ref item) = &self.accept { - let _ = item.1.send(Command::Worker(new_idx, tx.clone())); - let _ = item.0.set_readiness(mio::Ready::readable()); - } - + let (worker, addr) = + self.pool.start(new_idx, self.accept.get_notify()); self.workers.push((new_idx, addr)); + self.accept.send(Command::Worker(worker)); } } } @@ -735,10 +702,7 @@ impl Handler for HttpServer { type Result = (); fn handle(&mut self, _: PauseServer, _: &mut Context) { - for item in &self.accept { - let _ = item.1.send(Command::Pause); - let _ = item.0.set_readiness(mio::Ready::readable()); - } + self.accept.send(Command::Pause); } } @@ -746,10 +710,7 @@ impl Handler for HttpServer { type Result = (); fn handle(&mut self, _: ResumeServer, _: &mut Context) { - for item in &self.accept { - let _ = item.1.send(Command::Resume); - let _ = item.0.set_readiness(mio::Ready::readable()); - } + self.accept.send(Command::Resume); } } @@ -758,10 +719,7 @@ impl Handler for HttpServer { fn handle(&mut self, msg: StopServer, ctx: &mut Context) -> Self::Result { // stop accept threads - for item in &self.accept { - let _ = item.1.send(Command::Stop); - let _ = item.0.set_readiness(mio::Ready::readable()); - } + self.accept.send(Command::Stop); // stop workers let (tx, rx) = mpsc::channel(1); diff --git a/src/server/worker.rs b/src/server/worker.rs index 5e753ce58..ed0799563 100644 --- 
a/src/server/worker.rs +++ b/src/server/worker.rs @@ -1,9 +1,12 @@ +use std::rc::Rc; +use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; +use std::{net, time}; + +use futures::sync::mpsc::{unbounded, SendError, UnboundedSender}; use futures::sync::oneshot; use futures::Future; use net2::TcpStreamExt; use slab::Slab; -use std::rc::Rc; -use std::{net, time}; use tokio::executor::current_thread; use tokio_reactor::Handle; use tokio_tcp::TcpStream; @@ -24,16 +27,15 @@ use tokio_openssl::SslAcceptorExt; #[cfg(feature = "rust-tls")] use rustls::{ServerConfig, Session}; #[cfg(feature = "rust-tls")] -use std::sync::Arc; -#[cfg(feature = "rust-tls")] use tokio_rustls::ServerConfigExt; use actix::msgs::StopArbiter; -use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message, Response}; +use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, Message, Response}; -use server::channel::HttpChannel; -use server::settings::{ServerSettings, WorkerSettings}; -use server::{HttpHandler, KeepAlive}; +use super::accept::AcceptNotify; +use super::channel::HttpChannel; +use super::settings::{ServerSettings, WorkerSettings}; +use super::{HttpHandler, IntoHttpHandler, KeepAlive}; #[derive(Message)] pub(crate) struct Conn { @@ -49,6 +51,95 @@ pub(crate) struct SocketInfo { pub htype: StreamHandlerType, } +pub(crate) struct WorkersPool { + sockets: Slab, + pub factory: Arc Vec + Send + Sync>, + pub host: Option, + pub keep_alive: KeepAlive, +} + +impl WorkersPool { + pub fn new(factory: F) -> Self + where + F: Fn() -> Vec + Send + Sync + 'static, + { + WorkersPool { + factory: Arc::new(factory), + host: None, + keep_alive: KeepAlive::Os, + sockets: Slab::new(), + } + } + + pub fn insert(&mut self, addr: net::SocketAddr, htype: StreamHandlerType) -> usize { + let entry = self.sockets.vacant_entry(); + let token = entry.key(); + entry.insert(SocketInfo { addr, htype }); + token + } + + pub fn start( + &mut self, idx: usize, notify: AcceptNotify, + ) -> (WorkerClient, Addr>) { + let host = self.host.clone(); + let addr = self.sockets[0].addr; + let factory = Arc::clone(&self.factory); + let socks = self.sockets.clone(); + let ka = self.keep_alive; + let (tx, rx) = unbounded::>(); + let client = WorkerClient::new(idx, tx, self.sockets.clone()); + let conn = client.conn.clone(); + let sslrate = client.sslrate.clone(); + + let addr = Arbiter::start(move |ctx: &mut Context<_>| { + let s = ServerSettings::new(Some(addr), &host, false); + let apps: Vec<_> = + (*factory)().into_iter().map(|h| h.into_handler()).collect(); + ctx.add_message_stream(rx); + Worker::new(apps, socks, ka, s, conn, sslrate, notify) + }); + + (client, addr) + } +} + +#[derive(Clone)] +pub(crate) struct WorkerClient { + pub idx: usize, + tx: UnboundedSender>, + info: Slab, + pub conn: Arc, + pub sslrate: Arc, +} + +impl WorkerClient { + fn new( + idx: usize, tx: UnboundedSender>, info: Slab, + ) -> Self { + WorkerClient { + idx, + tx, + info, + conn: Arc::new(AtomicUsize::new(0)), + sslrate: Arc::new(AtomicUsize::new(0)), + } + } + + pub fn send( + &self, msg: Conn, + ) -> Result<(), SendError>> { + self.tx.unbounded_send(msg) + } + + pub fn available(&self, maxconn: usize, maxsslrate: usize) -> bool { + if maxsslrate <= self.sslrate.load(Ordering::Relaxed) { + false + } else { + maxconn > self.conn.load(Ordering::Relaxed) + } + } +} + /// Stop worker message. Returns `true` on successful shutdown /// and `false` if some connections still alive. 
pub(crate) struct StopWorker { @@ -75,7 +166,8 @@ where impl Worker { pub(crate) fn new( h: Vec, socks: Slab, keep_alive: KeepAlive, - settings: ServerSettings, + settings: ServerSettings, conn: Arc, sslrate: Arc, + notify: AcceptNotify, ) -> Worker { let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { Some(time::Duration::new(val as u64, 0)) @@ -84,7 +176,9 @@ impl Worker { }; Worker { - settings: Rc::new(WorkerSettings::new(h, keep_alive, settings)), + settings: Rc::new(WorkerSettings::new( + h, keep_alive, settings, notify, conn, sslrate, + )), socks, tcp_ka, } @@ -182,6 +276,18 @@ pub(crate) enum StreamHandlerType { } impl StreamHandlerType { + pub fn is_ssl(&self) -> bool { + match *self { + StreamHandlerType::Normal => false, + #[cfg(feature = "tls")] + StreamHandlerType::Tls(_) => true, + #[cfg(feature = "alpn")] + StreamHandlerType::Alpn(_) => true, + #[cfg(feature = "rust-tls")] + StreamHandlerType::Rustls(_) => true, + } + } + fn handle( &mut self, h: Rc>, msg: Conn, ) { @@ -201,9 +307,11 @@ impl StreamHandlerType { let _ = io.set_nodelay(true); let io = TcpStream::from_std(io, &Handle::default()) .expect("failed to associate TCP stream"); + self.settings.ssl_conn_add(); current_thread::spawn(TlsAcceptorExt::accept_async(acceptor, io).then( move |res| { + self.settings.ssl_conn_del(); match res { Ok(io) => current_thread::spawn(HttpChannel::new( h, io, peer, http2, @@ -222,9 +330,11 @@ impl StreamHandlerType { let _ = io.set_nodelay(true); let io = TcpStream::from_std(io, &Handle::default()) .expect("failed to associate TCP stream"); + self.settings.ssl_conn_add(); current_thread::spawn(SslAcceptorExt::accept_async(acceptor, io).then( move |res| { + self.settings.ssl_conn_del(); match res { Ok(io) => { let http2 = if let Some(p) = @@ -252,9 +362,11 @@ impl StreamHandlerType { let _ = io.set_nodelay(true); let io = TcpStream::from_std(io, &Handle::default()) .expect("failed to associate TCP stream"); + self.settings.ssl_conn_add(); current_thread::spawn(ServerConfigExt::accept_async(acceptor, io).then( move |res| { + self.settings.ssl_conn_del(); match res { Ok(io) => { let http2 = if let Some(p) = From f8e5d7c6c1a1a5c30da7c904fbc4b5d1276ec6fd Mon Sep 17 00:00:00 2001 From: Mathieu Amiot <1262712+OtaK@users.noreply.github.com> Date: Fri, 3 Aug 2018 11:11:51 +0000 Subject: [PATCH 027/219] Fixed broken build on wrong variable usage (#440) --- src/server/worker.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/server/worker.rs b/src/server/worker.rs index ed0799563..e9bf42250 100644 --- a/src/server/worker.rs +++ b/src/server/worker.rs @@ -307,11 +307,11 @@ impl StreamHandlerType { let _ = io.set_nodelay(true); let io = TcpStream::from_std(io, &Handle::default()) .expect("failed to associate TCP stream"); - self.settings.ssl_conn_add(); + h.ssl_conn_add(); current_thread::spawn(TlsAcceptorExt::accept_async(acceptor, io).then( move |res| { - self.settings.ssl_conn_del(); + h.ssl_conn_del(); match res { Ok(io) => current_thread::spawn(HttpChannel::new( h, io, peer, http2, @@ -330,11 +330,11 @@ impl StreamHandlerType { let _ = io.set_nodelay(true); let io = TcpStream::from_std(io, &Handle::default()) .expect("failed to associate TCP stream"); - self.settings.ssl_conn_add(); + h.ssl_conn_add(); current_thread::spawn(SslAcceptorExt::accept_async(acceptor, io).then( move |res| { - self.settings.ssl_conn_del(); + h.ssl_conn_del(); match res { Ok(io) => { let http2 = if let Some(p) = @@ -362,11 +362,11 @@ impl StreamHandlerType { let _ = 
io.set_nodelay(true); let io = TcpStream::from_std(io, &Handle::default()) .expect("failed to associate TCP stream"); - self.settings.ssl_conn_add(); + h.ssl_conn_add(); current_thread::spawn(ServerConfigExt::accept_async(acceptor, io).then( move |res| { - self.settings.ssl_conn_del(); + h.ssl_conn_del(); match res { Ok(io) => { let http2 = if let Some(p) = From 9a10d8aa7a8c48be693382fb8864a2381925bd73 Mon Sep 17 00:00:00 2001 From: Mathieu Amiot <1262712+OtaK@users.noreply.github.com> Date: Fri, 3 Aug 2018 12:03:11 +0000 Subject: [PATCH 028/219] Fixed headers' formating for CORS Middleware Access-Control-Expose-Headers header value to HTTP/1.1 & HTTP/2 spec-compliant format (#436) --- CHANGES.md | 3 ++- src/middleware/cors.rs | 26 +++++++++++++++++++++----- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f7e663d63..d0488c558 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,7 +15,6 @@ * Allow TestServer to open a websocket on any URL (TestServer::ws_at()) #433 - ### Fixed * Fixed failure 0.1.2 compatibility @@ -26,6 +25,8 @@ * HttpRequest::url_for is not working with scopes #429 +* Fixed headers' formating for CORS Middleware `Access-Control-Expose-Headers` header value to HTTP/1.1 & HTTP/2 spec-compliant format #436 + ## [0.7.2] - 2018-07-26 diff --git a/src/middleware/cors.rs b/src/middleware/cors.rs index 052e4da23..a61727409 100644 --- a/src/middleware/cors.rs +++ b/src/middleware/cors.rs @@ -838,10 +838,9 @@ impl CorsBuilder { if !self.expose_hdrs.is_empty() { cors.expose_hdrs = Some( - self.expose_hdrs - .iter() - .fold(String::new(), |s, v| s + v.as_str())[1..] - .to_owned(), + self.expose_hdrs.iter() + .fold(String::new(), |s, v| format!("{}, {}", s, v.as_str()))[2..] + .to_owned() ); } Cors { @@ -1073,12 +1072,14 @@ mod tests { #[test] fn test_response() { + let exposed_headers = vec![header::AUTHORIZATION, header::ACCEPT]; let cors = Cors::build() .send_wildcard() .disable_preflight() .max_age(3600) .allowed_methods(vec![Method::GET, Method::OPTIONS, Method::POST]) - .allowed_headers(vec![header::AUTHORIZATION, header::ACCEPT]) + .allowed_headers(exposed_headers.clone()) + .expose_headers(exposed_headers.clone()) .allowed_header(header::CONTENT_TYPE) .finish(); @@ -1100,6 +1101,21 @@ mod tests { resp.headers().get(header::VARY).unwrap().as_bytes() ); + { + let headers = resp.headers() + .get(header::ACCESS_CONTROL_EXPOSE_HEADERS) + .unwrap() + .to_str() + .unwrap() + .split(',') + .map(|s| s.trim()) + .collect::>(); + + for h in exposed_headers { + assert!(headers.contains(&h.as_str())); + } + } + let resp: HttpResponse = HttpResponse::Ok().header(header::VARY, "Accept").finish(); let resp = cors.response(&req, resp).unwrap().response(); From e61ef7dee4a4e1017372783594b6f6bda6dc6f5c Mon Sep 17 00:00:00 2001 From: Jan Michael Auer Date: Fri, 3 Aug 2018 14:56:26 +0200 Subject: [PATCH 029/219] Use zlib instead of deflate for content encoding (#442) --- CHANGES.md | 3 +++ src/client/writer.rs | 6 +++--- src/server/input.rs | 6 +++--- src/server/output.rs | 8 ++++---- tests/test_server.rs | 12 ++++++------ 5 files changed, 19 insertions(+), 16 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index d0488c558..478b8e0e5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,6 +6,9 @@ * Added `HttpServer::max_connections()` and `HttpServer::max_sslrate()`, accept backpressure #250 +* Fix: Use zlib instead of raw deflate for decoding and encoding payloads with + `Content-Encoding: deflate`. 
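The hunks that follow swap `DeflateEncoder`/`DeflateDecoder` for `ZlibEncoder`/`ZlibDecoder`, so `Content-Encoding: deflate` now refers to the zlib-wrapped format on both the encode and decode paths. A sketch of producing a request body the server will accept, in line with the updated tests and assuming the flate2 crate:

```rust
// Sketch only: zlib-wrapped deflate, matching what the server now expects
// for `Content-Encoding: deflate` (raw deflate streams will no longer decode).
extern crate flate2;

use std::io::Write;

use flate2::write::ZlibEncoder;
use flate2::Compression;

fn deflate_body(payload: &[u8]) -> Vec<u8> {
    let mut enc = ZlibEncoder::new(Vec::new(), Compression::default());
    enc.write_all(payload).unwrap();
    enc.finish().unwrap()
}
```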
+ ## [0.7.3] - 2018-08-01 diff --git a/src/client/writer.rs b/src/client/writer.rs index b691407dd..81ad96510 100644 --- a/src/client/writer.rs +++ b/src/client/writer.rs @@ -8,7 +8,7 @@ use std::io::{self, Write}; use brotli2::write::BrotliEncoder; use bytes::{BufMut, BytesMut}; #[cfg(feature = "flate2")] -use flate2::write::{DeflateEncoder, GzEncoder}; +use flate2::write::{GzEncoder, ZlibEncoder}; #[cfg(feature = "flate2")] use flate2::Compression; use futures::{Async, Poll}; @@ -232,7 +232,7 @@ fn content_encoder(buf: BytesMut, req: &mut ClientRequest) -> Output { let mut enc = match encoding { #[cfg(feature = "flate2")] ContentEncoding::Deflate => ContentEncoder::Deflate( - DeflateEncoder::new(transfer, Compression::default()), + ZlibEncoder::new(transfer, Compression::default()), ), #[cfg(feature = "flate2")] ContentEncoding::Gzip => ContentEncoder::Gzip(GzEncoder::new( @@ -302,7 +302,7 @@ fn content_encoder(buf: BytesMut, req: &mut ClientRequest) -> Output { req.replace_body(body); let enc = match encoding { #[cfg(feature = "flate2")] - ContentEncoding::Deflate => ContentEncoder::Deflate(DeflateEncoder::new( + ContentEncoding::Deflate => ContentEncoder::Deflate(ZlibEncoder::new( transfer, Compression::default(), )), diff --git a/src/server/input.rs b/src/server/input.rs index fe62e760a..d23d1e991 100644 --- a/src/server/input.rs +++ b/src/server/input.rs @@ -5,7 +5,7 @@ use brotli2::write::BrotliDecoder; use bytes::{Bytes, BytesMut}; use error::PayloadError; #[cfg(feature = "flate2")] -use flate2::write::{DeflateDecoder, GzDecoder}; +use flate2::write::{GzDecoder, ZlibDecoder}; use header::ContentEncoding; use http::header::{HeaderMap, CONTENT_ENCODING}; use payload::{PayloadSender, PayloadStatus, PayloadWriter}; @@ -139,7 +139,7 @@ impl PayloadWriter for EncodedPayload { pub(crate) enum Decoder { #[cfg(feature = "flate2")] - Deflate(Box>), + Deflate(Box>), #[cfg(feature = "flate2")] Gzip(Box>), #[cfg(feature = "brotli")] @@ -186,7 +186,7 @@ impl PayloadStream { } #[cfg(feature = "flate2")] ContentEncoding::Deflate => { - Decoder::Deflate(Box::new(DeflateDecoder::new(Writer::new()))) + Decoder::Deflate(Box::new(ZlibDecoder::new(Writer::new()))) } #[cfg(feature = "flate2")] ContentEncoding::Gzip => { diff --git a/src/server/output.rs b/src/server/output.rs index 597faf342..970e03d8d 100644 --- a/src/server/output.rs +++ b/src/server/output.rs @@ -7,7 +7,7 @@ use std::{cmp, fmt, io, mem}; use brotli2::write::BrotliEncoder; use bytes::BytesMut; #[cfg(feature = "flate2")] -use flate2::write::{DeflateEncoder, GzEncoder}; +use flate2::write::{GzEncoder, ZlibEncoder}; #[cfg(feature = "flate2")] use flate2::Compression; use http::header::{ACCEPT_ENCODING, CONTENT_LENGTH}; @@ -210,7 +210,7 @@ impl Output { let mut enc = match encoding { #[cfg(feature = "flate2")] ContentEncoding::Deflate => ContentEncoder::Deflate( - DeflateEncoder::new(transfer, Compression::fast()), + ZlibEncoder::new(transfer, Compression::fast()), ), #[cfg(feature = "flate2")] ContentEncoding::Gzip => ContentEncoder::Gzip( @@ -273,7 +273,7 @@ impl Output { let enc = match encoding { #[cfg(feature = "flate2")] - ContentEncoding::Deflate => ContentEncoder::Deflate(DeflateEncoder::new( + ContentEncoding::Deflate => ContentEncoder::Deflate(ZlibEncoder::new( transfer, Compression::fast(), )), @@ -354,7 +354,7 @@ impl Output { pub(crate) enum ContentEncoder { #[cfg(feature = "flate2")] - Deflate(DeflateEncoder), + Deflate(ZlibEncoder), #[cfg(feature = "flate2")] Gzip(GzEncoder), #[cfg(feature = "brotli")] diff --git 
a/tests/test_server.rs b/tests/test_server.rs index 842d685f0..4db73a3be 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -20,7 +20,7 @@ use std::{net, thread, time}; use brotli2::write::{BrotliDecoder, BrotliEncoder}; use bytes::{Bytes, BytesMut}; use flate2::read::GzDecoder; -use flate2::write::{DeflateDecoder, DeflateEncoder, GzEncoder}; +use flate2::write::{GzEncoder, ZlibDecoder, ZlibEncoder}; use flate2::Compression; use futures::stream::once; use futures::{Future, Stream}; @@ -528,7 +528,7 @@ fn test_body_chunked_explicit() { #[test] fn test_body_identity() { - let mut e = DeflateEncoder::new(Vec::new(), Compression::default()); + let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(STR.as_ref()).unwrap(); let enc = e.finish().unwrap(); let enc2 = enc.clone(); @@ -578,7 +578,7 @@ fn test_body_deflate() { let bytes = srv.execute(response.body()).unwrap(); // decode deflate - let mut e = DeflateDecoder::new(Vec::new()); + let mut e = ZlibDecoder::new(Vec::new()); e.write_all(bytes.as_ref()).unwrap(); let dec = e.finish().unwrap(); assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref())); @@ -727,7 +727,7 @@ fn test_reading_deflate_encoding() { }) }); - let mut e = DeflateEncoder::new(Vec::new(), Compression::default()); + let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(STR.as_ref()).unwrap(); let enc = e.finish().unwrap(); @@ -760,7 +760,7 @@ fn test_reading_deflate_encoding_large() { }) }); - let mut e = DeflateEncoder::new(Vec::new(), Compression::default()); + let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(data.as_ref()).unwrap(); let enc = e.finish().unwrap(); @@ -797,7 +797,7 @@ fn test_reading_deflate_encoding_large_random() { }) }); - let mut e = DeflateEncoder::new(Vec::new(), Compression::default()); + let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(data.as_ref()).unwrap(); let enc = e.finish().unwrap(); From 036cf5e867a997f059784837712c1d1d05b84fbe Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 3 Aug 2018 08:20:59 -0700 Subject: [PATCH 030/219] update changes --- CHANGES.md | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 478b8e0e5..c6e4a9436 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,11 +4,16 @@ ### Added -* Added `HttpServer::max_connections()` and `HttpServer::max_sslrate()`, accept backpressure #250 +* Added `HttpServer::max_connections()` and `HttpServer::max_sslrate()`, + accept backpressure #250 -* Fix: Use zlib instead of raw deflate for decoding and encoding payloads with +### Fixed + +* Use zlib instead of raw deflate for decoding and encoding payloads with `Content-Encoding: deflate`. 
+* Fixed headers formating for CORS Middleware Access-Control-Expose-Headers #436 + ## [0.7.3] - 2018-08-01 From f3f1e04853dbaf1ff7f014ff50319fc822e20240 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 3 Aug 2018 16:09:46 -0700 Subject: [PATCH 031/219] refactor ssl support --- CHANGES.md | 5 +- src/server/accept.rs | 60 ++-- src/server/h1.rs | 2 +- src/server/h2.rs | 2 +- src/server/mod.rs | 129 +++----- src/server/settings.rs | 24 +- src/server/srv.rs | 379 ++++++++-------------- src/server/ssl/mod.rs | 14 + src/server/ssl/nativetls.rs | 67 ++++ src/server/ssl/openssl.rs | 96 ++++++ src/server/ssl/rustls.rs | 92 ++++++ src/server/worker.rs | 622 ++++++++++++++++++++++-------------- 12 files changed, 879 insertions(+), 613 deletions(-) create mode 100644 src/server/ssl/mod.rs create mode 100644 src/server/ssl/nativetls.rs create mode 100644 src/server/ssl/openssl.rs create mode 100644 src/server/ssl/rustls.rs diff --git a/CHANGES.md b/CHANGES.md index c6e4a9436..4d1610c09 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,9 +4,12 @@ ### Added -* Added `HttpServer::max_connections()` and `HttpServer::max_sslrate()`, +* Added `HttpServer::maxconn()` and `HttpServer::maxconnrate()`, accept backpressure #250 +* Allow to customize connection handshake process via `HttpServer::listen_with()` + and `HttpServer::bind_with()` methods + ### Fixed * Use zlib instead of raw deflate for decoding and encoding payloads with diff --git a/src/server/accept.rs b/src/server/accept.rs index f846e4a40..e837852d3 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -9,8 +9,8 @@ use tokio_timer::Delay; use actix::{msgs::Execute, Arbiter, System}; -use super::srv::{ServerCommand, Socket}; -use super::worker::{Conn, WorkerClient}; +use super::srv::ServerCommand; +use super::worker::{Conn, Socket, Token, WorkerClient}; pub(crate) enum Command { Pause, @@ -21,7 +21,7 @@ pub(crate) enum Command { struct ServerSocketInfo { addr: net::SocketAddr, - token: usize, + token: Token, sock: mio::net::TcpListener, timeout: Option, } @@ -31,20 +31,24 @@ pub(crate) struct AcceptNotify { ready: mio::SetReadiness, maxconn: usize, maxconn_low: usize, - maxsslrate: usize, - maxsslrate_low: usize, + maxconnrate: usize, + maxconnrate_low: usize, } impl AcceptNotify { - pub fn new(ready: mio::SetReadiness, maxconn: usize, maxsslrate: usize) -> Self { + pub fn new(ready: mio::SetReadiness, maxconn: usize, maxconnrate: usize) -> Self { let maxconn_low = if maxconn > 10 { maxconn - 10 } else { 0 }; - let maxsslrate_low = if maxsslrate > 10 { maxsslrate - 10 } else { 0 }; + let maxconnrate_low = if maxconnrate > 10 { + maxconnrate - 10 + } else { + 0 + }; AcceptNotify { ready, maxconn, maxconn_low, - maxsslrate, - maxsslrate_low, + maxconnrate, + maxconnrate_low, } } @@ -53,8 +57,8 @@ impl AcceptNotify { let _ = self.ready.set_readiness(mio::Ready::readable()); } } - pub fn notify_maxsslrate(&self, sslrate: usize) { - if sslrate > self.maxsslrate_low && sslrate <= self.maxsslrate { + pub fn notify_maxconnrate(&self, connrate: usize) { + if connrate > self.maxconnrate_low && connrate <= self.maxconnrate { let _ = self.ready.set_readiness(mio::Ready::readable()); } } @@ -78,7 +82,7 @@ pub(crate) struct AcceptLoop { mpsc::UnboundedReceiver, )>, maxconn: usize, - maxsslrate: usize, + maxconnrate: usize, } impl AcceptLoop { @@ -94,7 +98,7 @@ impl AcceptLoop { notify_ready, notify_reg: Some(notify_reg), maxconn: 102_400, - maxsslrate: 256, + maxconnrate: 256, rx: Some(rx), srv: Some(mpsc::unbounded()), } @@ -106,19 +110,19 @@ impl 
AcceptLoop { } pub fn get_notify(&self) -> AcceptNotify { - AcceptNotify::new(self.notify_ready.clone(), self.maxconn, self.maxsslrate) + AcceptNotify::new(self.notify_ready.clone(), self.maxconn, self.maxconnrate) } - pub fn max_connections(&mut self, num: usize) { + pub fn maxconn(&mut self, num: usize) { self.maxconn = num; } - pub fn max_sslrate(&mut self, num: usize) { - self.maxsslrate = num; + pub fn maxconnrate(&mut self, num: usize) { + self.maxconnrate = num; } pub(crate) fn start( - &mut self, socks: Vec<(usize, Socket)>, workers: Vec, + &mut self, socks: Vec, workers: Vec, ) -> mpsc::UnboundedReceiver { let (tx, rx) = self.srv.take().expect("Can not re-use AcceptInfo"); @@ -127,7 +131,7 @@ impl AcceptLoop { self.cmd_reg.take().expect("Can not re-use AcceptInfo"), self.notify_reg.take().expect("Can not re-use AcceptInfo"), self.maxconn, - self.maxsslrate, + self.maxconnrate, socks, tx, workers, @@ -145,7 +149,7 @@ struct Accept { timer: (mio::Registration, mio::SetReadiness), next: usize, maxconn: usize, - maxsslrate: usize, + maxconnrate: usize, backpressure: bool, } @@ -171,8 +175,8 @@ impl Accept { #![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] pub(crate) fn start( rx: sync_mpsc::Receiver, cmd_reg: mio::Registration, - notify_reg: mio::Registration, maxconn: usize, maxsslrate: usize, - socks: Vec<(usize, Socket)>, srv: mpsc::UnboundedSender, + notify_reg: mio::Registration, maxconn: usize, maxconnrate: usize, + socks: Vec, srv: mpsc::UnboundedSender, workers: Vec, ) { let sys = System::current(); @@ -184,7 +188,7 @@ impl Accept { System::set_current(sys); let mut accept = Accept::new(rx, socks, workers, srv); accept.maxconn = maxconn; - accept.maxsslrate = maxsslrate; + accept.maxconnrate = maxconnrate; // Start listening for incoming commands if let Err(err) = accept.poll.register( @@ -211,7 +215,7 @@ impl Accept { } fn new( - rx: sync_mpsc::Receiver, socks: Vec<(usize, Socket)>, + rx: sync_mpsc::Receiver, socks: Vec, workers: Vec, srv: mpsc::UnboundedSender, ) -> Accept { // Create a poll instance @@ -222,7 +226,7 @@ impl Accept { // Start accept let mut sockets = Slab::new(); - for (stoken, sock) in socks { + for sock in socks { let server = mio::net::TcpListener::from_std(sock.lst) .expect("Can not create mio::net::TcpListener"); @@ -240,7 +244,7 @@ impl Accept { } entry.insert(ServerSocketInfo { - token: stoken, + token: sock.token, addr: sock.addr, sock: server, timeout: None, @@ -264,7 +268,7 @@ impl Accept { next: 0, timer: (tm, tmr), maxconn: 102_400, - maxsslrate: 256, + maxconnrate: 256, backpressure: false, } } @@ -427,7 +431,7 @@ impl Accept { let mut idx = 0; while idx < self.workers.len() { idx += 1; - if self.workers[self.next].available(self.maxconn, self.maxsslrate) { + if self.workers[self.next].available(self.maxconn, self.maxconnrate) { match self.workers[self.next].send(msg) { Ok(_) => { self.next = (self.next + 1) % self.workers.len(); diff --git a/src/server/h1.rs b/src/server/h1.rs index 9f3bda28f..085cea005 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -375,7 +375,7 @@ where self.keepalive_timer.take(); // search handler for request - for h in self.settings.handlers().iter_mut() { + for h in self.settings.handlers().iter() { msg = match h.handle(msg) { Ok(mut pipe) => { if self.tasks.is_empty() { diff --git a/src/server/h2.rs b/src/server/h2.rs index e5355a1fd..cb5367c5e 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -347,7 +347,7 @@ impl Entry { // start request processing let mut task = None; - for h in 
settings.handlers().iter_mut() { + for h in settings.handlers().iter() { msg = match h.handle(msg) { Ok(t) => { task = Some(t); diff --git a/src/server/mod.rs b/src/server/mod.rs index 429e293f2..55de25db4 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -1,10 +1,11 @@ //! Http server use std::net::Shutdown; -use std::{io, time}; +use std::{io, net, time}; use bytes::{BufMut, BytesMut}; -use futures::{Async, Poll}; +use futures::{Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_reactor::Handle; use tokio_tcp::TcpStream; pub(crate) mod accept; @@ -21,11 +22,13 @@ pub(crate) mod message; pub(crate) mod output; pub(crate) mod settings; mod srv; +mod ssl; mod worker; pub use self::message::Request; pub use self::settings::ServerSettings; pub use self::srv::HttpServer; +pub use self::ssl::*; #[doc(hidden)] pub use self::helpers::write_content_length; @@ -72,6 +75,13 @@ where HttpServer::new(factory) } +bitflags! { + pub struct ServerFlags: u8 { + const HTTP1 = 0b0000_0001; + const HTTP2 = 0b0000_0010; + } +} + #[derive(Debug, PartialEq, Clone, Copy)] /// Server keep-alive setting pub enum KeepAlive { @@ -179,6 +189,34 @@ impl IntoHttpHandler for T { } } +pub(crate) trait IntoAsyncIo { + type Io: AsyncRead + AsyncWrite; + + fn into_async_io(self) -> Result; +} + +impl IntoAsyncIo for net::TcpStream { + type Io = TcpStream; + + fn into_async_io(self) -> Result { + TcpStream::from_std(self, &Handle::default()) + } +} + +/// Trait implemented by types that could accept incomming socket connections. +pub trait AcceptorService: Clone { + /// Established connection type + type Accepted: IoStream; + /// Future describes async accept process. + type Future: Future + 'static; + + /// Establish new connection + fn accept(&self, io: Io) -> Self::Future; + + /// Scheme + fn scheme(&self) -> &'static str; +} + #[doc(hidden)] #[derive(Debug)] pub enum WriterState { @@ -267,90 +305,3 @@ impl IoStream for TcpStream { TcpStream::set_linger(self, dur) } } - -#[cfg(feature = "alpn")] -use tokio_openssl::SslStream; - -#[cfg(feature = "alpn")] -impl IoStream for SslStream { - #[inline] - fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { - let _ = self.get_mut().shutdown(); - Ok(()) - } - - #[inline] - fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { - self.get_mut().get_mut().set_nodelay(nodelay) - } - - #[inline] - fn set_linger(&mut self, dur: Option) -> io::Result<()> { - self.get_mut().get_mut().set_linger(dur) - } -} - -#[cfg(feature = "tls")] -use tokio_tls::TlsStream; - -#[cfg(feature = "tls")] -impl IoStream for TlsStream { - #[inline] - fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { - let _ = self.get_mut().shutdown(); - Ok(()) - } - - #[inline] - fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { - self.get_mut().get_mut().set_nodelay(nodelay) - } - - #[inline] - fn set_linger(&mut self, dur: Option) -> io::Result<()> { - self.get_mut().get_mut().set_linger(dur) - } -} - -#[cfg(feature = "rust-tls")] -use rustls::{ClientSession, ServerSession}; -#[cfg(feature = "rust-tls")] -use tokio_rustls::TlsStream as RustlsStream; - -#[cfg(feature = "rust-tls")] -impl IoStream for RustlsStream { - #[inline] - fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { - let _ = ::shutdown(self); - Ok(()) - } - - #[inline] - fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { - self.get_mut().0.set_nodelay(nodelay) - } - - #[inline] - fn set_linger(&mut self, dur: Option) -> io::Result<()> { - self.get_mut().0.set_linger(dur) - } -} - 
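The `AcceptorService` trait introduced earlier in this hunk becomes the extension point for connection handshakes; the per-feature `IoStream` impls removed here reappear next to the concrete acceptor types under `src/server/ssl/` later in the patch. A hedged sketch of how the pieces added by this patch are meant to fit together from user code, based on the `bind_with()` method and re-exports shown in this diff (the `alpn` feature is assumed, and the certificate paths and address are placeholders):

```rust
extern crate actix_web;
extern crate openssl;

use actix_web::server::{OpensslAcceptor, ServerFlags};
use actix_web::{server, App, HttpResponse};
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};

fn main() {
    let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    builder
        .set_private_key_file("key.pem", SslFiletype::PEM)
        .unwrap();
    builder.set_certificate_chain_file("cert.pem").unwrap();

    // Advertise both h2 and http/1.1 via ALPN.
    let acceptor =
        OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2)
            .unwrap();

    server::new(|| App::new().resource("/", |r| r.f(|_| HttpResponse::Ok())))
        .maxconn(100_000) // accept backpressure limits, also from this series
        .maxconnrate(256)
        .bind_with("127.0.0.1:8443", acceptor)
        .unwrap()
        .run();
}
```

The deprecated `listen_ssl()`, `bind_tls()` and `bind_rustls()` wrappers further down in this patch reduce to exactly this kind of `listen_with()`/`bind_with()` call.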
-#[cfg(feature = "rust-tls")] -impl IoStream for RustlsStream { - #[inline] - fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { - let _ = ::shutdown(self); - Ok(()) - } - - #[inline] - fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { - self.get_mut().0.set_nodelay(nodelay) - } - - #[inline] - fn set_linger(&mut self, dur: Option) -> io::Result<()> { - self.get_mut().0.set_linger(dur) - } -} diff --git a/src/server/settings.rs b/src/server/settings.rs index 8e30646d9..508be67dd 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -132,7 +132,7 @@ impl ServerSettings { const DATE_VALUE_LENGTH: usize = 29; pub(crate) struct WorkerSettings { - h: RefCell>, + h: Vec, keep_alive: u64, ka_enabled: bool, bytes: Rc, @@ -140,14 +140,14 @@ pub(crate) struct WorkerSettings { channels: Arc, node: RefCell>, date: UnsafeCell, - sslrate: Arc, + connrate: Arc, notify: AcceptNotify, } impl WorkerSettings { pub(crate) fn new( h: Vec, keep_alive: KeepAlive, settings: ServerSettings, - notify: AcceptNotify, channels: Arc, sslrate: Arc, + notify: AcceptNotify, channels: Arc, connrate: Arc, ) -> WorkerSettings { let (keep_alive, ka_enabled) = match keep_alive { KeepAlive::Timeout(val) => (val as u64, true), @@ -156,7 +156,7 @@ impl WorkerSettings { }; WorkerSettings { - h: RefCell::new(h), + h, bytes: Rc::new(SharedBytesPool::new()), messages: RequestPool::pool(settings), node: RefCell::new(Node::head()), @@ -164,7 +164,7 @@ impl WorkerSettings { keep_alive, ka_enabled, channels, - sslrate, + connrate, notify, } } @@ -177,8 +177,8 @@ impl WorkerSettings { self.node.borrow_mut() } - pub fn handlers(&self) -> RefMut> { - self.h.borrow_mut() + pub fn handlers(&self) -> &Vec { + &self.h } pub fn keep_alive(&self) -> u64 { @@ -230,13 +230,13 @@ impl WorkerSettings { } #[allow(dead_code)] - pub(crate) fn ssl_conn_add(&self) { - self.sslrate.fetch_add(1, Ordering::Relaxed); + pub(crate) fn conn_rate_add(&self) { + self.connrate.fetch_add(1, Ordering::Relaxed); } #[allow(dead_code)] - pub(crate) fn ssl_conn_del(&self) { - let val = self.sslrate.fetch_sub(1, Ordering::Relaxed); - self.notify.notify_maxsslrate(val); + pub(crate) fn conn_rate_del(&self) { + let val = self.connrate.fetch_sub(1, Ordering::Relaxed); + self.notify.notify_maxconnrate(val); } } diff --git a/src/server/srv.rs b/src/server/srv.rs index b6bd21967..33c820aa7 100644 --- a/src/server/srv.rs +++ b/src/server/srv.rs @@ -10,15 +10,15 @@ use actix::{ use futures::sync::mpsc; use futures::{Future, Sink, Stream}; -use net2::TcpBuilder; use num_cpus; use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_tcp::TcpStream; #[cfg(feature = "tls")] use native_tls::TlsAcceptor; #[cfg(feature = "alpn")] -use openssl::ssl::{AlpnError, SslAcceptorBuilder}; +use openssl::ssl::SslAcceptorBuilder; #[cfg(feature = "rust-tls")] use rustls::ServerConfig; @@ -26,43 +26,25 @@ use rustls::ServerConfig; use super::accept::{AcceptLoop, AcceptNotify, Command}; use super::channel::{HttpChannel, WrapperStream}; use super::settings::{ServerSettings, WorkerSettings}; -use super::worker::{ - Conn, StopWorker, StreamHandlerType, Worker, WorkerClient, WorkersPool, -}; -use super::{IntoHttpHandler, IoStream, KeepAlive}; +use super::worker::{Conn, StopWorker, Token, Worker, WorkerClient, WorkerFactory}; +use super::{AcceptorService, IntoHttpHandler, IoStream, KeepAlive}; use super::{PauseServer, ResumeServer, StopServer}; -#[cfg(feature = "alpn")] -fn configure_alpn(builder: &mut SslAcceptorBuilder) -> io::Result<()> { - 
builder.set_alpn_protos(b"\x02h2\x08http/1.1")?; - builder.set_alpn_select_callback(|_, protos| { - const H2: &[u8] = b"\x02h2"; - if protos.windows(3).any(|window| window == H2) { - Ok(b"h2") - } else { - Err(AlpnError::NOACK) - } - }); - Ok(()) -} - /// An HTTP Server pub struct HttpServer where H: IntoHttpHandler + 'static, { - h: Option>>, threads: usize, - backlog: i32, - sockets: Vec, - pool: WorkersPool, - workers: Vec<(usize, Addr>)>, + factory: WorkerFactory, + workers: Vec<(usize, Addr)>, accept: AcceptLoop, exit: bool, shutdown_timeout: u16, signals: Option>, no_http2: bool, no_signals: bool, + settings: Option>>, } pub(crate) enum ServerCommand { @@ -76,12 +58,6 @@ where type Context = Context; } -pub(crate) struct Socket { - pub lst: net::TcpListener, - pub addr: net::SocketAddr, - pub tp: StreamHandlerType, -} - impl HttpServer where H: IntoHttpHandler + 'static, @@ -95,18 +71,16 @@ where let f = move || (factory)().into_iter().collect(); HttpServer { - h: None, threads: num_cpus::get(), - backlog: 2048, - pool: WorkersPool::new(f), + factory: WorkerFactory::new(f), workers: Vec::new(), - sockets: Vec::new(), accept: AcceptLoop::new(), exit: false, shutdown_timeout: 30, signals: None, no_http2: false, no_signals: false, + settings: None, } } @@ -130,7 +104,7 @@ where /// /// This method should be called before `bind()` method call. pub fn backlog(mut self, num: i32) -> Self { - self.backlog = num; + self.factory.backlog = num; self } @@ -140,20 +114,19 @@ where /// for each worker. /// /// By default max connections is set to a 100k. - pub fn max_connections(mut self, num: usize) -> Self { - self.accept.max_connections(num); + pub fn maxconn(mut self, num: usize) -> Self { + self.accept.maxconn(num); self } - /// Sets the maximum concurrent per-worker number of SSL handshakes. + /// Sets the maximum per-worker concurrent connection establish process. /// /// All listeners will stop accepting connections when this limit is reached. It - /// can be used to limit the global SSL CPU usage regardless of each worker - /// capacity. + /// can be used to limit the global SSL CPU usage. /// /// By default max connections is set to a 256. - pub fn max_sslrate(mut self, num: usize) -> Self { - self.accept.max_sslrate(num); + pub fn maxconnrate(mut self, num: usize) -> Self { + self.accept.maxconnrate(num); self } @@ -161,7 +134,7 @@ where /// /// By default keep alive is set to a `Os`. pub fn keep_alive>(mut self, val: T) -> Self { - self.pool.keep_alive = val.into(); + self.factory.keep_alive = val.into(); self } @@ -171,7 +144,7 @@ where /// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo. /// html#method.host) documentation for more information. pub fn server_hostname(mut self, val: String) -> Self { - self.pool.host = Some(val); + self.factory.host = Some(val); self } @@ -215,7 +188,7 @@ where /// Get addresses of bound sockets. pub fn addrs(&self) -> Vec { - self.sockets.iter().map(|s| s.addr).collect() + self.factory.addrs() } /// Get addresses of bound sockets and the scheme for it. @@ -225,10 +198,7 @@ where /// and the user should be presented with an enumeration of which /// socket requires which protocol. 
pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> { - self.sockets - .iter() - .map(|s| (s.addr, s.tp.scheme())) - .collect() + self.factory.addrs_with_scheme() } /// Use listener for accepting incoming connection requests @@ -236,175 +206,177 @@ where /// HttpServer does not change any configuration for TcpListener, /// it needs to be configured before passing it to listen() method. pub fn listen(mut self, lst: net::TcpListener) -> Self { - let addr = lst.local_addr().unwrap(); - self.sockets.push(Socket { - addr, - lst, - tp: StreamHandlerType::Normal, - }); + self.factory.listen(lst); self } + /// Use listener for accepting incoming connection requests + pub fn listen_with( + mut self, lst: net::TcpListener, acceptor: A, + ) -> io::Result + where + A: AcceptorService + Send + 'static, + { + self.factory.listen_with(lst, acceptor); + Ok(self) + } + #[cfg(feature = "tls")] + #[doc(hidden)] + #[deprecated( + since = "0.7.4", + note = "please use `actix_web::HttpServer::listen_with()` and `actix_web::server::NativeTlsAcceptor` instead" + )] /// Use listener for accepting incoming tls connection requests /// /// HttpServer does not change any configuration for TcpListener, /// it needs to be configured before passing it to listen() method. - pub fn listen_tls(mut self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { - let addr = lst.local_addr().unwrap(); - self.sockets.push(Socket { - addr, - lst, - tp: StreamHandlerType::Tls(acceptor.clone()), - }); - self + pub fn listen_tls( + self, lst: net::TcpListener, acceptor: TlsAcceptor, + ) -> io::Result { + use super::NativeTlsAcceptor; + + self.listen_with(lst, NativeTlsAcceptor::new(acceptor)) } #[cfg(feature = "alpn")] + #[doc(hidden)] + #[deprecated( + since = "0.7.4", + note = "please use `actix_web::HttpServer::listen_with()` and `actix_web::server::OpensslAcceptor` instead" + )] /// Use listener for accepting incoming tls connection requests /// /// This method sets alpn protocols to "h2" and "http/1.1" pub fn listen_ssl( - mut self, lst: net::TcpListener, mut builder: SslAcceptorBuilder, + self, lst: net::TcpListener, builder: SslAcceptorBuilder, ) -> io::Result { + use super::{OpensslAcceptor, ServerFlags}; + // alpn support - if !self.no_http2 { - configure_alpn(&mut builder)?; - } - let acceptor = builder.build(); - let addr = lst.local_addr().unwrap(); - self.sockets.push(Socket { - addr, - lst, - tp: StreamHandlerType::Alpn(acceptor.clone()), - }); - Ok(self) + let flags = if !self.no_http2 { + ServerFlags::HTTP1 + } else { + ServerFlags::HTTP1 | ServerFlags::HTTP2 + }; + + self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?) 
} #[cfg(feature = "rust-tls")] + #[doc(hidden)] + #[deprecated( + since = "0.7.4", + note = "please use `actix_web::HttpServer::listen_with()` and `actix_web::server::RustlsAcceptor` instead" + )] /// Use listener for accepting incoming tls connection requests /// /// This method sets alpn protocols to "h2" and "http/1.1" pub fn listen_rustls( - mut self, lst: net::TcpListener, mut builder: ServerConfig, + self, lst: net::TcpListener, builder: ServerConfig, ) -> io::Result { + use super::{RustlsAcceptor, ServerFlags}; + // alpn support - if !self.no_http2 { - builder.set_protocols(&vec!["h2".to_string(), "http/1.1".to_string()]); - } - let addr = lst.local_addr().unwrap(); - self.sockets.push(Socket { - addr, - lst, - tp: StreamHandlerType::Rustls(Arc::new(builder)), - }); - Ok(self) - } - - fn bind2(&mut self, addr: S) -> io::Result> { - let mut err = None; - let mut succ = false; - let mut sockets = Vec::new(); - for addr in addr.to_socket_addrs()? { - match create_tcp_listener(addr, self.backlog) { - Ok(lst) => { - succ = true; - let addr = lst.local_addr().unwrap(); - sockets.push(Socket { - lst, - addr, - tp: StreamHandlerType::Normal, - }); - } - Err(e) => err = Some(e), - } - } - - if !succ { - if let Some(e) = err.take() { - Err(e) - } else { - Err(io::Error::new( - io::ErrorKind::Other, - "Can not bind to address.", - )) - } + let flags = if !self.no_http2 { + ServerFlags::HTTP1 } else { - Ok(sockets) - } + ServerFlags::HTTP1 | ServerFlags::HTTP2 + }; + + self.listen_with(lst, RustlsAcceptor::with_flags(builder, flags)) } /// The socket address to bind /// /// To bind multiple addresses this method can be called multiple times. pub fn bind(mut self, addr: S) -> io::Result { - let sockets = self.bind2(addr)?; - self.sockets.extend(sockets); + self.factory.bind(addr)?; + Ok(self) + } + + /// Start listening for incoming connections with supplied acceptor. + #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] + pub fn bind_with(mut self, addr: S, acceptor: A) -> io::Result + where + S: net::ToSocketAddrs, + A: AcceptorService + Send + 'static, + { + self.factory.bind_with(addr, &acceptor)?; Ok(self) } #[cfg(feature = "tls")] + #[doc(hidden)] + #[deprecated( + since = "0.7.4", + note = "please use `actix_web::HttpServer::bind_with()` and `actix_web::server::NativeTlsAcceptor` instead" + )] /// The ssl socket address to bind /// /// To bind multiple addresses this method can be called multiple times. pub fn bind_tls( - mut self, addr: S, acceptor: TlsAcceptor, + self, addr: S, acceptor: TlsAcceptor, ) -> io::Result { - let sockets = self.bind2(addr)?; - self.sockets.extend(sockets.into_iter().map(|mut s| { - s.tp = StreamHandlerType::Tls(acceptor.clone()); - s - })); - Ok(self) + use super::NativeTlsAcceptor; + + self.bind_with(addr, NativeTlsAcceptor::new(acceptor)) } #[cfg(feature = "alpn")] + #[doc(hidden)] + #[deprecated( + since = "0.7.4", + note = "please use `actix_web::HttpServer::bind_with()` and `actix_web::server::OpensslAcceptor` instead" + )] /// Start listening for incoming tls connections. 
/// /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn bind_ssl( - mut self, addr: S, mut builder: SslAcceptorBuilder, - ) -> io::Result { - // alpn support - if !self.no_http2 { - configure_alpn(&mut builder)?; - } + pub fn bind_ssl(self, addr: S, builder: SslAcceptorBuilder) -> io::Result + where + S: net::ToSocketAddrs, + { + use super::{OpensslAcceptor, ServerFlags}; - let acceptor = builder.build(); - let sockets = self.bind2(addr)?; - self.sockets.extend(sockets.into_iter().map(|mut s| { - s.tp = StreamHandlerType::Alpn(acceptor.clone()); - s - })); - Ok(self) + // alpn support + let flags = if !self.no_http2 { + ServerFlags::HTTP1 + } else { + ServerFlags::HTTP1 | ServerFlags::HTTP2 + }; + + self.bind_with(addr, OpensslAcceptor::with_flags(builder, flags)?) } #[cfg(feature = "rust-tls")] + #[doc(hidden)] + #[deprecated( + since = "0.7.4", + note = "please use `actix_web::HttpServer::bind_with()` and `actix_web::server::RustlsAcceptor` instead" + )] /// Start listening for incoming tls connections. /// /// This method sets alpn protocols to "h2" and "http/1.1" pub fn bind_rustls( - mut self, addr: S, mut builder: ServerConfig, + self, addr: S, builder: ServerConfig, ) -> io::Result { - // alpn support - if !self.no_http2 { - builder.set_protocols(&vec!["h2".to_string(), "http/1.1".to_string()]); - } + use super::{RustlsAcceptor, ServerFlags}; - let builder = Arc::new(builder); - let sockets = self.bind2(addr)?; - self.sockets.extend(sockets.into_iter().map(move |mut s| { - s.tp = StreamHandlerType::Rustls(builder.clone()); - s - })); - Ok(self) + // alpn support + let flags = if !self.no_http2 { + ServerFlags::HTTP1 + } else { + ServerFlags::HTTP1 | ServerFlags::HTTP2 + }; + + self.bind_with(addr, RustlsAcceptor::with_flags(builder, flags)) } fn start_workers(&mut self, notify: &AcceptNotify) -> Vec { // start workers let mut workers = Vec::new(); for idx in 0..self.threads { - let (worker, addr) = self.pool.start(idx, notify.clone()); + let (worker, addr) = self.factory.start(idx, notify.clone()); workers.push(worker); self.workers.push((idx, addr)); } @@ -453,23 +425,18 @@ impl HttpServer { /// } /// ``` pub fn start(mut self) -> Addr { - if self.sockets.is_empty() { + let sockets = self.factory.take_sockets(); + if sockets.is_empty() { panic!("HttpServer::bind() has to be called before start()"); } else { - let mut addrs: Vec<(usize, Socket)> = Vec::new(); - - for socket in self.sockets.drain(..) { - let token = self.pool.insert(socket.addr, socket.tp.clone()); - addrs.push((token, socket)); - } let notify = self.accept.get_notify(); let workers = self.start_workers(¬ify); // start accept thread - for (_, sock) in &addrs { + for sock in &sockets { info!("Starting server on http://{}", sock.addr); } - let rx = self.accept.start(addrs, workers.clone()); + let rx = self.accept.start(sockets, workers.clone()); // start http server actor let signals = self.subscribe_to_signals(); @@ -511,64 +478,6 @@ impl HttpServer { } } -#[doc(hidden)] -#[cfg(feature = "tls")] -#[deprecated( - since = "0.6.0", - note = "please use `actix_web::HttpServer::bind_tls` instead" -)] -impl HttpServer { - /// Start listening for incoming tls connections. 
- pub fn start_tls(mut self, acceptor: TlsAcceptor) -> io::Result> { - for sock in &mut self.sockets { - match sock.tp { - StreamHandlerType::Normal => (), - _ => continue, - } - sock.tp = StreamHandlerType::Tls(acceptor.clone()); - } - Ok(self.start()) - } -} - -#[doc(hidden)] -#[cfg(feature = "alpn")] -#[deprecated( - since = "0.6.0", - note = "please use `actix_web::HttpServer::bind_ssl` instead" -)] -impl HttpServer { - /// Start listening for incoming tls connections. - /// - /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn start_ssl( - mut self, mut builder: SslAcceptorBuilder, - ) -> io::Result> { - // alpn support - if !self.no_http2 { - builder.set_alpn_protos(b"\x02h2\x08http/1.1")?; - builder.set_alpn_select_callback(|_, protos| { - const H2: &[u8] = b"\x02h2"; - if protos.windows(3).any(|window| window == H2) { - Ok(b"h2") - } else { - Err(AlpnError::NOACK) - } - }); - } - - let acceptor = builder.build(); - for sock in &mut self.sockets { - match sock.tp { - StreamHandlerType::Normal => (), - _ => continue, - } - sock.tp = StreamHandlerType::Alpn(acceptor.clone()); - } - Ok(self.start()) - } -} - impl HttpServer { /// Start listening for incoming connections from a stream. /// @@ -580,14 +489,14 @@ impl HttpServer { { // set server settings let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); - let settings = ServerSettings::new(Some(addr), &self.pool.host, secure); - let apps: Vec<_> = (*self.pool.factory)() + let settings = ServerSettings::new(Some(addr), &self.factory.host, secure); + let apps: Vec<_> = (*self.factory.factory)() .into_iter() .map(|h| h.into_handler()) .collect(); - self.h = Some(Rc::new(WorkerSettings::new( + self.settings = Some(Rc::new(WorkerSettings::new( apps, - self.pool.keep_alive, + self.factory.keep_alive, settings, AcceptNotify::default(), Arc::new(AtomicUsize::new(0)), @@ -599,7 +508,7 @@ impl HttpServer { let addr = HttpServer::create(move |ctx| { ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn { io: WrapperStream::new(t), - token: 0, + token: Token::new(0), peer: None, http2: false, })); @@ -672,7 +581,7 @@ impl StreamHandler for HttpServer { } let (worker, addr) = - self.pool.start(new_idx, self.accept.get_notify()); + self.factory.start(new_idx, self.accept.get_notify()); self.workers.push((new_idx, addr)); self.accept.send(Command::Worker(worker)); } @@ -690,7 +599,7 @@ where fn handle(&mut self, msg: Conn, _: &mut Context) -> Self::Result { Arbiter::spawn(HttpChannel::new( - Rc::clone(self.h.as_ref().unwrap()), + Rc::clone(self.settings.as_ref().unwrap()), msg.io, msg.peer, msg.http2, @@ -766,15 +675,3 @@ impl Handler for HttpServer { } } } - -fn create_tcp_listener( - addr: net::SocketAddr, backlog: i32, -) -> io::Result { - let builder = match addr { - net::SocketAddr::V4(_) => TcpBuilder::new_v4()?, - net::SocketAddr::V6(_) => TcpBuilder::new_v6()?, - }; - builder.reuse_address(true)?; - builder.bind(addr)?; - Ok(builder.listen(backlog)?) 
-} diff --git a/src/server/ssl/mod.rs b/src/server/ssl/mod.rs new file mode 100644 index 000000000..d99c4a584 --- /dev/null +++ b/src/server/ssl/mod.rs @@ -0,0 +1,14 @@ +#[cfg(feature = "alpn")] +mod openssl; +#[cfg(feature = "alpn")] +pub use self::openssl::OpensslAcceptor; + +#[cfg(feature = "tls")] +mod nativetls; +#[cfg(feature = "tls")] +pub use self::nativetls::NativeTlsAcceptor; + +#[cfg(feature = "rust-tls")] +mod rustls; +#[cfg(feature = "rust-tls")] +pub use self::rustls::RustlsAcceptor; diff --git a/src/server/ssl/nativetls.rs b/src/server/ssl/nativetls.rs new file mode 100644 index 000000000..8749599e9 --- /dev/null +++ b/src/server/ssl/nativetls.rs @@ -0,0 +1,67 @@ +use std::net::Shutdown; +use std::{io, time}; + +use futures::{Future, Poll}; +use native_tls::TlsAcceptor; +use tokio_tls::{AcceptAsync, TlsAcceptorExt, TlsStream}; + +use server::{AcceptorService, IoStream}; + +#[derive(Clone)] +/// Support `SSL` connections via native-tls package +/// +/// `tls` feature enables `NativeTlsAcceptor` type +pub struct NativeTlsAcceptor { + acceptor: TlsAcceptor, +} + +impl NativeTlsAcceptor { + /// Create `NativeTlsAcceptor` instance + pub fn new(acceptor: TlsAcceptor) -> Self { + NativeTlsAcceptor { acceptor } + } +} + +pub struct AcceptorFut(AcceptAsync); + +impl Future for AcceptorFut { + type Item = TlsStream; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + self.0 + .poll() + .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + } +} + +impl AcceptorService for NativeTlsAcceptor { + type Accepted = TlsStream; + type Future = AcceptorFut; + + fn scheme(&self) -> &'static str { + "https" + } + + fn accept(&self, io: Io) -> Self::Future { + AcceptorFut(TlsAcceptorExt::accept_async(&self.acceptor, io)) + } +} + +impl IoStream for TlsStream { + #[inline] + fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { + let _ = self.get_mut().shutdown(); + Ok(()) + } + + #[inline] + fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { + self.get_mut().get_mut().set_nodelay(nodelay) + } + + #[inline] + fn set_linger(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().get_mut().set_linger(dur) + } +} diff --git a/src/server/ssl/openssl.rs b/src/server/ssl/openssl.rs new file mode 100644 index 000000000..996c510dc --- /dev/null +++ b/src/server/ssl/openssl.rs @@ -0,0 +1,96 @@ +use std::net::Shutdown; +use std::{io, time}; + +use futures::{Future, Poll}; +use openssl::ssl::{AlpnError, SslAcceptor, SslAcceptorBuilder}; +use tokio_openssl::{AcceptAsync, SslAcceptorExt, SslStream}; + +use server::{AcceptorService, IoStream, ServerFlags}; + +#[derive(Clone)] +/// Support `SSL` connections via openssl package +/// +/// `alpn` feature enables `OpensslAcceptor` type +pub struct OpensslAcceptor { + acceptor: SslAcceptor, +} + +impl OpensslAcceptor { + /// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support. + pub fn new(builder: SslAcceptorBuilder) -> io::Result { + OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2) + } + + /// Create `OpensslAcceptor` with custom server flags. 
+ pub fn with_flags( + mut builder: SslAcceptorBuilder, flags: ServerFlags, + ) -> io::Result { + let mut protos = Vec::new(); + if flags.contains(ServerFlags::HTTP1) { + protos.extend(b"\x08http/1.1"); + } + if flags.contains(ServerFlags::HTTP2) { + protos.extend(b"\x02h2"); + builder.set_alpn_select_callback(|_, protos| { + const H2: &[u8] = b"\x02h2"; + if protos.windows(3).any(|window| window == H2) { + Ok(b"h2") + } else { + Err(AlpnError::NOACK) + } + }); + } + + if !protos.is_empty() { + builder.set_alpn_protos(&protos)?; + } + + Ok(OpensslAcceptor { + acceptor: builder.build(), + }) + } +} + +pub struct AcceptorFut(AcceptAsync); + +impl Future for AcceptorFut { + type Item = SslStream; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + self.0 + .poll() + .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + } +} + +impl AcceptorService for OpensslAcceptor { + type Accepted = SslStream; + type Future = AcceptorFut; + + fn scheme(&self) -> &'static str { + "https" + } + + fn accept(&self, io: Io) -> Self::Future { + AcceptorFut(SslAcceptorExt::accept_async(&self.acceptor, io)) + } +} + +impl IoStream for SslStream { + #[inline] + fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { + let _ = self.get_mut().shutdown(); + Ok(()) + } + + #[inline] + fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { + self.get_mut().get_mut().set_nodelay(nodelay) + } + + #[inline] + fn set_linger(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().get_mut().set_linger(dur) + } +} diff --git a/src/server/ssl/rustls.rs b/src/server/ssl/rustls.rs new file mode 100644 index 000000000..45cb61be7 --- /dev/null +++ b/src/server/ssl/rustls.rs @@ -0,0 +1,92 @@ +use std::net::Shutdown; +use std::sync::Arc; +use std::{io, time}; + +use rustls::{ClientSession, ServerConfig, ServerSession}; +use tokio_io::AsyncWrite; +use tokio_rustls::{AcceptAsync, ServerConfigExt, TlsStream}; + +use server::{AcceptorService, IoStream, ServerFlags}; + +#[derive(Clone)] +/// Support `SSL` connections via rustls package +/// +/// `rust-tls` feature enables `RustlsAcceptor` type +pub struct RustlsAcceptor { + config: Arc, +} + +impl RustlsAcceptor { + /// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support. + pub fn new(config: ServerConfig) -> Self { + RustlsAcceptor::with_flags(config, ServerFlags::HTTP1 | ServerFlags::HTTP2) + } + + /// Create `OpensslAcceptor` with custom server flags. 
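For reference, the `\x02h2` and `\x08http/1.1` byte strings assembled in `OpensslAcceptor::with_flags()` above are the raw ALPN wire format from RFC 7301: each protocol name is prefixed by its one-byte length. Below is a standalone helper that produces the same buffer (illustrative only, not code from this patch); the rustls variant that follows passes plain protocol names instead, since `ServerConfig::set_protocols()` takes the names directly:

```rust
// Build an ALPN protocol list in the length-prefixed form OpenSSL expects.
fn alpn_protos(names: &[&str]) -> Vec<u8> {
    let mut buf = Vec::new();
    for name in names {
        buf.push(name.len() as u8);
        buf.extend_from_slice(name.as_bytes());
    }
    buf
}

fn main() {
    assert_eq!(
        alpn_protos(&["h2", "http/1.1"]),
        b"\x02h2\x08http/1.1".to_vec()
    );
}
```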
+ pub fn with_flags(mut config: ServerConfig, flags: ServerFlags) -> Self { + let mut protos = Vec::new(); + if flags.contains(ServerFlags::HTTP1) { + protos.push("http/1.1".to_string()); + } + if flags.contains(ServerFlags::HTTP2) { + protos.push("h2".to_string()); + } + + if !protos.is_empty() { + config.set_protocols(&protos); + } + + RustlsAcceptor { + config: Arc::new(config), + } + } +} + +impl AcceptorService for RustlsAcceptor { + type Accepted = TlsStream; + type Future = AcceptAsync; + + fn scheme(&self) -> &'static str { + "https" + } + + fn accept(&self, io: Io) -> Self::Future { + ServerConfigExt::accept_async(&self.config, io) + } +} + +impl IoStream for TlsStream { + #[inline] + fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { + let _ = ::shutdown(self); + Ok(()) + } + + #[inline] + fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { + self.get_mut().0.set_nodelay(nodelay) + } + + #[inline] + fn set_linger(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().0.set_linger(dur) + } +} + +impl IoStream for TlsStream { + #[inline] + fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { + let _ = ::shutdown(self); + Ok(()) + } + + #[inline] + fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { + self.get_mut().0.set_nodelay(nodelay) + } + + #[inline] + fn set_linger(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().0.set_linger(dur) + } +} diff --git a/src/server/worker.rs b/src/server/worker.rs index e9bf42250..3b8f426db 100644 --- a/src/server/worker.rs +++ b/src/server/worker.rs @@ -1,102 +1,195 @@ +use std::marker::PhantomData; use std::rc::Rc; use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; -use std::{net, time}; +use std::{io, mem, net, time}; use futures::sync::mpsc::{unbounded, SendError, UnboundedSender}; use futures::sync::oneshot; use futures::Future; -use net2::TcpStreamExt; -use slab::Slab; +use net2::{TcpBuilder, TcpStreamExt}; use tokio::executor::current_thread; -use tokio_reactor::Handle; use tokio_tcp::TcpStream; -#[cfg(any(feature = "tls", feature = "alpn", feature = "rust-tls"))] -use futures::future; - -#[cfg(feature = "tls")] -use native_tls::TlsAcceptor; -#[cfg(feature = "tls")] -use tokio_tls::TlsAcceptorExt; - -#[cfg(feature = "alpn")] -use openssl::ssl::SslAcceptor; -#[cfg(feature = "alpn")] -use tokio_openssl::SslAcceptorExt; - -#[cfg(feature = "rust-tls")] -use rustls::{ServerConfig, Session}; -#[cfg(feature = "rust-tls")] -use tokio_rustls::ServerConfigExt; - use actix::msgs::StopArbiter; use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, Message, Response}; use super::accept::AcceptNotify; use super::channel::HttpChannel; use super::settings::{ServerSettings, WorkerSettings}; -use super::{HttpHandler, IntoHttpHandler, KeepAlive}; +use super::{ + AcceptorService, HttpHandler, IntoAsyncIo, IntoHttpHandler, IoStream, KeepAlive, +}; #[derive(Message)] pub(crate) struct Conn { pub io: T, - pub token: usize, + pub token: Token, pub peer: Option, pub http2: bool, } -#[derive(Clone)] -pub(crate) struct SocketInfo { - pub addr: net::SocketAddr, - pub htype: StreamHandlerType, +#[derive(Clone, Copy)] +pub struct Token(usize); + +impl Token { + pub(crate) fn new(val: usize) -> Token { + Token(val) + } } -pub(crate) struct WorkersPool { - sockets: Slab, +pub(crate) struct Socket { + pub lst: net::TcpListener, + pub addr: net::SocketAddr, + pub token: Token, +} + +pub(crate) struct WorkerFactory { pub factory: Arc Vec + Send + Sync>, pub host: Option, pub keep_alive: KeepAlive, + pub 
backlog: i32, + sockets: Vec, + handlers: Vec>>, } -impl WorkersPool { +impl WorkerFactory { pub fn new(factory: F) -> Self where F: Fn() -> Vec + Send + Sync + 'static, { - WorkersPool { + WorkerFactory { factory: Arc::new(factory), host: None, + backlog: 2048, keep_alive: KeepAlive::Os, - sockets: Slab::new(), + sockets: Vec::new(), + handlers: Vec::new(), } } - pub fn insert(&mut self, addr: net::SocketAddr, htype: StreamHandlerType) -> usize { - let entry = self.sockets.vacant_entry(); - let token = entry.key(); - entry.insert(SocketInfo { addr, htype }); - token + pub fn addrs(&self) -> Vec { + self.sockets.iter().map(|s| s.addr).collect() + } + + pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> { + self.handlers + .iter() + .map(|s| (s.addr(), s.scheme())) + .collect() + } + + pub fn take_sockets(&mut self) -> Vec { + mem::replace(&mut self.sockets, Vec::new()) + } + + pub fn listen(&mut self, lst: net::TcpListener) { + let token = Token(self.handlers.len()); + let addr = lst.local_addr().unwrap(); + self.handlers + .push(Box::new(SimpleHandler::new(lst.local_addr().unwrap()))); + self.sockets.push(Socket { lst, addr, token }) + } + + pub fn listen_with(&mut self, lst: net::TcpListener, acceptor: A) + where + A: AcceptorService + Send + 'static, + { + let token = Token(self.handlers.len()); + let addr = lst.local_addr().unwrap(); + self.handlers.push(Box::new(StreamHandler::new( + lst.local_addr().unwrap(), + acceptor, + ))); + self.sockets.push(Socket { lst, addr, token }) + } + + pub fn bind(&mut self, addr: S) -> io::Result<()> + where + S: net::ToSocketAddrs, + { + let sockets = self.bind2(addr)?; + + for lst in sockets { + let token = Token(self.handlers.len()); + let addr = lst.local_addr().unwrap(); + self.handlers + .push(Box::new(SimpleHandler::new(lst.local_addr().unwrap()))); + self.sockets.push(Socket { lst, addr, token }) + } + Ok(()) + } + + pub fn bind_with(&mut self, addr: S, acceptor: &A) -> io::Result<()> + where + S: net::ToSocketAddrs, + A: AcceptorService + Send + 'static, + { + let sockets = self.bind2(addr)?; + + for lst in sockets { + let token = Token(self.handlers.len()); + let addr = lst.local_addr().unwrap(); + self.handlers.push(Box::new(StreamHandler::new( + lst.local_addr().unwrap(), + acceptor.clone(), + ))); + self.sockets.push(Socket { lst, addr, token }) + } + Ok(()) + } + + fn bind2( + &self, addr: S, + ) -> io::Result> { + let mut err = None; + let mut succ = false; + let mut sockets = Vec::new(); + for addr in addr.to_socket_addrs()? 
{ + match create_tcp_listener(addr, self.backlog) { + Ok(lst) => { + succ = true; + sockets.push(lst); + } + Err(e) => err = Some(e), + } + } + + if !succ { + if let Some(e) = err.take() { + Err(e) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "Can not bind to address.", + )) + } + } else { + Ok(sockets) + } } pub fn start( &mut self, idx: usize, notify: AcceptNotify, - ) -> (WorkerClient, Addr>) { + ) -> (WorkerClient, Addr) { let host = self.host.clone(); - let addr = self.sockets[0].addr; + let addr = self.handlers[0].addr(); let factory = Arc::clone(&self.factory); - let socks = self.sockets.clone(); let ka = self.keep_alive; let (tx, rx) = unbounded::>(); - let client = WorkerClient::new(idx, tx, self.sockets.clone()); + let client = WorkerClient::new(idx, tx); let conn = client.conn.clone(); let sslrate = client.sslrate.clone(); + let handlers: Vec<_> = self.handlers.iter().map(|v| v.clone()).collect(); let addr = Arbiter::start(move |ctx: &mut Context<_>| { let s = ServerSettings::new(Some(addr), &host, false); let apps: Vec<_> = (*factory)().into_iter().map(|h| h.into_handler()).collect(); ctx.add_message_stream(rx); - Worker::new(apps, socks, ka, s, conn, sslrate, notify) + let inner = WorkerInner::new(apps, handlers, ka, s, conn, sslrate, notify); + Worker { + inner: Box::new(inner), + } }); (client, addr) @@ -107,19 +200,15 @@ impl WorkersPool { pub(crate) struct WorkerClient { pub idx: usize, tx: UnboundedSender>, - info: Slab, pub conn: Arc, pub sslrate: Arc, } impl WorkerClient { - fn new( - idx: usize, tx: UnboundedSender>, info: Slab, - ) -> Self { + fn new(idx: usize, tx: UnboundedSender>) -> Self { WorkerClient { idx, tx, - info, conn: Arc::new(AtomicUsize::new(0)), sslrate: Arc::new(AtomicUsize::new(0)), } @@ -154,47 +243,30 @@ impl Message for StopWorker { /// /// Worker accepts Socket objects via unbounded channel and start requests /// processing. 
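The restructuring that follows replaces the generic worker actor with a non-generic `Worker` that owns a boxed `WorkerHandler`, so the `HttpHandler` type parameter no longer leaks into the actor type. A standalone sketch of that type-erasure pattern, with all names illustrative rather than taken from the patch:

```rust
// The generic parameter moves into an inner struct behind a boxed trait
// object; the outer type stays non-generic.
trait ConnHandler {
    fn handle(&mut self, msg: u32);
    fn num_channels(&self) -> usize;
}

struct Inner<H> {
    app: H,
    channels: usize,
}

impl<H: std::fmt::Display> ConnHandler for Inner<H> {
    fn handle(&mut self, _msg: u32) {
        self.channels += 1;
        println!("{} accepted a connection", self.app);
    }

    fn num_channels(&self) -> usize {
        self.channels
    }
}

struct Worker {
    inner: Box<dyn ConnHandler>,
}

fn main() {
    let mut worker = Worker {
        inner: Box::new(Inner {
            app: "app-0",
            channels: 0,
        }),
    };
    worker.inner.handle(1);
    assert_eq!(worker.inner.num_channels(), 1);
}
```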
-pub(crate) struct Worker -where - H: HttpHandler + 'static, -{ - settings: Rc>, - socks: Slab, - tcp_ka: Option, +pub(crate) struct Worker { + inner: Box, } -impl Worker { - pub(crate) fn new( - h: Vec, socks: Slab, keep_alive: KeepAlive, - settings: ServerSettings, conn: Arc, sslrate: Arc, - notify: AcceptNotify, - ) -> Worker { - let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { - Some(time::Duration::new(val as u64, 0)) - } else { - None - }; +impl Actor for Worker { + type Context = Context; - Worker { - settings: Rc::new(WorkerSettings::new( - h, keep_alive, settings, notify, conn, sslrate, - )), - socks, - tcp_ka, - } + fn started(&mut self, ctx: &mut Self::Context) { + self.update_date(ctx); } +} - fn update_time(&self, ctx: &mut Context) { - self.settings.update_date(); - ctx.run_later(time::Duration::new(1, 0), |slf, ctx| slf.update_time(ctx)); +impl Worker { + fn update_date(&self, ctx: &mut Context) { + self.inner.update_date(); + ctx.run_later(time::Duration::new(1, 0), |slf, ctx| slf.update_date(ctx)); } fn shutdown_timeout( - &self, ctx: &mut Context, tx: oneshot::Sender, dur: time::Duration, + &self, ctx: &mut Context, tx: oneshot::Sender, dur: time::Duration, ) { // sleep for 1 second and then check again ctx.run_later(time::Duration::new(1, 0), move |slf, ctx| { - let num = slf.settings.num_channels(); + let num = slf.inner.num_channels(); if num == 0 { let _ = tx.send(true); Arbiter::current().do_send(StopArbiter(0)); @@ -202,7 +274,7 @@ impl Worker { slf.shutdown_timeout(ctx, tx, d); } else { info!("Force shutdown http worker, {} connections", num); - slf.settings.head().traverse::(); + slf.inner.force_shutdown(); let _ = tx.send(false); Arbiter::current().do_send(StopArbiter(0)); } @@ -210,44 +282,20 @@ impl Worker { } } -impl Actor for Worker -where - H: HttpHandler + 'static, -{ - type Context = Context; - - fn started(&mut self, ctx: &mut Self::Context) { - self.update_time(ctx); - } -} - -impl Handler> for Worker -where - H: HttpHandler + 'static, -{ +impl Handler> for Worker { type Result = (); fn handle(&mut self, msg: Conn, _: &mut Context) { - if self.tcp_ka.is_some() && msg.io.set_keepalive(self.tcp_ka).is_err() { - error!("Can not set socket keep-alive option"); - } - self.socks - .get_mut(msg.token) - .unwrap() - .htype - .handle(Rc::clone(&self.settings), msg); + self.inner.handle_connect(msg) } } /// `StopWorker` message handler -impl Handler for Worker -where - H: HttpHandler + 'static, -{ +impl Handler for Worker { type Result = Response; fn handle(&mut self, msg: StopWorker, ctx: &mut Context) -> Self::Result { - let num = self.settings.num_channels(); + let num = self.inner.num_channels(); if num == 0 { info!("Shutting down http worker, 0 connections"); Response::reply(Ok(true)) @@ -258,148 +306,242 @@ where Response::async(rx.map_err(|_| ())) } else { info!("Force shutdown http worker, {} connections", num); - self.settings.head().traverse::(); + self.inner.force_shutdown(); Response::reply(Ok(false)) } } } -#[derive(Clone)] -pub(crate) enum StreamHandlerType { - Normal, - #[cfg(feature = "tls")] - Tls(TlsAcceptor), - #[cfg(feature = "alpn")] - Alpn(SslAcceptor), - #[cfg(feature = "rust-tls")] - Rustls(Arc), +trait WorkerHandler { + fn update_date(&self); + + fn handle_connect(&mut self, Conn); + + fn force_shutdown(&self); + + fn num_channels(&self) -> usize; } -impl StreamHandlerType { - pub fn is_ssl(&self) -> bool { - match *self { - StreamHandlerType::Normal => false, - #[cfg(feature = "tls")] - StreamHandlerType::Tls(_) => true, - 
#[cfg(feature = "alpn")] - StreamHandlerType::Alpn(_) => true, - #[cfg(feature = "rust-tls")] - StreamHandlerType::Rustls(_) => true, - } - } +struct WorkerInner +where + H: HttpHandler + 'static, +{ + settings: Rc>, + socks: Vec>>, + tcp_ka: Option, +} - fn handle( - &mut self, h: Rc>, msg: Conn, - ) { - match *self { - StreamHandlerType::Normal => { - let _ = msg.io.set_nodelay(true); - let io = TcpStream::from_std(msg.io, &Handle::default()) - .expect("failed to associate TCP stream"); +impl WorkerInner { + pub(crate) fn new( + h: Vec, socks: Vec>>, + keep_alive: KeepAlive, settings: ServerSettings, conn: Arc, + sslrate: Arc, notify: AcceptNotify, + ) -> WorkerInner { + let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { + Some(time::Duration::new(val as u64, 0)) + } else { + None + }; - current_thread::spawn(HttpChannel::new(h, io, msg.peer, msg.http2)); - } - #[cfg(feature = "tls")] - StreamHandlerType::Tls(ref acceptor) => { - let Conn { - io, peer, http2, .. - } = msg; - let _ = io.set_nodelay(true); - let io = TcpStream::from_std(io, &Handle::default()) - .expect("failed to associate TCP stream"); - h.ssl_conn_add(); - - current_thread::spawn(TlsAcceptorExt::accept_async(acceptor, io).then( - move |res| { - h.ssl_conn_del(); - match res { - Ok(io) => current_thread::spawn(HttpChannel::new( - h, io, peer, http2, - )), - Err(err) => { - trace!("Error during handling tls connection: {}", err) - } - }; - future::result(Ok(())) - }, - )); - } - #[cfg(feature = "alpn")] - StreamHandlerType::Alpn(ref acceptor) => { - let Conn { io, peer, .. } = msg; - let _ = io.set_nodelay(true); - let io = TcpStream::from_std(io, &Handle::default()) - .expect("failed to associate TCP stream"); - h.ssl_conn_add(); - - current_thread::spawn(SslAcceptorExt::accept_async(acceptor, io).then( - move |res| { - h.ssl_conn_del(); - match res { - Ok(io) => { - let http2 = if let Some(p) = - io.get_ref().ssl().selected_alpn_protocol() - { - p.len() == 2 && &p == b"h2" - } else { - false - }; - current_thread::spawn(HttpChannel::new( - h, io, peer, http2, - )); - } - Err(err) => { - trace!("Error during handling tls connection: {}", err) - } - }; - future::result(Ok(())) - }, - )); - } - #[cfg(feature = "rust-tls")] - StreamHandlerType::Rustls(ref acceptor) => { - let Conn { io, peer, .. 
} = msg; - let _ = io.set_nodelay(true); - let io = TcpStream::from_std(io, &Handle::default()) - .expect("failed to associate TCP stream"); - h.ssl_conn_add(); - - current_thread::spawn(ServerConfigExt::accept_async(acceptor, io).then( - move |res| { - h.ssl_conn_del(); - match res { - Ok(io) => { - let http2 = if let Some(p) = - io.get_ref().1.get_alpn_protocol() - { - p.len() == 2 && &p == &"h2" - } else { - false - }; - current_thread::spawn(HttpChannel::new( - h, io, peer, http2, - )); - } - Err(err) => { - trace!("Error during handling tls connection: {}", err) - } - }; - future::result(Ok(())) - }, - )); - } - } - } - - pub(crate) fn scheme(&self) -> &'static str { - match *self { - StreamHandlerType::Normal => "http", - #[cfg(feature = "tls")] - StreamHandlerType::Tls(_) => "https", - #[cfg(feature = "alpn")] - StreamHandlerType::Alpn(_) => "https", - #[cfg(feature = "rust-tls")] - StreamHandlerType::Rustls(_) => "https", + WorkerInner { + settings: Rc::new(WorkerSettings::new( + h, keep_alive, settings, notify, conn, sslrate, + )), + socks, + tcp_ka, } } } + +impl WorkerHandler for WorkerInner +where + H: HttpHandler + 'static, +{ + fn update_date(&self) { + self.settings.update_date(); + } + + fn handle_connect(&mut self, msg: Conn) { + if self.tcp_ka.is_some() && msg.io.set_keepalive(self.tcp_ka).is_err() { + error!("Can not set socket keep-alive option"); + } + self.socks[msg.token.0].handle(Rc::clone(&self.settings), msg.io, msg.peer); + } + + fn num_channels(&self) -> usize { + self.settings.num_channels() + } + + fn force_shutdown(&self) { + self.settings.head().traverse::(); + } +} + +struct SimpleHandler { + addr: net::SocketAddr, + io: PhantomData, +} + +impl Clone for SimpleHandler { + fn clone(&self) -> Self { + SimpleHandler { + addr: self.addr, + io: PhantomData, + } + } +} + +impl SimpleHandler { + fn new(addr: net::SocketAddr) -> Self { + SimpleHandler { + addr, + io: PhantomData, + } + } +} + +impl IoStreamHandler for SimpleHandler +where + H: HttpHandler, + Io: IntoAsyncIo + Send + 'static, + Io::Io: IoStream, +{ + fn addr(&self) -> net::SocketAddr { + self.addr + } + + fn clone(&self) -> Box> { + Box::new(Clone::clone(self)) + } + + fn scheme(&self) -> &'static str { + "http" + } + + fn handle(&self, h: Rc>, io: Io, peer: Option) { + let mut io = match io.into_async_io() { + Ok(io) => io, + Err(err) => { + trace!("Failed to create async io: {}", err); + return; + } + }; + let _ = io.set_nodelay(true); + + current_thread::spawn(HttpChannel::new(h, io, peer, false)); + } +} + +struct StreamHandler { + acceptor: A, + addr: net::SocketAddr, + io: PhantomData, +} + +impl> StreamHandler { + fn new(addr: net::SocketAddr, acceptor: A) -> Self { + StreamHandler { + addr, + acceptor, + io: PhantomData, + } + } +} + +impl> Clone for StreamHandler { + fn clone(&self) -> Self { + StreamHandler { + addr: self.addr, + acceptor: self.acceptor.clone(), + io: PhantomData, + } + } +} + +impl IoStreamHandler for StreamHandler +where + H: HttpHandler, + Io: IntoAsyncIo + Send + 'static, + Io::Io: IoStream, + A: AcceptorService + Send + 'static, +{ + fn addr(&self) -> net::SocketAddr { + self.addr + } + + fn clone(&self) -> Box> { + Box::new(Clone::clone(self)) + } + + fn scheme(&self) -> &'static str { + self.acceptor.scheme() + } + + fn handle(&self, h: Rc>, io: Io, peer: Option) { + let mut io = match io.into_async_io() { + Ok(io) => io, + Err(err) => { + trace!("Failed to create async io: {}", err); + return; + } + }; + let _ = io.set_nodelay(true); + + h.conn_rate_add(); + 
current_thread::spawn(self.acceptor.accept(io).then(move |res| { + h.conn_rate_del(); + match res { + Ok(io) => current_thread::spawn(HttpChannel::new(h, io, peer, false)), + Err(err) => trace!("Can not establish connection: {}", err), + } + Ok(()) + })) + } +} + +impl IoStreamHandler for Box> +where + H: HttpHandler, + Io: IntoAsyncIo, +{ + fn addr(&self) -> net::SocketAddr { + self.as_ref().addr() + } + + fn clone(&self) -> Box> { + self.as_ref().clone() + } + + fn scheme(&self) -> &'static str { + self.as_ref().scheme() + } + + fn handle(&self, h: Rc>, io: Io, peer: Option) { + self.as_ref().handle(h, io, peer) + } +} + +pub(crate) trait IoStreamHandler: Send +where + H: HttpHandler, +{ + fn clone(&self) -> Box>; + + fn addr(&self) -> net::SocketAddr; + + fn scheme(&self) -> &'static str; + + fn handle(&self, h: Rc>, io: Io, peer: Option); +} + +fn create_tcp_listener( + addr: net::SocketAddr, backlog: i32, +) -> io::Result { + let builder = match addr { + net::SocketAddr::V4(_) => TcpBuilder::new_v4()?, + net::SocketAddr::V6(_) => TcpBuilder::new_v6()?, + }; + builder.reuse_address(true)?; + builder.bind(addr)?; + Ok(builder.listen(backlog)?) +} From e34b5c08ba280f2a8318b2ed607309e41cb9f4d6 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 3 Aug 2018 19:24:53 -0700 Subject: [PATCH 032/219] allow to pass extra information from acceptor to application level --- src/server/h1.rs | 4 ++++ src/server/h2.rs | 11 +++++++++-- src/server/message.rs | 8 ++++++++ src/server/mod.rs | 7 +++++++ 4 files changed, 28 insertions(+), 2 deletions(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index 085cea005..2c07f0cf4 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -368,6 +368,10 @@ where self.payload = Some(PayloadType::new(&msg.inner.headers, ps)); } + // stream extensions + msg.inner_mut().stream_extensions = + self.stream.get_mut().extensions(); + // set remote addr msg.inner_mut().addr = self.addr; diff --git a/src/server/h2.rs b/src/server/h2.rs index cb5367c5e..9f0725022 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -14,6 +14,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; use tokio_timer::Delay; use error::{Error, PayloadError}; +use extensions::Extensions; use http::{StatusCode, Version}; use payload::{Payload, PayloadStatus, PayloadWriter}; use uri::Url; @@ -22,7 +23,7 @@ use super::error::ServerError; use super::h2writer::H2Writer; use super::input::PayloadType; use super::settings::WorkerSettings; -use super::{HttpHandler, HttpHandlerTask, Writer}; +use super::{HttpHandler, HttpHandlerTask, IoStream, Writer}; bitflags! 
{ struct Flags: u8 { @@ -42,6 +43,7 @@ where state: State>, tasks: VecDeque>, keepalive_timer: Option, + extensions: Option>, } enum State { @@ -52,12 +54,13 @@ enum State { impl Http2 where - T: AsyncRead + AsyncWrite + 'static, + T: IoStream + 'static, H: HttpHandler + 'static, { pub fn new( settings: Rc>, io: T, addr: Option, buf: Bytes, ) -> Self { + let extensions = io.extensions(); Http2 { flags: Flags::empty(), tasks: VecDeque::new(), @@ -68,6 +71,7 @@ where keepalive_timer: None, addr, settings, + extensions, } } @@ -206,6 +210,7 @@ where resp, self.addr, &self.settings, + self.extensions.clone(), )); } Ok(Async::NotReady) => { @@ -324,6 +329,7 @@ impl Entry { fn new( parts: Parts, recv: RecvStream, resp: SendResponse, addr: Option, settings: &Rc>, + extensions: Option>, ) -> Entry where H: HttpHandler + 'static, @@ -338,6 +344,7 @@ impl Entry { inner.method = parts.method; inner.version = parts.version; inner.headers = parts.headers; + inner.stream_extensions = extensions; *inner.payload.borrow_mut() = Some(payload); inner.addr = addr; } diff --git a/src/server/message.rs b/src/server/message.rs index 395d7b7c3..43f7e1425 100644 --- a/src/server/message.rs +++ b/src/server/message.rs @@ -35,6 +35,7 @@ pub(crate) struct InnerRequest { pub(crate) info: RefCell, pub(crate) payload: RefCell>, pub(crate) settings: ServerSettings, + pub(crate) stream_extensions: Option>, pool: &'static RequestPool, } @@ -82,6 +83,7 @@ impl Request { info: RefCell::new(ConnectionInfo::default()), payload: RefCell::new(None), extensions: RefCell::new(Extensions::new()), + stream_extensions: None, }), } } @@ -189,6 +191,12 @@ impl Request { } } + /// Io stream extensions + #[inline] + pub fn stream_extensions(&self) -> Option<&Extensions> { + self.inner().stream_extensions.as_ref().map(|e| e.as_ref()) + } + /// Server settings #[inline] pub fn server_settings(&self) -> &ServerSettings { diff --git a/src/server/mod.rs b/src/server/mod.rs index 55de25db4..baf004926 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -1,5 +1,6 @@ //! 
Http server use std::net::Shutdown; +use std::rc::Rc; use std::{io, net, time}; use bytes::{BufMut, BytesMut}; @@ -36,6 +37,7 @@ pub use self::helpers::write_content_length; use actix::Message; use body::Binary; use error::Error; +use extensions::Extensions; use header::ContentEncoding; use httpresponse::HttpResponse; @@ -287,6 +289,11 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static { } } } + + /// Extra io stream extensions + fn extensions(&self) -> Option> { + None + } } impl IoStream for TcpStream { From ac9180ac465443370b6893841c1ce84497d936e3 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 3 Aug 2018 19:32:46 -0700 Subject: [PATCH 033/219] simplify channel impl --- src/server/accept.rs | 1 - src/server/channel.rs | 34 ++++++++++------------------------ src/server/srv.rs | 2 -- src/server/worker.rs | 5 ++--- 4 files changed, 12 insertions(+), 30 deletions(-) diff --git a/src/server/accept.rs b/src/server/accept.rs index e837852d3..61bc72fbe 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -470,7 +470,6 @@ impl Accept { io, token: info.token, peer: Some(addr), - http2: false, }, Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return, Err(ref e) if connection_error(e) => continue, diff --git a/src/server/channel.rs b/src/server/channel.rs index b817b4160..c158f66b4 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -2,7 +2,7 @@ use std::net::{Shutdown, SocketAddr}; use std::rc::Rc; use std::{io, ptr, time}; -use bytes::{Buf, BufMut, Bytes, BytesMut}; +use bytes::{Buf, BufMut, BytesMut}; use futures::{Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -38,32 +38,18 @@ where H: HttpHandler + 'static, { pub(crate) fn new( - settings: Rc>, mut io: T, peer: Option, - http2: bool, + settings: Rc>, io: T, peer: Option, ) -> HttpChannel { settings.add_channel(); - let _ = io.set_nodelay(true); - if http2 { - HttpChannel { - node: None, - proto: Some(HttpProtocol::H2(h2::Http2::new( - settings, - io, - peer, - Bytes::new(), - ))), - } - } else { - HttpChannel { - node: None, - proto: Some(HttpProtocol::Unknown( - settings, - peer, - io, - BytesMut::with_capacity(8192), - )), - } + HttpChannel { + node: None, + proto: Some(HttpProtocol::Unknown( + settings, + peer, + io, + BytesMut::with_capacity(8192), + )), } } diff --git a/src/server/srv.rs b/src/server/srv.rs index 33c820aa7..7e50e12b4 100644 --- a/src/server/srv.rs +++ b/src/server/srv.rs @@ -510,7 +510,6 @@ impl HttpServer { io: WrapperStream::new(t), token: Token::new(0), peer: None, - http2: false, })); self }); @@ -602,7 +601,6 @@ where Rc::clone(self.settings.as_ref().unwrap()), msg.io, msg.peer, - msg.http2, )); } } diff --git a/src/server/worker.rs b/src/server/worker.rs index 3b8f426db..168382e64 100644 --- a/src/server/worker.rs +++ b/src/server/worker.rs @@ -25,7 +25,6 @@ pub(crate) struct Conn { pub io: T, pub token: Token, pub peer: Option, - pub http2: bool, } #[derive(Clone, Copy)] @@ -428,7 +427,7 @@ where }; let _ = io.set_nodelay(true); - current_thread::spawn(HttpChannel::new(h, io, peer, false)); + current_thread::spawn(HttpChannel::new(h, io, peer)); } } @@ -491,7 +490,7 @@ where current_thread::spawn(self.acceptor.accept(io).then(move |res| { h.conn_rate_del(); match res { - Ok(io) => current_thread::spawn(HttpChannel::new(h, io, peer, false)), + Ok(io) => current_thread::spawn(HttpChannel::new(h, io, peer)), Err(err) => trace!("Can not establish connection: {}", err), } Ok(()) From 84b27db218549df6fdee47b89b975eaaac6a4584 Mon Sep 17 00:00:00 2001 From: 
Nikolay Kim Date: Fri, 3 Aug 2018 19:40:43 -0700 Subject: [PATCH 034/219] fix no_http2 flag --- src/server/srv.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server/srv.rs b/src/server/srv.rs index 7e50e12b4..17d84998d 100644 --- a/src/server/srv.rs +++ b/src/server/srv.rs @@ -254,7 +254,7 @@ where use super::{OpensslAcceptor, ServerFlags}; // alpn support - let flags = if !self.no_http2 { + let flags = if self.no_http2 { ServerFlags::HTTP1 } else { ServerFlags::HTTP1 | ServerFlags::HTTP2 @@ -278,7 +278,7 @@ where use super::{RustlsAcceptor, ServerFlags}; // alpn support - let flags = if !self.no_http2 { + let flags = if self.no_http2 { ServerFlags::HTTP1 } else { ServerFlags::HTTP1 | ServerFlags::HTTP2 From 900fd5a98e7bd1988dd7d8a504ccc31cc0fd4354 Mon Sep 17 00:00:00 2001 From: Douman Date: Sat, 4 Aug 2018 01:34:23 +0300 Subject: [PATCH 035/219] Correct settings headers for HTTP2 Add test to verify number of Set-Cookies --- src/server/h2writer.rs | 6 +++--- tests/test_server.rs | 44 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 3 deletions(-) diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index ff87b693e..511929fa8 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -112,7 +112,7 @@ impl Writer for H2Writer { DATE => has_date = true, _ => (), } - resp.headers_mut().insert(key, value.clone()); + resp.headers_mut().append(key, value.clone()); } // set date header @@ -151,6 +151,8 @@ impl Writer for H2Writer { .insert(CONTENT_ENCODING, HeaderValue::try_from(ce).unwrap()); } + trace!("Response: {:?}", resp); + match self .respond .send_response(resp, self.flags.contains(Flags::EOF)) @@ -159,8 +161,6 @@ impl Writer for H2Writer { Err(_) => return Err(io::Error::new(io::ErrorKind::Other, "err")), } - trace!("Response: {:?}", msg); - let body = msg.replace_body(Body::Empty); if let Body::Binary(bytes) = body { if bytes.is_empty() { diff --git a/tests/test_server.rs b/tests/test_server.rs index 4db73a3be..5c4385680 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -931,3 +931,47 @@ fn test_application() { let response = srv.execute(request.send()).unwrap(); assert!(response.status().is_success()); } + +#[test] +fn test_server_cookies() { + use actix_web::http; + + let mut srv = test::TestServer::with_factory(|| { + App::new().resource("/", |r| r.f(|_| HttpResponse::Ok().cookie(http::CookieBuilder::new("first", "first_value").http_only(true).finish()) + .cookie(http::Cookie::new("second", "first_value")) + .cookie(http::Cookie::new("second", "second_value")) + .finish()) + ) + }); + + let first_cookie = http::CookieBuilder::new("first", "first_value").http_only(true).finish(); + let second_cookie = http::Cookie::new("second", "second_value"); + + let request = srv.get().finish().unwrap(); + let response = srv.execute(request.send()).unwrap(); + assert!(response.status().is_success()); + + let cookies = response.cookies().expect("To have cookies"); + assert_eq!(cookies.len(), 2); + if cookies[0] == first_cookie { + assert_eq!(cookies[1], second_cookie); + } else { + assert_eq!(cookies[0], second_cookie); + assert_eq!(cookies[1], first_cookie); + } + + let first_cookie = first_cookie.to_string(); + let second_cookie = second_cookie.to_string(); + //Check that we have exactly two instances of raw cookie headers + let cookies = response.headers().get_all(http::header::SET_COOKIE) + .iter() + .map(|header| header.to_str().expect("To str").to_string()) + .collect::>(); + assert_eq!(cookies.len(), 
2); + if cookies[0] == first_cookie { + assert_eq!(cookies[1], second_cookie); + } else { + assert_eq!(cookies[0], second_cookie); + assert_eq!(cookies[1], first_cookie); + } +} From 85e7548088b9cc6b7782b38ceef63b35500fbf32 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 4 Aug 2018 08:56:33 -0700 Subject: [PATCH 036/219] fix adding multiple response headers for http/2 #446 --- CHANGES.md | 2 ++ src/server/h2writer.rs | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 4d1610c09..714e6b67c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -17,6 +17,8 @@ * Fixed headers formating for CORS Middleware Access-Control-Expose-Headers #436 +* Fix adding multiple response headers #446 + ## [0.7.3] - 2018-08-01 diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index ff87b693e..05bf45197 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -112,7 +112,7 @@ impl Writer for H2Writer { DATE => has_date = true, _ => (), } - resp.headers_mut().insert(key, value.clone()); + resp.headers_mut().append(key, value.clone()); } // set date header @@ -159,7 +159,7 @@ impl Writer for H2Writer { Err(_) => return Err(io::Error::new(io::ErrorKind::Other, "err")), } - trace!("Response: {:?}", msg); + trace!("HttpResponse: {:?}", msg); let body = msg.replace_body(Body::Empty); if let Body::Binary(bytes) = body { From 954f1a0b0fba127b1a68131cea22cbdb22a6ec0c Mon Sep 17 00:00:00 2001 From: Erik Desjardins Date: Mon, 6 Aug 2018 03:44:08 -0400 Subject: [PATCH 037/219] impl FromRequest for () (#449) --- src/extractor.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/extractor.rs b/src/extractor.rs index 5c2c7f600..312287e0f 100644 --- a/src/extractor.rs +++ b/src/extractor.rs @@ -690,6 +690,12 @@ macro_rules! tuple_from_req ({$fut_type:ident, $(($n:tt, $T:ident)),+} => { } }); +impl FromRequest for () { + type Config = (); + type Result = Self; + fn from_request(_req: &HttpRequest, _cfg: &Self::Config) -> Self::Result {} +} + tuple_from_req!(TupleFromRequest1, (0, A)); tuple_from_req!(TupleFromRequest2, (0, A), (1, B)); tuple_from_req!(TupleFromRequest3, (0, A), (1, B), (2, C)); @@ -1006,5 +1012,7 @@ mod tests { assert_eq!((res.0).1, "user1"); assert_eq!((res.1).0, "name"); assert_eq!((res.1).1, "user1"); + + let () = <()>::extract(&req); } } From 9c80d3aa77a036f2b4b8ec6332d19940120f633b Mon Sep 17 00:00:00 2001 From: Douman Date: Tue, 7 Aug 2018 10:01:29 +0300 Subject: [PATCH 038/219] Write non-80 port in HOST of client's request (#451) --- CHANGES.md | 3 ++- src/client/request.rs | 9 ++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 714e6b67c..eff8d4cf7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,7 +7,7 @@ * Added `HttpServer::maxconn()` and `HttpServer::maxconnrate()`, accept backpressure #250 -* Allow to customize connection handshake process via `HttpServer::listen_with()` +* Allow to customize connection handshake process via `HttpServer::listen_with()` and `HttpServer::bind_with()` methods ### Fixed @@ -19,6 +19,7 @@ * Fix adding multiple response headers #446 +* Client includes port in HOST header when it is not default(e.g. not 80 and 443). 
#448 ## [0.7.3] - 2018-08-01 diff --git a/src/client/request.rs b/src/client/request.rs index 4d506c3fa..aff4ab485 100644 --- a/src/client/request.rs +++ b/src/client/request.rs @@ -629,7 +629,14 @@ impl ClientRequestBuilder { if let Some(parts) = parts(&mut self.request, &self.err) { if let Some(host) = parts.uri.host() { if !parts.headers.contains_key(header::HOST) { - match host.try_into() { + let mut wrt = BytesMut::with_capacity(host.len() + 5).writer(); + + let _ = match parts.uri.port() { + None | Some(80) | Some(443) => write!(wrt, "{}", host), + Some(port) => write!(wrt, "{}:{}", host, port), + }; + + match wrt.get_mut().take().freeze().try_into() { Ok(value) => { parts.headers.insert(header::HOST, value); } From 86a5afb5ca6eb0ad41b105e30e604a4c1ea169f7 Mon Sep 17 00:00:00 2001 From: Douman Date: Tue, 7 Aug 2018 17:33:49 +0300 Subject: [PATCH 039/219] Reserve enough space for ServerError task to write status line --- src/server/error.rs | 3 +++ src/server/helpers.rs | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/server/error.rs b/src/server/error.rs index b3c79a066..4c264bc18 100644 --- a/src/server/error.rs +++ b/src/server/error.rs @@ -16,6 +16,9 @@ impl HttpHandlerTask for ServerError { fn poll_io(&mut self, io: &mut Writer) -> Poll { { let bytes = io.buffer(); + //Buffer should have sufficient capacity for status line + //and extra space + bytes.reserve(helpers::STATUS_LINE_BUF_SIZE + 1); helpers::write_status_line(self.0, self.1.as_u16(), bytes); } io.set_date(); diff --git a/src/server/helpers.rs b/src/server/helpers.rs index 03bbc8310..f7e030f2d 100644 --- a/src/server/helpers.rs +++ b/src/server/helpers.rs @@ -8,8 +8,10 @@ const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\ 6061626364656667686970717273747576777879\ 8081828384858687888990919293949596979899"; +pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13; + pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) { - let mut buf: [u8; 13] = [ + let mut buf: [u8; STATUS_LINE_BUF_SIZE] = [ b'H', b'T', b'T', b'P', b'/', b'1', b'.', b'1', b' ', b' ', b' ', b' ', b' ', ]; match version { From 58a079bd10808d8d5be183b12a8c0fe74cd73bf1 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 7 Aug 2018 11:56:39 -0700 Subject: [PATCH 040/219] include content-length to error response --- src/server/error.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/server/error.rs b/src/server/error.rs index 4c264bc18..5bd0bf83b 100644 --- a/src/server/error.rs +++ b/src/server/error.rs @@ -16,11 +16,12 @@ impl HttpHandlerTask for ServerError { fn poll_io(&mut self, io: &mut Writer) -> Poll { { let bytes = io.buffer(); - //Buffer should have sufficient capacity for status line - //and extra space + // Buffer should have sufficient capacity for status line + // and extra space bytes.reserve(helpers::STATUS_LINE_BUF_SIZE + 1); helpers::write_status_line(self.0, self.1.as_u16(), bytes); } + io.buffer().extend_from_slice(b"\r\ncontent-length: 0\r\n"); io.set_date(); Ok(Async::Ready(true)) } From 5bd82d4f03696701eafa2abd3f2eb454d2dad62c Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 7 Aug 2018 12:00:51 -0700 Subject: [PATCH 041/219] update changes --- CHANGES.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index eff8d4cf7..7c69161df 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -21,6 +21,9 @@ * Client includes port in HOST header when it is not default(e.g. not 80 and 443). 
#448 +* Panic during access without routing being set #452 + + ## [0.7.3] - 2018-08-01 ### Added From 85acc3f8df0eefb430dda366c63421f88a2cb6eb Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 7 Aug 2018 12:49:40 -0700 Subject: [PATCH 042/219] deprecate HttpServer::no_http2(), update changes --- CHANGES.md | 11 +++++++++++ src/server/srv.rs | 5 ++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 7c69161df..b9ee04700 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -23,6 +23,17 @@ * Panic during access without routing being set #452 +### Deprecated + +* `HttpServer::no_http2()` is deprecated, use `OpensslAcceptor::with_flags()` or + `RustlsAcceptor::with_flags()` instead + +* `HttpServer::listen_tls()`, `HttpServer::listen_ssl()`, `HttpServer::listen_rustls()` have been + deprecated in favor of `HttpServer::listen_with()` with specific `acceptor`. + +* `HttpServer::bind_tls()`, `HttpServer::bind_ssl()`, `HttpServer::bind_rustls()` have been + deprecated in favor of `HttpServer::bind_with()` with specific `acceptor`. + ## [0.7.3] - 2018-08-01 diff --git a/src/server/srv.rs b/src/server/srv.rs index 17d84998d..c2bb6c819 100644 --- a/src/server/srv.rs +++ b/src/server/srv.rs @@ -181,6 +181,8 @@ where } /// Disable `HTTP/2` support + #[doc(hidden)] + #[deprecated(since = "0.7.4", note = "please use acceptor service with proper ServerFlags parama")] pub fn no_http2(mut self) -> Self { self.no_http2 = true; self @@ -655,6 +657,7 @@ impl Handler for HttpServer { }); } } + fut::ok(()) }), ); @@ -672,4 +675,4 @@ impl Handler for HttpServer { Response::reply(Ok(())) } } -} +} \ No newline at end of file From 57f991280cf1ee0ae4d7d78b9e1072f9d3d1eee9 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 7 Aug 2018 13:53:24 -0700 Subject: [PATCH 043/219] fix protocol order for rustls acceptor --- src/server/ssl/rustls.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/server/ssl/rustls.rs b/src/server/ssl/rustls.rs index 45cb61be7..6ad0a7b2b 100644 --- a/src/server/ssl/rustls.rs +++ b/src/server/ssl/rustls.rs @@ -25,13 +25,12 @@ impl RustlsAcceptor { /// Create `OpensslAcceptor` with custom server flags. 
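Editorial note, not part of the patch: with `HttpServer::no_http2()` deprecated, the HTTP/2 toggle moves into the acceptor's `ServerFlags`. A minimal sketch of the replacement, assuming the `rust-tls` feature and that `RustlsAcceptor`, `ServerFlags` and `listen_with()` are exported from `actix_web::server` as the surrounding patches suggest; the `ServerConfig` and listener are supplied by the caller.

```rust
use std::net::TcpListener;

use actix_web::server::{self, RustlsAcceptor, ServerFlags};
use actix_web::{App, HttpResponse};
use rustls::ServerConfig;

fn start_http1_only(config: ServerConfig, listener: TcpListener) {
    // Advertising only HTTP/1.1 via ALPN reproduces the old `no_http2()`
    // behaviour; add `ServerFlags::HTTP2` to negotiate h2 as well.
    let acceptor = RustlsAcceptor::with_flags(config, ServerFlags::HTTP1);

    server::new(|| App::new().resource("/", |r| r.f(|_| HttpResponse::Ok())))
        .listen_with(listener, acceptor)
        .expect("failed to attach listener")
        .run();
}
```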
pub fn with_flags(mut config: ServerConfig, flags: ServerFlags) -> Self { let mut protos = Vec::new(); - if flags.contains(ServerFlags::HTTP1) { - protos.push("http/1.1".to_string()); - } if flags.contains(ServerFlags::HTTP2) { protos.push("h2".to_string()); } - + if flags.contains(ServerFlags::HTTP1) { + protos.push("http/1.1".to_string()); + } if !protos.is_empty() { config.set_protocols(&protos); } From 30769e3072e96902abdfd457e71ab3aa8b06f56a Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 7 Aug 2018 20:48:25 -0700 Subject: [PATCH 044/219] fix http/2 error handling --- CHANGES.md | 2 + src/pipeline.rs | 127 ++++++++++++++++++++++++++++------------- src/server/h2.rs | 22 +++++-- src/server/h2writer.rs | 7 +-- src/test.rs | 6 +- tests/test_ws.rs | 10 ++-- 6 files changed, 116 insertions(+), 58 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b9ee04700..bfd86a1a3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -23,6 +23,8 @@ * Panic during access without routing being set #452 +* Fixed http/2 error handling + ### Deprecated * `HttpServer::no_http2()` is deprecated, use `OpensslAcceptor::with_flags()` or diff --git a/src/pipeline.rs b/src/pipeline.rs index 7c277a587..ca6e974d8 100644 --- a/src/pipeline.rs +++ b/src/pipeline.rs @@ -42,13 +42,6 @@ enum PipelineState { } impl> PipelineState { - fn is_response(&self) -> bool { - match *self { - PipelineState::Response(_) => true, - _ => false, - } - } - fn poll( &mut self, info: &mut PipelineInfo, mws: &[Box>], ) -> Option> { @@ -58,7 +51,8 @@ impl> PipelineState { PipelineState::RunMiddlewares(ref mut state) => state.poll(info, mws), PipelineState::Finishing(ref mut state) => state.poll(info, mws), PipelineState::Completed(ref mut state) => state.poll(info), - PipelineState::Response(_) | PipelineState::None | PipelineState::Error => { + PipelineState::Response(ref mut state) => state.poll(info, mws), + PipelineState::None | PipelineState::Error => { None } } @@ -130,22 +124,20 @@ impl> HttpHandlerTask for Pipeline { let mut state = mem::replace(&mut self.1, PipelineState::None); loop { - if state.is_response() { - if let PipelineState::Response(st) = state { - match st.poll_io(io, &mut self.0, &self.2) { - Ok(state) => { - self.1 = state; - if let Some(error) = self.0.error.take() { - return Err(error); - } else { - return Ok(Async::Ready(self.is_done())); - } - } - Err(state) => { - self.1 = state; - return Ok(Async::NotReady); + if let PipelineState::Response(st) = state { + match st.poll_io(io, &mut self.0, &self.2) { + Ok(state) => { + self.1 = state; + if let Some(error) = self.0.error.take() { + return Err(error); + } else { + return Ok(Async::Ready(self.is_done())); } } + Err(state) => { + self.1 = state; + return Ok(Async::NotReady); + } } } match state { @@ -401,7 +393,7 @@ impl RunMiddlewares { } struct ProcessResponse { - resp: HttpResponse, + resp: Option, iostate: IOState, running: RunningState, drain: Option>, @@ -442,7 +434,7 @@ impl ProcessResponse { #[inline] fn init(resp: HttpResponse) -> PipelineState { PipelineState::Response(ProcessResponse { - resp, + resp: Some(resp), iostate: IOState::Response, running: RunningState::Running, drain: None, @@ -451,6 +443,59 @@ impl ProcessResponse { }) } + fn poll( + &mut self, info: &mut PipelineInfo, mws: &[Box>], + ) -> Option> { + println!("POLL"); + // connection is dead at this point + match mem::replace(&mut self.iostate, IOState::Done) { + IOState::Response => + Some(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())), + IOState::Payload(_) => + 
Some(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())), + IOState::Actor(mut ctx) => { + if info.disconnected.take().is_some() { + ctx.disconnected(); + } + loop { + match ctx.poll() { + Ok(Async::Ready(Some(vec))) => { + if vec.is_empty() { + continue; + } + for frame in vec { + match frame { + Frame::Chunk(None) => { + info.context = Some(ctx); + return Some(FinishingMiddlewares::init( + info, mws, self.resp.take().unwrap(), + )) + } + Frame::Chunk(Some(_)) => (), + Frame::Drain(fut) => {let _ = fut.send(());}, + } + } + } + Ok(Async::Ready(None)) => + return Some(FinishingMiddlewares::init( + info, mws, self.resp.take().unwrap(), + )), + Ok(Async::NotReady) => { + self.iostate = IOState::Actor(ctx); + return None; + } + Err(err) => { + info.context = Some(ctx); + info.error = Some(err); + return Some(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())); + } + } + } + } + IOState::Done => Some(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())) + } + } + fn poll_io( mut self, io: &mut Writer, info: &mut PipelineInfo, mws: &[Box>], @@ -462,24 +507,24 @@ impl ProcessResponse { let result = match mem::replace(&mut self.iostate, IOState::Done) { IOState::Response => { let encoding = - self.resp.content_encoding().unwrap_or(info.encoding); + self.resp.as_ref().unwrap().content_encoding().unwrap_or(info.encoding); let result = - match io.start(&info.req, &mut self.resp, encoding) { + match io.start(&info.req, self.resp.as_mut().unwrap(), encoding) { Ok(res) => res, Err(err) => { info.error = Some(err.into()); return Ok(FinishingMiddlewares::init( - info, mws, self.resp, + info, mws, self.resp.take().unwrap(), )); } }; - if let Some(err) = self.resp.error() { - if self.resp.status().is_server_error() { + if let Some(err) = self.resp.as_ref().unwrap().error() { + if self.resp.as_ref().unwrap().status().is_server_error() { error!( "Error occured during request handling, status: {} {}", - self.resp.status(), err + self.resp.as_ref().unwrap().status(), err ); } else { warn!( @@ -493,7 +538,7 @@ impl ProcessResponse { } // always poll stream or actor for the first time - match self.resp.replace_body(Body::Empty) { + match self.resp.as_mut().unwrap().replace_body(Body::Empty) { Body::Streaming(stream) => { self.iostate = IOState::Payload(stream); continue 'inner; @@ -512,7 +557,7 @@ impl ProcessResponse { if let Err(err) = io.write_eof() { info.error = Some(err.into()); return Ok(FinishingMiddlewares::init( - info, mws, self.resp, + info, mws, self.resp.take().unwrap(), )); } break; @@ -523,7 +568,7 @@ impl ProcessResponse { Err(err) => { info.error = Some(err.into()); return Ok(FinishingMiddlewares::init( - info, mws, self.resp, + info, mws, self.resp.take().unwrap(), )); } Ok(result) => result, @@ -536,7 +581,7 @@ impl ProcessResponse { Err(err) => { info.error = Some(err); return Ok(FinishingMiddlewares::init( - info, mws, self.resp, + info, mws, self.resp.take().unwrap(), )); } }, @@ -559,7 +604,7 @@ impl ProcessResponse { info.error = Some(err.into()); return Ok( FinishingMiddlewares::init( - info, mws, self.resp, + info, mws, self.resp.take().unwrap(), ), ); } @@ -572,7 +617,7 @@ impl ProcessResponse { info.error = Some(err.into()); return Ok( FinishingMiddlewares::init( - info, mws, self.resp, + info, mws, self.resp.take().unwrap(), ), ); } @@ -598,7 +643,7 @@ impl ProcessResponse { info.context = Some(ctx); info.error = Some(err); return Ok(FinishingMiddlewares::init( - info, mws, self.resp, + info, mws, self.resp.take().unwrap(), )); } } @@ -638,7 
+683,7 @@ impl ProcessResponse { info.context = Some(ctx); } info.error = Some(err.into()); - return Ok(FinishingMiddlewares::init(info, mws, self.resp)); + return Ok(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())); } } } @@ -652,11 +697,11 @@ impl ProcessResponse { Ok(_) => (), Err(err) => { info.error = Some(err.into()); - return Ok(FinishingMiddlewares::init(info, mws, self.resp)); + return Ok(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())); } } - self.resp.set_response_size(io.written()); - Ok(FinishingMiddlewares::init(info, mws, self.resp)) + self.resp.as_mut().unwrap().set_response_size(io.written()); + Ok(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())) } _ => Err(PipelineState::Response(self)), } diff --git a/src/server/h2.rs b/src/server/h2.rs index 9f0725022..d52dc74f6 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -102,13 +102,19 @@ where loop { let mut not_ready = true; + let disconnected = self.flags.contains(Flags::DISCONNECTED); // check in-flight connections for item in &mut self.tasks { // read payload - item.poll_payload(); + if !disconnected { + item.poll_payload(); + } if !item.flags.contains(EntryFlags::EOF) { + if disconnected { + item.flags.insert(EntryFlags::EOF); + } else { let retry = item.payload.need_read() == PayloadStatus::Read; loop { match item.task.poll_io(&mut item.stream) { @@ -141,12 +147,14 @@ where } break; } - } else if !item.flags.contains(EntryFlags::FINISHED) { + } + } + + if item.flags.contains(EntryFlags::EOF) && !item.flags.contains(EntryFlags::FINISHED) { match item.task.poll_completed() { Ok(Async::NotReady) => (), Ok(Async::Ready(_)) => { - not_ready = false; - item.flags.insert(EntryFlags::FINISHED); + item.flags.insert(EntryFlags::FINISHED | EntryFlags::WRITE_DONE); } Err(err) => { item.flags.insert( @@ -161,6 +169,7 @@ where if item.flags.contains(EntryFlags::FINISHED) && !item.flags.contains(EntryFlags::WRITE_DONE) + && !disconnected { match item.stream.poll_completed(false) { Ok(Async::NotReady) => (), @@ -168,7 +177,7 @@ where not_ready = false; item.flags.insert(EntryFlags::WRITE_DONE); } - Err(_err) => { + Err(_) => { item.flags.insert(EntryFlags::ERROR); } } @@ -177,7 +186,7 @@ where // cleanup finished tasks while !self.tasks.is_empty() { - if self.tasks[0].flags.contains(EntryFlags::EOF) + if self.tasks[0].flags.contains(EntryFlags::FINISHED) && self.tasks[0].flags.contains(EntryFlags::WRITE_DONE) || self.tasks[0].flags.contains(EntryFlags::ERROR) { @@ -397,6 +406,7 @@ impl Entry { } Ok(Async::NotReady) => break, Err(err) => { + println!("POLL-PAYLOAD error: {:?}", err); self.payload.set_error(PayloadError::Http2(err)); break; } diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index 511929fa8..ce61b3ed7 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -167,7 +167,6 @@ impl Writer for H2Writer { Ok(WriterState::Done) } else { self.flags.insert(Flags::EOF); - self.written = bytes.len() as u64; self.buffer.write(bytes.as_ref())?; if let Some(ref mut stream) = self.stream { self.flags.insert(Flags::RESERVED); @@ -183,8 +182,6 @@ impl Writer for H2Writer { } fn write(&mut self, payload: &Binary) -> io::Result { - self.written = payload.len() as u64; - if !self.flags.contains(Flags::DISCONNECTED) { if self.flags.contains(Flags::STARTED) { // TODO: add warning, write after EOF @@ -253,7 +250,9 @@ impl Writer for H2Writer { return Ok(Async::Ready(())); } } - Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), + Err(e) => { + return 
Err(io::Error::new(io::ErrorKind::Other, e)) + } } } } diff --git a/src/test.rs b/src/test.rs index 244c079a7..70de5a16b 100644 --- a/src/test.rs +++ b/src/test.rs @@ -15,8 +15,10 @@ use tokio::runtime::current_thread::Runtime; #[cfg(feature = "alpn")] use openssl::ssl::SslAcceptorBuilder; -#[cfg(all(feature = "rust-tls"))] +#[cfg(feature = "rust-tls")] use rustls::ServerConfig; +#[cfg(feature = "rust-tls")] +use server::RustlsAcceptor; use application::{App, HttpApplication}; use body::Binary; @@ -342,7 +344,7 @@ impl TestServerBuilder { let ssl = self.rust_ssl.take(); if let Some(ssl) = ssl { let tcp = net::TcpListener::bind(addr).unwrap(); - srv = srv.listen_rustls(tcp, ssl).unwrap(); + srv = srv.listen_with(tcp, RustlsAcceptor::new(ssl)).unwrap(); } } if !has_ssl { diff --git a/tests/test_ws.rs b/tests/test_ws.rs index aa57faf66..752e88b52 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -210,7 +210,7 @@ impl Ws2 { ctx.drain() .and_then(|_, act, ctx| { act.count += 1; - if act.count != 10_000 { + if act.count != 1_000 { act.send(ctx); } actix::fut::ok(()) @@ -248,7 +248,7 @@ fn test_server_send_text() { }); let (mut reader, _writer) = srv.ws().unwrap(); - for _ in 0..10_000 { + for _ in 0..1_000 { let (item, r) = srv.execute(reader.into_future()).unwrap(); reader = r; assert_eq!(item, data); @@ -272,7 +272,7 @@ fn test_server_send_bin() { }); let (mut reader, _writer) = srv.ws().unwrap(); - for _ in 0..10_000 { + for _ in 0..1_000 { let (item, r) = srv.execute(reader.into_future()).unwrap(); reader = r; assert_eq!(item, data); @@ -308,7 +308,7 @@ fn test_ws_server_ssl() { let (mut reader, _writer) = srv.ws().unwrap(); let data = Some(ws::Message::Text("0".repeat(65_536))); - for _ in 0..10_000 { + for _ in 0..1_000 { let (item, r) = srv.execute(reader.into_future()).unwrap(); reader = r; assert_eq!(item, data); @@ -347,7 +347,7 @@ fn test_ws_server_rust_tls() { let (mut reader, _writer) = srv.ws().unwrap(); let data = Some(ws::Message::Text("0".repeat(65_536))); - for _ in 0..10_000 { + for _ in 0..1_000 { let (item, r) = srv.execute(reader.into_future()).unwrap(); reader = r; assert_eq!(item, data); From 992f7a11b37d6dbc6a0a3634b5b1fadba573b1c9 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 7 Aug 2018 22:40:09 -0700 Subject: [PATCH 045/219] remove debug println --- src/pipeline.rs | 1 - src/server/h2.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/src/pipeline.rs b/src/pipeline.rs index ca6e974d8..09c5e49d2 100644 --- a/src/pipeline.rs +++ b/src/pipeline.rs @@ -446,7 +446,6 @@ impl ProcessResponse { fn poll( &mut self, info: &mut PipelineInfo, mws: &[Box>], ) -> Option> { - println!("POLL"); // connection is dead at this point match mem::replace(&mut self.iostate, IOState::Done) { IOState::Response => diff --git a/src/server/h2.rs b/src/server/h2.rs index d52dc74f6..0835f5920 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -406,7 +406,6 @@ impl Entry { } Ok(Async::NotReady) => break, Err(err) => { - println!("POLL-PAYLOAD error: {:?}", err); self.payload.set_error(PayloadError::Http2(err)); break; } From 8eb9eb42479a6812fa1c2f519d8dd18013f7a690 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 8 Aug 2018 09:12:32 -0700 Subject: [PATCH 046/219] flush io on complete --- src/server/h1writer.rs | 3 ++- tests/test_ws.rs | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/server/h1writer.rs b/src/server/h1writer.rs index e8f172f40..8c948471f 100644 --- a/src/server/h1writer.rs +++ b/src/server/h1writer.rs @@ -337,9 +337,10 @@ 
impl Writer for H1Writer { } } if shutdown { + self.stream.poll_flush()?; self.stream.shutdown() } else { - Ok(Async::Ready(())) + self.stream.poll_flush() } } } diff --git a/tests/test_ws.rs b/tests/test_ws.rs index 752e88b52..aa57faf66 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -210,7 +210,7 @@ impl Ws2 { ctx.drain() .and_then(|_, act, ctx| { act.count += 1; - if act.count != 1_000 { + if act.count != 10_000 { act.send(ctx); } actix::fut::ok(()) @@ -248,7 +248,7 @@ fn test_server_send_text() { }); let (mut reader, _writer) = srv.ws().unwrap(); - for _ in 0..1_000 { + for _ in 0..10_000 { let (item, r) = srv.execute(reader.into_future()).unwrap(); reader = r; assert_eq!(item, data); @@ -272,7 +272,7 @@ fn test_server_send_bin() { }); let (mut reader, _writer) = srv.ws().unwrap(); - for _ in 0..1_000 { + for _ in 0..10_000 { let (item, r) = srv.execute(reader.into_future()).unwrap(); reader = r; assert_eq!(item, data); @@ -308,7 +308,7 @@ fn test_ws_server_ssl() { let (mut reader, _writer) = srv.ws().unwrap(); let data = Some(ws::Message::Text("0".repeat(65_536))); - for _ in 0..1_000 { + for _ in 0..10_000 { let (item, r) = srv.execute(reader.into_future()).unwrap(); reader = r; assert_eq!(item, data); @@ -347,7 +347,7 @@ fn test_ws_server_rust_tls() { let (mut reader, _writer) = srv.ws().unwrap(); let data = Some(ws::Message::Text("0".repeat(65_536))); - for _ in 0..1_000 { + for _ in 0..10_000 { let (item, r) = srv.execute(reader.into_future()).unwrap(); reader = r; assert_eq!(item, data); From 7a11c2eac11458293544f9e79daa1771b437131d Mon Sep 17 00:00:00 2001 From: David McNeil Date: Wed, 8 Aug 2018 11:11:15 -0600 Subject: [PATCH 047/219] Add json2 HttpResponseBuilder method --- src/httpresponse.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/httpresponse.rs b/src/httpresponse.rs index 2673da2a3..87bd8c8b8 100644 --- a/src/httpresponse.rs +++ b/src/httpresponse.rs @@ -650,7 +650,14 @@ impl HttpResponseBuilder { /// /// `HttpResponseBuilder` can not be used after this call. pub fn json(&mut self, value: T) -> HttpResponse { - match serde_json::to_string(&value) { + self.json2(&value) + } + + /// Set a json body and generate `HttpResponse` + /// + /// `HttpResponseBuilder` can not be used after this call. 
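Editorial note, not part of the patch: the `json2()` builder method added here serializes from a reference instead of consuming the value. A small sketch of the difference; the `Info` struct is illustrative only and assumes `serde_derive` is available.

```rust
#[macro_use]
extern crate serde_derive;
extern crate actix_web;

use actix_web::http::StatusCode;
use actix_web::HttpResponse;

#[derive(Serialize)]
struct Info {
    name: String,
}

fn main() {
    let info = Info { name: "actix".to_owned() };

    // `.json(info)` would move `info` into the call; `.json2(&info)`
    // only borrows it, so the value is still usable afterwards.
    let resp = HttpResponse::Ok().json2(&info);

    assert_eq!(resp.status(), StatusCode::OK);
    println!("serialized {} without giving it up", info.name);
}
```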
+ pub fn json2(&mut self, value: &T) -> HttpResponse { + match serde_json::to_string(value) { Ok(body) => { let contains = if let Some(parts) = parts(&mut self.response, &self.err) { From 7c8dc4c201c88eb480dc0428a2f3430feb404f3a Mon Sep 17 00:00:00 2001 From: David McNeil Date: Wed, 8 Aug 2018 11:58:56 -0600 Subject: [PATCH 048/219] Add json2 tests --- src/httpresponse.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/httpresponse.rs b/src/httpresponse.rs index 87bd8c8b8..7700d3523 100644 --- a/src/httpresponse.rs +++ b/src/httpresponse.rs @@ -1193,6 +1193,30 @@ mod tests { ); } + #[test] + fn test_json2() { + let resp = HttpResponse::build(StatusCode::OK).json2(&vec!["v1", "v2", "v3"]); + let ct = resp.headers().get(CONTENT_TYPE).unwrap(); + assert_eq!(ct, HeaderValue::from_static("application/json")); + assert_eq!( + *resp.body(), + Body::from(Bytes::from_static(b"[\"v1\",\"v2\",\"v3\"]")) + ); + } + + #[test] + fn test_json2_ct() { + let resp = HttpResponse::build(StatusCode::OK) + .header(CONTENT_TYPE, "text/json") + .json2(&vec!["v1", "v2", "v3"]); + let ct = resp.headers().get(CONTENT_TYPE).unwrap(); + assert_eq!(ct, HeaderValue::from_static("text/json")); + assert_eq!( + *resp.body(), + Body::from(Bytes::from_static(b"[\"v1\",\"v2\",\"v3\"]")) + ); + } + impl Body { pub(crate) fn bin_ref(&self) -> &Binary { match *self { From 542782f28a6a2c9215f63be27bd13d68a17b5523 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 8 Aug 2018 13:57:13 -0700 Subject: [PATCH 049/219] add HttpRequest::drop_state() --- src/httprequest.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/httprequest.rs b/src/httprequest.rs index 6f3bfe13e..a21d772e8 100644 --- a/src/httprequest.rs +++ b/src/httprequest.rs @@ -81,6 +81,15 @@ impl HttpRequest { } } + /// Construct new http request with empty state. + pub fn drop_state(&self) -> HttpRequest { + HttpRequest { + Rc::new(()), + req: self.req.as_ref().map(|r| r.clone()), + resource: self.resource.clone(), + } + } + #[inline] /// Construct new http request with new RouteInfo. pub(crate) fn with_route_info(&self, mut resource: ResourceInfo) -> HttpRequest { From b69774db61cef70ee08a024e1a7383dd93b6eb19 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 8 Aug 2018 14:23:16 -0700 Subject: [PATCH 050/219] fix attr name --- src/httprequest.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/httprequest.rs b/src/httprequest.rs index a21d772e8..128dcbf17 100644 --- a/src/httprequest.rs +++ b/src/httprequest.rs @@ -84,7 +84,7 @@ impl HttpRequest { /// Construct new http request with empty state. 
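Editorial note, not part of the patch: `drop_state()` clones the request handle with the application state replaced by the unit type, which is useful when passing the request to helpers that should not depend on the state type. A sketch with illustrative names (`AppState` and `audit_log` are not from the patch).

```rust
use actix_web::{HttpRequest, HttpResponse};

struct AppState;

// A helper that is deliberately independent of the application state type.
fn audit_log(req: &HttpRequest<()>) {
    println!("{} {}", req.method(), req.path());
}

fn index(req: &HttpRequest<AppState>) -> HttpResponse {
    // Same underlying request, but with `S = ()` instead of `AppState`.
    audit_log(&req.drop_state());
    HttpResponse::Ok().finish()
}
```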
pub fn drop_state(&self) -> HttpRequest { HttpRequest { - Rc::new(()), + state: Rc::new(()), req: self.req.as_ref().map(|r| r.clone()), resource: self.resource.clone(), } From cfe4829a56bc523afc7df8ca6314a52c35d01bc2 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 8 Aug 2018 16:13:45 -0700 Subject: [PATCH 051/219] add TestRequest::execute() helper method --- src/extractor.rs | 6 ++++++ src/router.rs | 13 ------------- src/test.rs | 32 +++++++++++++++++++++++++++----- 3 files changed, 33 insertions(+), 18 deletions(-) diff --git a/src/extractor.rs b/src/extractor.rs index 312287e0f..233ad6ce5 100644 --- a/src/extractor.rs +++ b/src/extractor.rs @@ -101,6 +101,12 @@ impl Path { } } +impl From for Path { + fn from(inner: T) -> Path { + Path{inner} + } +} + impl FromRequest for Path where T: DeserializeOwned, diff --git a/src/router.rs b/src/router.rs index 3d112bf60..ff52eac5f 100644 --- a/src/router.rs +++ b/src/router.rs @@ -290,19 +290,6 @@ impl Router { } } - #[cfg(test)] - pub(crate) fn route_info(&self, req: &Request, prefix: u16) -> ResourceInfo { - let mut params = Params::with_url(req.url()); - params.set_tail(prefix); - - ResourceInfo { - params, - prefix: 0, - rmap: self.rmap.clone(), - resource: ResourceId::Default, - } - } - #[cfg(test)] pub(crate) fn default_route_info(&self) -> ResourceInfo { ResourceInfo { diff --git a/src/test.rs b/src/test.rs index 70de5a16b..42f511749 100644 --- a/src/test.rs +++ b/src/test.rs @@ -676,8 +676,6 @@ impl TestRequest { /// This method generates `HttpRequest` instance and runs handler /// with generated request. - /// - /// This method panics is handler returns actor or async result. pub fn run>(self, h: &H) -> Result { let req = self.finish(); let resp = h.handle(&req); @@ -686,7 +684,10 @@ impl TestRequest { Ok(resp) => match resp.into().into() { AsyncResultItem::Ok(resp) => Ok(resp), AsyncResultItem::Err(err) => Err(err), - AsyncResultItem::Future(_) => panic!("Async handler is not supported."), + AsyncResultItem::Future(fut) => { + let mut sys = System::new("test"); + sys.block_on(fut) + } }, Err(err) => Err(err.into()), } @@ -706,8 +707,8 @@ impl TestRequest { let req = self.finish(); let fut = h(req.clone()); - let mut core = Runtime::new().unwrap(); - match core.block_on(fut) { + let mut sys = System::new("test"); + match sys.block_on(fut) { Ok(r) => match r.respond_to(&req) { Ok(reply) => match reply.into().into() { AsyncResultItem::Ok(resp) => Ok(resp), @@ -718,4 +719,25 @@ impl TestRequest { Err(err) => Err(err), } } + + /// This method generates `HttpRequest` instance and executes handler + pub fn execute(self, f: F) -> Result + where F: FnOnce(&HttpRequest) -> R, + R: Responder + 'static, + { + let req = self.finish(); + let resp = f(&req); + + match resp.respond_to(&req) { + Ok(resp) => match resp.into().into() { + AsyncResultItem::Ok(resp) => Ok(resp), + AsyncResultItem::Err(err) => Err(err), + AsyncResultItem::Future(fut) => { + let mut sys = System::new("test"); + sys.block_on(fut) + } + }, + Err(err) => Err(err.into()), + } + } } From e4ce6dfbdf0c6cd25ec5c1723b6611cf456ed8f5 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 9 Aug 2018 11:52:32 -0700 Subject: [PATCH 052/219] refactor workers management --- src/server/accept.rs | 131 +++----- src/server/channel.rs | 16 +- src/server/h1.rs | 14 +- src/server/mod.rs | 14 + src/server/server.rs | 504 +++++++++++++++++++++++++++++++ src/server/settings.rs | 68 +++-- src/server/srv.rs | 664 +++++++++++++++++++++++++---------------- src/server/worker.rs | 477 
+++-------------------------- src/test.rs | 13 +- 9 files changed, 1061 insertions(+), 840 deletions(-) create mode 100644 src/server/server.rs diff --git a/src/server/accept.rs b/src/server/accept.rs index 61bc72fbe..d642c40f6 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -9,8 +9,9 @@ use tokio_timer::Delay; use actix::{msgs::Execute, Arbiter, System}; -use super::srv::ServerCommand; -use super::worker::{Conn, Socket, Token, WorkerClient}; +use super::server::ServerCommand; +use super::worker::{Conn, WorkerClient}; +use super::Token; pub(crate) enum Command { Pause, @@ -22,51 +23,27 @@ pub(crate) enum Command { struct ServerSocketInfo { addr: net::SocketAddr, token: Token, + handler: Token, sock: mio::net::TcpListener, timeout: Option, } #[derive(Clone)] -pub(crate) struct AcceptNotify { - ready: mio::SetReadiness, - maxconn: usize, - maxconn_low: usize, - maxconnrate: usize, - maxconnrate_low: usize, -} +pub(crate) struct AcceptNotify(mio::SetReadiness); impl AcceptNotify { - pub fn new(ready: mio::SetReadiness, maxconn: usize, maxconnrate: usize) -> Self { - let maxconn_low = if maxconn > 10 { maxconn - 10 } else { 0 }; - let maxconnrate_low = if maxconnrate > 10 { - maxconnrate - 10 - } else { - 0 - }; - AcceptNotify { - ready, - maxconn, - maxconn_low, - maxconnrate, - maxconnrate_low, - } + pub(crate) fn new(ready: mio::SetReadiness) -> Self { + AcceptNotify(ready) } - pub fn notify_maxconn(&self, maxconn: usize) { - if maxconn > self.maxconn_low && maxconn <= self.maxconn { - let _ = self.ready.set_readiness(mio::Ready::readable()); - } - } - pub fn notify_maxconnrate(&self, connrate: usize) { - if connrate > self.maxconnrate_low && connrate <= self.maxconnrate { - let _ = self.ready.set_readiness(mio::Ready::readable()); - } + pub(crate) fn notify(&self) { + let _ = self.0.set_readiness(mio::Ready::readable()); } } impl Default for AcceptNotify { fn default() -> Self { - AcceptNotify::new(mio::Registration::new2().1, 0, 0) + AcceptNotify::new(mio::Registration::new2().1) } } @@ -81,8 +58,6 @@ pub(crate) struct AcceptLoop { mpsc::UnboundedSender, mpsc::UnboundedReceiver, )>, - maxconn: usize, - maxconnrate: usize, } impl AcceptLoop { @@ -97,8 +72,6 @@ impl AcceptLoop { cmd_reg: Some(cmd_reg), notify_ready, notify_reg: Some(notify_reg), - maxconn: 102_400, - maxconnrate: 256, rx: Some(rx), srv: Some(mpsc::unbounded()), } @@ -110,19 +83,12 @@ impl AcceptLoop { } pub fn get_notify(&self) -> AcceptNotify { - AcceptNotify::new(self.notify_ready.clone(), self.maxconn, self.maxconnrate) - } - - pub fn maxconn(&mut self, num: usize) { - self.maxconn = num; - } - - pub fn maxconnrate(&mut self, num: usize) { - self.maxconnrate = num; + AcceptNotify::new(self.notify_ready.clone()) } pub(crate) fn start( - &mut self, socks: Vec, workers: Vec, + &mut self, socks: Vec>, + workers: Vec, ) -> mpsc::UnboundedReceiver { let (tx, rx) = self.srv.take().expect("Can not re-use AcceptInfo"); @@ -130,8 +96,6 @@ impl AcceptLoop { self.rx.take().expect("Can not re-use AcceptInfo"), self.cmd_reg.take().expect("Can not re-use AcceptInfo"), self.notify_reg.take().expect("Can not re-use AcceptInfo"), - self.maxconn, - self.maxconnrate, socks, tx, workers, @@ -148,8 +112,6 @@ struct Accept { srv: mpsc::UnboundedSender, timer: (mio::Registration, mio::SetReadiness), next: usize, - maxconn: usize, - maxconnrate: usize, backpressure: bool, } @@ -175,9 +137,8 @@ impl Accept { #![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] pub(crate) fn start( rx: sync_mpsc::Receiver, cmd_reg: 
mio::Registration, - notify_reg: mio::Registration, maxconn: usize, maxconnrate: usize, - socks: Vec, srv: mpsc::UnboundedSender, - workers: Vec, + notify_reg: mio::Registration, socks: Vec>, + srv: mpsc::UnboundedSender, workers: Vec, ) { let sys = System::current(); @@ -187,8 +148,6 @@ impl Accept { .spawn(move || { System::set_current(sys); let mut accept = Accept::new(rx, socks, workers, srv); - accept.maxconn = maxconn; - accept.maxconnrate = maxconnrate; // Start listening for incoming commands if let Err(err) = accept.poll.register( @@ -215,7 +174,7 @@ impl Accept { } fn new( - rx: sync_mpsc::Receiver, socks: Vec, + rx: sync_mpsc::Receiver, socks: Vec>, workers: Vec, srv: mpsc::UnboundedSender, ) -> Accept { // Create a poll instance @@ -226,29 +185,33 @@ impl Accept { // Start accept let mut sockets = Slab::new(); - for sock in socks { - let server = mio::net::TcpListener::from_std(sock.lst) - .expect("Can not create mio::net::TcpListener"); + for (idx, srv_socks) in socks.into_iter().enumerate() { + for (hnd_token, lst) in srv_socks { + let addr = lst.local_addr().unwrap(); + let server = mio::net::TcpListener::from_std(lst) + .expect("Can not create mio::net::TcpListener"); - let entry = sockets.vacant_entry(); - let token = entry.key(); + let entry = sockets.vacant_entry(); + let token = entry.key(); - // Start listening for incoming connections - if let Err(err) = poll.register( - &server, - mio::Token(token + DELTA), - mio::Ready::readable(), - mio::PollOpt::edge(), - ) { - panic!("Can not register io: {}", err); + // Start listening for incoming connections + if let Err(err) = poll.register( + &server, + mio::Token(token + DELTA), + mio::Ready::readable(), + mio::PollOpt::edge(), + ) { + panic!("Can not register io: {}", err); + } + + entry.insert(ServerSocketInfo { + addr, + token: hnd_token, + handler: Token(idx), + sock: server, + timeout: None, + }); } - - entry.insert(ServerSocketInfo { - token: sock.token, - addr: sock.addr, - sock: server, - timeout: None, - }); } // Timer @@ -267,8 +230,6 @@ impl Accept { srv, next: 0, timer: (tm, tmr), - maxconn: 102_400, - maxconnrate: 256, backpressure: false, } } @@ -431,7 +392,7 @@ impl Accept { let mut idx = 0; while idx < self.workers.len() { idx += 1; - if self.workers[self.next].available(self.maxconn, self.maxconnrate) { + if self.workers[self.next].available() { match self.workers[self.next].send(msg) { Ok(_) => { self.next = (self.next + 1) % self.workers.len(); @@ -469,6 +430,7 @@ impl Accept { Ok((io, addr)) => Conn { io, token: info.token, + handler: info.handler, peer: Some(addr), }, Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return, @@ -489,11 +451,10 @@ impl Accept { Delay::new( Instant::now() + Duration::from_millis(510), ).map_err(|_| ()) - .and_then(move |_| { - let _ = - r.set_readiness(mio::Ready::readable()); - Ok(()) - }), + .and_then(move |_| { + let _ = r.set_readiness(mio::Ready::readable()); + Ok(()) + }), ); Ok(()) }, diff --git a/src/server/channel.rs b/src/server/channel.rs index c158f66b4..7de561c6b 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -7,7 +7,7 @@ use futures::{Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; use super::settings::WorkerSettings; -use super::{h1, h2, HttpHandler, IoStream}; +use super::{h1, h2, ConnectionTag, HttpHandler, IoStream}; const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0"; @@ -30,6 +30,7 @@ where { proto: Option>, node: Option>>, + _tag: ConnectionTag, } impl HttpChannel @@ -40,9 +41,10 @@ where pub(crate) fn new( 
settings: Rc>, io: T, peer: Option, ) -> HttpChannel { - settings.add_channel(); + let _tag = settings.connection(); HttpChannel { + _tag, node: None, proto: Some(HttpProtocol::Unknown( settings, @@ -97,7 +99,6 @@ where let result = h1.poll(); match result { Ok(Async::Ready(())) | Err(_) => { - h1.settings().remove_channel(); if let Some(n) = self.node.as_mut() { n.remove() }; @@ -110,7 +111,6 @@ where let result = h2.poll(); match result { Ok(Async::Ready(())) | Err(_) => { - h2.settings().remove_channel(); if let Some(n) = self.node.as_mut() { n.remove() }; @@ -119,16 +119,10 @@ where } return result; } - Some(HttpProtocol::Unknown( - ref mut settings, - _, - ref mut io, - ref mut buf, - )) => { + Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => { match io.read_available(buf) { Ok(Async::Ready(true)) | Err(_) => { debug!("Ignored premature client disconnection"); - settings.remove_channel(); if let Some(n) = self.node.as_mut() { n.remove() }; diff --git a/src/server/h1.rs b/src/server/h1.rs index 2c07f0cf4..808dc11a1 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -468,7 +468,6 @@ where #[cfg(test)] mod tests { use std::net::Shutdown; - use std::sync::{atomic::AtomicUsize, Arc}; use std::{cmp, io, time}; use bytes::{Buf, Bytes, BytesMut}; @@ -478,20 +477,17 @@ mod tests { use super::*; use application::HttpApplication; use httpmessage::HttpMessage; - use server::accept::AcceptNotify; use server::h1decoder::Message; use server::settings::{ServerSettings, WorkerSettings}; - use server::{KeepAlive, Request}; + use server::{Connections, KeepAlive, Request}; - fn wrk_settings() -> WorkerSettings { - WorkerSettings::::new( + fn wrk_settings() -> Rc> { + Rc::new(WorkerSettings::::new( Vec::new(), KeepAlive::Os, ServerSettings::default(), - AcceptNotify::default(), - Arc::new(AtomicUsize::new(0)), - Arc::new(AtomicUsize::new(0)), - ) + Connections::default(), + )) } impl Message { diff --git a/src/server/mod.rs b/src/server/mod.rs index baf004926..f34497936 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -21,12 +21,16 @@ pub(crate) mod helpers; pub(crate) mod input; pub(crate) mod message; pub(crate) mod output; +mod server; pub(crate) mod settings; mod srv; mod ssl; mod worker; pub use self::message::Request; +pub use self::server::{ + ConnectionRateTag, ConnectionTag, Connections, Server, Service, ServiceHandler, +}; pub use self::settings::ServerSettings; pub use self::srv::HttpServer; pub use self::ssl::*; @@ -136,6 +140,16 @@ impl Message for StopServer { type Result = Result<(), ()>; } +/// Socket id token +#[derive(Clone, Copy)] +pub struct Token(usize); + +impl Token { + pub(crate) fn new(val: usize) -> Token { + Token(val) + } +} + /// Low level http request handler #[allow(unused_variables)] pub trait HttpHandler: 'static { diff --git a/src/server/server.rs b/src/server/server.rs new file mode 100644 index 000000000..ff88040fe --- /dev/null +++ b/src/server/server.rs @@ -0,0 +1,504 @@ +use std::{mem, net}; +use std::time::Duration; +use std::sync::{Arc, atomic::{AtomicUsize, Ordering}}; + +use futures::{Future, Stream, Sink}; +use futures::sync::{mpsc, mpsc::unbounded}; + +use actix::{fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, + Context, Handler, Response, System, StreamHandler, WrapFuture}; + +use super::accept::{AcceptLoop, AcceptNotify, Command}; +use super::worker::{StopWorker, Worker, WorkerClient, Conn}; +use super::{PauseServer, ResumeServer, StopServer, Token}; + +pub trait Service: Send + 'static { + /// Clone service + fn 
clone(&self) -> Box; + + /// Create service handler for this service + fn create(&self, conn: Connections) -> Box; +} + +impl Service for Box { + fn clone(&self) -> Box { + self.as_ref().clone() + } + + fn create(&self, conn: Connections) -> Box { + self.as_ref().create(conn) + } +} + +pub trait ServiceHandler { + /// Handle incoming stream + fn handle(&mut self, token: Token, io: net::TcpStream, peer: Option); + + /// Shutdown open handlers + fn shutdown(&self, _: bool) {} +} + +pub(crate) enum ServerCommand { + WorkerDied(usize), +} + +pub struct Server { + threads: usize, + workers: Vec<(usize, Addr)>, + services: Vec>, + sockets: Vec>, + accept: AcceptLoop, + exit: bool, + shutdown_timeout: u16, + signals: Option>, + no_signals: bool, + maxconn: usize, + maxconnrate: usize, +} + +impl Default for Server { + fn default() -> Self { + Self::new() + } +} + +impl Server { + /// Create new Server instance + pub fn new() -> Server { + Server { + threads: num_cpus::get(), + workers: Vec::new(), + services: Vec::new(), + sockets: Vec::new(), + accept: AcceptLoop::new(), + exit: false, + shutdown_timeout: 30, + signals: None, + no_signals: false, + maxconn: 102_400, + maxconnrate: 256, + } + } + + /// Set number of workers to start. + /// + /// By default http server uses number of available logical cpu as threads + /// count. + pub fn workers(mut self, num: usize) -> Self { + self.threads = num; + self + } + + /// Sets the maximum per-worker number of concurrent connections. + /// + /// All socket listeners will stop accepting connections when this limit is reached + /// for each worker. + /// + /// By default max connections is set to a 100k. + pub fn maxconn(mut self, num: usize) -> Self { + self.maxconn = num; + self + } + + /// Sets the maximum per-worker concurrent connection establish process. + /// + /// All listeners will stop accepting connections when this limit is reached. It + /// can be used to limit the global SSL CPU usage. + /// + /// By default max connections is set to a 256. + pub fn maxconnrate(mut self, num: usize) -> Self { + self.maxconnrate= num; + self + } + + /// Stop actix system. + /// + /// `SystemExit` message stops currently running system. + pub fn system_exit(mut self) -> Self { + self.exit = true; + self + } + + #[doc(hidden)] + /// Set alternative address for `ProcessSignals` actor. + pub fn signals(mut self, addr: Addr) -> Self { + self.signals = Some(addr); + self + } + + /// Disable signal handling + pub fn disable_signals(mut self) -> Self { + self.no_signals = true; + self + } + + /// Timeout for graceful workers shutdown. + /// + /// After receiving a stop signal, workers have this much time to finish + /// serving requests. Workers still alive after the timeout are force + /// dropped. + /// + /// By default shutdown timeout sets to 30 seconds. + pub fn shutdown_timeout(mut self, sec: u16) -> Self { + self.shutdown_timeout = sec; + self + } + + /// Add new service to server + pub fn service(mut self, srv: T, sockets: Vec<(Token, net::TcpListener)>) -> Self + where + T: Into> + { + self.services.push(srv.into()); + self.sockets.push(sockets); + self + } + + /// Spawn new thread and start listening for incoming connections. + /// + /// This method spawns new thread and starts new actix system. Other than + /// that it is similar to `start()` method. This method blocks. + /// + /// This methods panics if no socket addresses get bound. 
+ /// + /// ```rust,ignore + /// # extern crate futures; + /// # extern crate actix_web; + /// # use futures::Future; + /// use actix_web::*; + /// + /// fn main() { + /// Server::new(). + /// .service( + /// HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok()))) + /// .bind("127.0.0.1:0") + /// .expect("Can not bind to 127.0.0.1:0")) + /// .run(); + /// } + /// ``` + pub fn run(self) { + let sys = System::new("http-server"); + self.start(); + sys.run(); + } + + /// Start + pub fn start(mut self) -> Addr { + if self.sockets.is_empty() { + panic!("Service should have at least one bound socket"); + } else { + info!("Starting {} http workers", self.threads); + + // start workers + let mut workers = Vec::new(); + for idx in 0..self.threads { + let (addr, worker) = self.start_worker(idx, self.accept.get_notify()); + workers.push(worker); + self.workers.push((idx, addr)); + } + + // start accept thread + for sock in &self.sockets { + for s in sock.iter() { + info!("Starting server on http://{:?}", s.1.local_addr().ok()); + } + } + let rx = self.accept.start( + mem::replace(&mut self.sockets, Vec::new()), workers); + + // start http server actor + let signals = self.subscribe_to_signals(); + let addr = Actor::create(move |ctx| { + ctx.add_stream(rx); + self + }); + if let Some(signals) = signals { + signals.do_send(signal::Subscribe(addr.clone().recipient())) + } + addr + } + } + + // subscribe to os signals + fn subscribe_to_signals(&self) -> Option> { + if !self.no_signals { + if let Some(ref signals) = self.signals { + Some(signals.clone()) + } else { + Some(System::current().registry().get::()) + } + } else { + None + } + } + + fn start_worker(&self, idx: usize, notify: AcceptNotify) -> (Addr, WorkerClient) { + let (tx, rx) = unbounded::>(); + let conns = Connections::new(notify, self.maxconn, self.maxconnrate); + let worker = WorkerClient::new(idx, tx, conns.clone()); + let services: Vec<_> = self.services.iter().map(|v| v.clone()).collect(); + + let addr = Arbiter::start(move |ctx: &mut Context<_>| { + ctx.add_message_stream(rx); + let handlers: Vec<_> = services.into_iter().map(|s| s.create(conns.clone())).collect(); + Worker::new(conns, handlers) + }); + + (addr, worker) + } +} + +impl Actor for Server +{ + type Context = Context; +} + +/// Signals support +/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system +/// message to `System` actor. 
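Editorial note, not part of the patch: besides the OS-signal handling implemented below, the `Addr<Server>` returned by `start()` can drive a shutdown programmatically. A sketch, assuming the `Server` and `StopServer` re-exports from `actix_web::server` introduced by this patch series.

```rust
use actix::Addr;
use actix_web::server::{Server, StopServer};

fn shutdown(server: Addr<Server>) {
    // With `graceful: true`, workers get `shutdown_timeout` seconds to
    // finish in-flight requests before they are dropped.
    server.do_send(StopServer { graceful: true });
}
```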
+impl Handler for Server { + type Result = (); + + fn handle(&mut self, msg: signal::Signal, ctx: &mut Context) { + match msg.0 { + signal::SignalType::Int => { + info!("SIGINT received, exiting"); + self.exit = true; + Handler::::handle(self, StopServer { graceful: false }, ctx); + } + signal::SignalType::Term => { + info!("SIGTERM received, stopping"); + self.exit = true; + Handler::::handle(self, StopServer { graceful: true }, ctx); + } + signal::SignalType::Quit => { + info!("SIGQUIT received, exiting"); + self.exit = true; + Handler::::handle(self, StopServer { graceful: false }, ctx); + } + _ => (), + } + } +} + +impl Handler for Server { + type Result = (); + + fn handle(&mut self, _: PauseServer, _: &mut Context) { + self.accept.send(Command::Pause); + } +} + +impl Handler for Server { + type Result = (); + + fn handle(&mut self, _: ResumeServer, _: &mut Context) { + self.accept.send(Command::Resume); + } +} + +impl Handler for Server { + type Result = Response<(), ()>; + + fn handle(&mut self, msg: StopServer, ctx: &mut Context) -> Self::Result { + // stop accept thread + self.accept.send(Command::Stop); + + // stop workers + let (tx, rx) = mpsc::channel(1); + + let dur = if msg.graceful { + Some(Duration::new(u64::from(self.shutdown_timeout), 0)) + } else { + None + }; + for worker in &self.workers { + let tx2 = tx.clone(); + ctx.spawn( + worker + .1 + .send(StopWorker { graceful: dur }) + .into_actor(self) + .then(move |_, slf, ctx| { + slf.workers.pop(); + if slf.workers.is_empty() { + let _ = tx2.send(()); + + // we need to stop system if server was spawned + if slf.exit { + ctx.run_later(Duration::from_millis(300), |_, _| { + System::current().stop(); + }); + } + } + + fut::ok(()) + }), + ); + } + + if !self.workers.is_empty() { + Response::async(rx.into_future().map(|_| ()).map_err(|_| ())) + } else { + // we need to stop system if server was spawned + if self.exit { + ctx.run_later(Duration::from_millis(300), |_, _| { + System::current().stop(); + }); + } + Response::reply(Ok(())) + } + } +} + +/// Commands from accept threads +impl StreamHandler for Server { + fn finished(&mut self, _: &mut Context) {} + + fn handle(&mut self, msg: ServerCommand, _: &mut Context) { + match msg { + ServerCommand::WorkerDied(idx) => { + let mut found = false; + for i in 0..self.workers.len() { + if self.workers[i].0 == idx { + self.workers.swap_remove(i); + found = true; + break; + } + } + + if found { + error!("Worker has died {:?}, restarting", idx); + + let mut new_idx = self.workers.len(); + 'found: loop { + for i in 0..self.workers.len() { + if self.workers[i].0 == new_idx { + new_idx += 1; + continue 'found; + } + } + break; + } + + let (addr, worker) = self.start_worker(new_idx, self.accept.get_notify()); + self.workers.push((new_idx, addr)); + self.accept.send(Command::Worker(worker)); + } + } + } + } +} + +#[derive(Clone, Default)] +pub struct Connections (Arc); + +impl Connections { + fn new(notify: AcceptNotify, maxconn: usize, maxconnrate: usize) -> Self { + let maxconn_low = if maxconn > 10 { maxconn - 10 } else { 0 }; + let maxconnrate_low = if maxconnrate > 10 { + maxconnrate - 10 + } else { + 0 + }; + + Connections ( + Arc::new(ConnectionsInner { + notify, + maxconn, maxconnrate, + maxconn_low, maxconnrate_low, + conn: AtomicUsize::new(0), + connrate: AtomicUsize::new(0), + })) + } + + pub(crate) fn available(&self) -> bool { + self.0.available() + } + + pub(crate) fn num_connections(&self) -> usize { + self.0.conn.load(Ordering::Relaxed) + } + + /// Report opened connection 
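Editorial note, not part of the patch: the `Connections` counter below hands out RAII tags; whoever serves a connection holds a tag for its lifetime and the count drops automatically. A sketch with an illustrative `on_accept` function, assuming `Connections` is re-exported as `actix_web::server::Connections`.

```rust
use std::net::TcpStream;

use actix_web::server::Connections;

fn on_accept(conns: &Connections, stream: TcpStream) {
    // Register the connection; once `maxconn` tags are alive the accept
    // loop sees no capacity left and applies back-pressure.
    let _tag = conns.connection();

    // ... serve `stream` while `_tag` stays alive ...
    drop(stream);

    // `_tag` is dropped here, the count decreases, and the accept loop is
    // notified so it can resume accepting connections.
}
```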
+ pub fn connection(&self) -> ConnectionTag { + ConnectionTag::new(self.0.clone()) + } + + /// Report rate connection, rate is usually ssl handshake + pub fn connection_rate(&self) -> ConnectionRateTag { + ConnectionRateTag::new(self.0.clone()) + } +} + +#[derive(Default)] +struct ConnectionsInner { + notify: AcceptNotify, + conn: AtomicUsize, + connrate: AtomicUsize, + maxconn: usize, + maxconnrate: usize, + maxconn_low: usize, + maxconnrate_low: usize, +} + +impl ConnectionsInner { + fn available(&self) -> bool { + if self.maxconnrate <= self.connrate.load(Ordering::Relaxed) { + false + } else { + self.maxconn > self.conn.load(Ordering::Relaxed) + } + } + + fn notify_maxconn(&self, maxconn: usize) { + if maxconn > self.maxconn_low && maxconn <= self.maxconn { + self.notify.notify(); + } + } + + fn notify_maxconnrate(&self, connrate: usize) { + if connrate > self.maxconnrate_low && connrate <= self.maxconnrate { + self.notify.notify(); + } + } + +} + +/// Type responsible for max connection stat. +/// +/// Max connections stat get updated on drop. +pub struct ConnectionTag(Arc); + +impl ConnectionTag { + fn new(inner: Arc) -> Self { + inner.conn.fetch_add(1, Ordering::Relaxed); + ConnectionTag(inner) + } +} + +impl Drop for ConnectionTag { + fn drop(&mut self) { + let conn = self.0.conn.fetch_sub(1, Ordering::Relaxed); + self.0.notify_maxconn(conn); + } +} + +/// Type responsible for max connection rate stat. +/// +/// Max connections rate stat get updated on drop. +pub struct ConnectionRateTag (Arc); + +impl ConnectionRateTag { + fn new(inner: Arc) -> Self { + inner.connrate.fetch_add(1, Ordering::Relaxed); + ConnectionRateTag(inner) + } +} + +impl Drop for ConnectionRateTag { + fn drop(&mut self) { + let connrate = self.0.connrate.fetch_sub(1, Ordering::Relaxed); + self.0.notify_maxconnrate(connrate); + } +} diff --git a/src/server/settings.rs b/src/server/settings.rs index 508be67dd..e9ca0f851 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -2,19 +2,22 @@ use std::cell::{RefCell, RefMut, UnsafeCell}; use std::collections::VecDeque; use std::fmt::Write; use std::rc::Rc; -use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; +use std::time::{Duration, Instant}; use std::{env, fmt, net}; +use actix::Arbiter; use bytes::BytesMut; +use futures::Stream; use futures_cpupool::CpuPool; use http::StatusCode; use lazycell::LazyCell; use parking_lot::Mutex; use time; +use tokio_timer::Interval; -use super::accept::AcceptNotify; use super::channel::Node; use super::message::{Request, RequestPool}; +use super::server::{ConnectionRateTag, ConnectionTag, Connections}; use super::KeepAlive; use body::Body; use httpresponse::{HttpResponse, HttpResponseBuilder, HttpResponsePool}; @@ -137,17 +140,36 @@ pub(crate) struct WorkerSettings { ka_enabled: bool, bytes: Rc, messages: &'static RequestPool, - channels: Arc, + conns: Connections, node: RefCell>, date: UnsafeCell, - connrate: Arc, - notify: AcceptNotify, +} + +impl WorkerSettings { + pub(crate) fn create( + apps: Vec, keep_alive: KeepAlive, settings: ServerSettings, + conns: Connections, + ) -> Rc> { + let settings = Rc::new(Self::new(apps, keep_alive, settings, conns)); + + // periodic date update + let s = settings.clone(); + Arbiter::spawn( + Interval::new(Instant::now(), Duration::from_secs(1)) + .map_err(|_| ()) + .and_then(move |_| { + s.update_date(); + Ok(()) + }).fold((), |(), _| Ok(())), + ); + + settings + } } impl WorkerSettings { pub(crate) fn new( - h: Vec, keep_alive: KeepAlive, settings: ServerSettings, - 
notify: AcceptNotify, channels: Arc, connrate: Arc, + h: Vec, keep_alive: KeepAlive, settings: ServerSettings, conns: Connections, ) -> WorkerSettings { let (keep_alive, ka_enabled) = match keep_alive { KeepAlive::Timeout(val) => (val as u64, true), @@ -163,16 +185,10 @@ impl WorkerSettings { date: UnsafeCell::new(Date::new()), keep_alive, ka_enabled, - channels, - connrate, - notify, + conns, } } - pub fn num_channels(&self) -> usize { - self.channels.load(Ordering::Relaxed) - } - pub fn head(&self) -> RefMut> { self.node.borrow_mut() } @@ -201,16 +217,11 @@ impl WorkerSettings { RequestPool::get(self.messages) } - pub fn add_channel(&self) { - self.channels.fetch_add(1, Ordering::Relaxed); + pub fn connection(&self) -> ConnectionTag { + self.conns.connection() } - pub fn remove_channel(&self) { - let val = self.channels.fetch_sub(1, Ordering::Relaxed); - self.notify.notify_maxconn(val); - } - - pub fn update_date(&self) { + fn update_date(&self) { // Unsafe: WorkerSetting is !Sync and !Send unsafe { &mut *self.date.get() }.update(); } @@ -230,13 +241,8 @@ impl WorkerSettings { } #[allow(dead_code)] - pub(crate) fn conn_rate_add(&self) { - self.connrate.fetch_add(1, Ordering::Relaxed); - } - #[allow(dead_code)] - pub(crate) fn conn_rate_del(&self) { - let val = self.connrate.fetch_sub(1, Ordering::Relaxed); - self.notify.notify_maxconnrate(val); + pub(crate) fn connection_rate(&self) -> ConnectionRateTag { + self.conns.connection_rate() } } @@ -309,9 +315,7 @@ mod tests { Vec::new(), KeepAlive::Os, ServerSettings::default(), - AcceptNotify::default(), - Arc::new(AtomicUsize::new(0)), - Arc::new(AtomicUsize::new(0)), + Connections::default(), ); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); settings.set_date(&mut buf1, true); diff --git a/src/server/srv.rs b/src/server/srv.rs index c2bb6c819..eaf7802c7 100644 --- a/src/server/srv.rs +++ b/src/server/srv.rs @@ -1,16 +1,14 @@ +use std::marker::PhantomData; use std::rc::Rc; -use std::sync::{atomic::AtomicUsize, Arc}; -use std::time::Duration; -use std::{io, net}; +use std::sync::Arc; +use std::{io, mem, net, time}; -use actix::{ - fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, Context, Handler, - Response, StreamHandler, System, WrapFuture, -}; +use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System}; -use futures::sync::mpsc; -use futures::{Future, Sink, Stream}; +use futures::{Future, Stream}; +use net2::{TcpBuilder, TcpStreamExt}; use num_cpus; +use tokio::executor::current_thread; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_tcp::TcpStream; @@ -23,39 +21,33 @@ use openssl::ssl::SslAcceptorBuilder; #[cfg(feature = "rust-tls")] use rustls::ServerConfig; -use super::accept::{AcceptLoop, AcceptNotify, Command}; use super::channel::{HttpChannel, WrapperStream}; +use super::server::{Connections, Server, Service, ServiceHandler}; use super::settings::{ServerSettings, WorkerSettings}; -use super::worker::{Conn, StopWorker, Token, Worker, WorkerClient, WorkerFactory}; -use super::{AcceptorService, IntoHttpHandler, IoStream, KeepAlive}; -use super::{PauseServer, ResumeServer, StopServer}; +use super::worker::{Conn, Socket}; +use super::{ + AcceptorService, HttpHandler, IntoAsyncIo, IntoHttpHandler, IoStream, KeepAlive, + Token, +}; /// An HTTP Server pub struct HttpServer where H: IntoHttpHandler + 'static, { + factory: Arc Vec + Send + Sync>, + host: Option, + keep_alive: KeepAlive, + backlog: i32, threads: usize, - factory: WorkerFactory, - workers: Vec<(usize, Addr)>, - accept: AcceptLoop, 
exit: bool, shutdown_timeout: u16, - signals: Option>, no_http2: bool, no_signals: bool, - settings: Option>>, -} - -pub(crate) enum ServerCommand { - WorkerDied(usize), -} - -impl Actor for HttpServer -where - H: IntoHttpHandler, -{ - type Context = Context; + maxconn: usize, + maxconnrate: usize, + sockets: Vec, + handlers: Vec>>, } impl HttpServer @@ -72,15 +64,19 @@ where HttpServer { threads: num_cpus::get(), - factory: WorkerFactory::new(f), - workers: Vec::new(), - accept: AcceptLoop::new(), - exit: false, + factory: Arc::new(f), + host: None, + backlog: 2048, + keep_alive: KeepAlive::Os, shutdown_timeout: 30, - signals: None, + exit: true, no_http2: false, no_signals: false, - settings: None, + maxconn: 102_400, + maxconnrate: 256, + // settings: None, + sockets: Vec::new(), + handlers: Vec::new(), } } @@ -104,7 +100,7 @@ where /// /// This method should be called before `bind()` method call. pub fn backlog(mut self, num: i32) -> Self { - self.factory.backlog = num; + self.backlog = num; self } @@ -115,7 +111,7 @@ where /// /// By default max connections is set to a 100k. pub fn maxconn(mut self, num: usize) -> Self { - self.accept.maxconn(num); + self.maxconn = num; self } @@ -126,7 +122,7 @@ where /// /// By default max connections is set to a 256. pub fn maxconnrate(mut self, num: usize) -> Self { - self.accept.maxconnrate(num); + self.maxconnrate = num; self } @@ -134,7 +130,7 @@ where /// /// By default keep alive is set to a `Os`. pub fn keep_alive>(mut self, val: T) -> Self { - self.factory.keep_alive = val.into(); + self.keep_alive = val.into(); self } @@ -144,7 +140,7 @@ where /// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo. /// html#method.host) documentation for more information. pub fn server_hostname(mut self, val: String) -> Self { - self.factory.host = Some(val); + self.host = Some(val); self } @@ -156,12 +152,6 @@ where self } - /// Set alternative address for `ProcessSignals` actor. - pub fn signals(mut self, addr: Addr) -> Self { - self.signals = Some(addr); - self - } - /// Disable signal handling pub fn disable_signals(mut self) -> Self { self.no_signals = true; @@ -182,7 +172,10 @@ where /// Disable `HTTP/2` support #[doc(hidden)] - #[deprecated(since = "0.7.4", note = "please use acceptor service with proper ServerFlags parama")] + #[deprecated( + since = "0.7.4", + note = "please use acceptor service with proper ServerFlags parama" + )] pub fn no_http2(mut self) -> Self { self.no_http2 = true; self @@ -190,7 +183,7 @@ where /// Get addresses of bound sockets. pub fn addrs(&self) -> Vec { - self.factory.addrs() + self.sockets.iter().map(|s| s.addr).collect() } /// Get addresses of bound sockets and the scheme for it. @@ -200,7 +193,10 @@ where /// and the user should be presented with an enumeration of which /// socket requires which protocol. pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> { - self.factory.addrs_with_scheme() + self.handlers + .iter() + .map(|s| (s.addr(), s.scheme())) + .collect() } /// Use listener for accepting incoming connection requests @@ -208,19 +204,29 @@ where /// HttpServer does not change any configuration for TcpListener, /// it needs to be configured before passing it to listen() method. 
pub fn listen(mut self, lst: net::TcpListener) -> Self { - self.factory.listen(lst); + let token = Token(self.handlers.len()); + let addr = lst.local_addr().unwrap(); + self.handlers + .push(Box::new(SimpleHandler::new(lst.local_addr().unwrap()))); + self.sockets.push(Socket { lst, addr, token }); + self } /// Use listener for accepting incoming connection requests - pub fn listen_with( - mut self, lst: net::TcpListener, acceptor: A, - ) -> io::Result + pub fn listen_with(mut self, lst: net::TcpListener, acceptor: A) -> Self where A: AcceptorService + Send + 'static, { - self.factory.listen_with(lst, acceptor); - Ok(self) + let token = Token(self.handlers.len()); + let addr = lst.local_addr().unwrap(); + self.handlers.push(Box::new(StreamHandler::new( + lst.local_addr().unwrap(), + acceptor, + ))); + self.sockets.push(Socket { lst, addr, token }); + + self } #[cfg(feature = "tls")] @@ -233,12 +239,10 @@ where /// /// HttpServer does not change any configuration for TcpListener, /// it needs to be configured before passing it to listen() method. - pub fn listen_tls( - self, lst: net::TcpListener, acceptor: TlsAcceptor, - ) -> io::Result { + pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { use super::NativeTlsAcceptor; - self.listen_with(lst, NativeTlsAcceptor::new(acceptor)) + Ok(self.listen_with(lst, NativeTlsAcceptor::new(acceptor))) } #[cfg(feature = "alpn")] @@ -262,7 +266,7 @@ where ServerFlags::HTTP1 | ServerFlags::HTTP2 }; - self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?) + Ok(self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?)) } #[cfg(feature = "rust-tls")] @@ -274,9 +278,7 @@ where /// Use listener for accepting incoming tls connection requests /// /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn listen_rustls( - self, lst: net::TcpListener, builder: ServerConfig, - ) -> io::Result { + pub fn listen_rustls(self, lst: net::TcpListener, builder: ServerConfig) -> Self { use super::{RustlsAcceptor, ServerFlags}; // alpn support @@ -293,7 +295,16 @@ where /// /// To bind multiple addresses this method can be called multiple times. pub fn bind(mut self, addr: S) -> io::Result { - self.factory.bind(addr)?; + let sockets = self.bind2(addr)?; + + for lst in sockets { + let token = Token(self.handlers.len()); + let addr = lst.local_addr().unwrap(); + self.handlers + .push(Box::new(SimpleHandler::new(lst.local_addr().unwrap()))); + self.sockets.push(Socket { lst, addr, token }) + } + Ok(self) } @@ -304,10 +315,51 @@ where S: net::ToSocketAddrs, A: AcceptorService + Send + 'static, { - self.factory.bind_with(addr, &acceptor)?; + let sockets = self.bind2(addr)?; + + for lst in sockets { + let token = Token(self.handlers.len()); + let addr = lst.local_addr().unwrap(); + self.handlers.push(Box::new(StreamHandler::new( + lst.local_addr().unwrap(), + acceptor.clone(), + ))); + self.sockets.push(Socket { lst, addr, token }) + } + Ok(self) } + fn bind2( + &self, addr: S, + ) -> io::Result> { + let mut err = None; + let mut succ = false; + let mut sockets = Vec::new(); + for addr in addr.to_socket_addrs()? 
{ + match create_tcp_listener(addr, self.backlog) { + Ok(lst) => { + succ = true; + sockets.push(lst); + } + Err(e) => err = Some(e), + } + } + + if !succ { + if let Some(e) = err.take() { + Err(e) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "Can not bind to address.", + )) + } + } else { + Ok(sockets) + } + } + #[cfg(feature = "tls")] #[doc(hidden)] #[deprecated( @@ -373,37 +425,59 @@ where self.bind_with(addr, RustlsAcceptor::with_flags(builder, flags)) } +} - fn start_workers(&mut self, notify: &AcceptNotify) -> Vec { - // start workers - let mut workers = Vec::new(); - for idx in 0..self.threads { - let (worker, addr) = self.factory.start(idx, notify.clone()); - workers.push(worker); - self.workers.push((idx, addr)); - } - info!("Starting {} http workers", self.threads); - workers +impl Into> for HttpServer { + fn into(self) -> Box { + Box::new(HttpService { + factory: self.factory, + host: self.host, + keep_alive: self.keep_alive, + handlers: self.handlers, + }) + } +} + +struct HttpService { + factory: Arc Vec + Send + Sync>, + host: Option, + keep_alive: KeepAlive, + handlers: Vec>>, +} + +impl Service for HttpService { + fn clone(&self) -> Box { + Box::new(HttpService { + factory: self.factory.clone(), + host: self.host.clone(), + keep_alive: self.keep_alive, + handlers: self.handlers.iter().map(|v| v.clone()).collect(), + }) } - // subscribe to os signals - fn subscribe_to_signals(&self) -> Option> { - if !self.no_signals { - if let Some(ref signals) = self.signals { - Some(signals.clone()) - } else { - Some(System::current().registry().get::()) - } - } else { - None - } + fn create(&self, conns: Connections) -> Box { + let addr = self.handlers[0].addr(); + let s = ServerSettings::new(Some(addr), &self.host, false); + let apps: Vec<_> = (*self.factory)() + .into_iter() + .map(|h| h.into_handler()) + .collect(); + let handlers = self.handlers.iter().map(|h| h.clone()).collect(); + + Box::new(HttpServiceHandler::new( + apps, + handlers, + self.keep_alive, + s, + conns, + )) } } impl HttpServer { /// Start listening for incoming connections. /// - /// This method starts number of http handler workers in separate threads. + /// This method starts number of http workers in separate threads. /// For each address this method starts separate thread which does /// `accept()` in a loop. 
/// @@ -426,31 +500,25 @@ impl HttpServer { /// sys.run(); // <- Run actix system, this method starts all async processes /// } /// ``` - pub fn start(mut self) -> Addr { - let sockets = self.factory.take_sockets(); - if sockets.is_empty() { - panic!("HttpServer::bind() has to be called before start()"); + pub fn start(mut self) -> Addr { + let mut srv = Server::new() + .workers(self.threads) + .maxconn(self.maxconn) + .maxconnrate(self.maxconnrate) + .shutdown_timeout(self.shutdown_timeout); + + srv = if self.exit { srv.system_exit() } else { srv }; + srv = if self.no_signals { + srv.disable_signals() } else { - let notify = self.accept.get_notify(); - let workers = self.start_workers(¬ify); + srv + }; - // start accept thread - for sock in &sockets { - info!("Starting server on http://{}", sock.addr); - } - let rx = self.accept.start(sockets, workers.clone()); - - // start http server actor - let signals = self.subscribe_to_signals(); - let addr = Actor::create(move |ctx| { - ctx.add_stream(rx); - self - }); - if let Some(signals) = signals { - signals.do_send(signal::Subscribe(addr.clone().recipient())) - } - addr - } + let sockets: Vec<_> = mem::replace(&mut self.sockets, Vec::new()) + .into_iter() + .map(|item| (item.token, item.lst)) + .collect(); + srv.service(self, sockets).start() } /// Spawn new thread and start listening for incoming connections. @@ -484,195 +552,279 @@ impl HttpServer { /// Start listening for incoming connections from a stream. /// /// This method uses only one thread for handling incoming connections. - pub fn start_incoming(mut self, stream: S, secure: bool) -> Addr + pub fn start_incoming(self, stream: S, secure: bool) where S: Stream + Send + 'static, T: AsyncRead + AsyncWrite + Send + 'static, { // set server settings let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); - let settings = ServerSettings::new(Some(addr), &self.factory.host, secure); - let apps: Vec<_> = (*self.factory.factory)() + let srv_settings = ServerSettings::new(Some(addr), &self.host, secure); + let apps: Vec<_> = (*self.factory)() .into_iter() .map(|h| h.into_handler()) .collect(); - self.settings = Some(Rc::new(WorkerSettings::new( + let settings = WorkerSettings::create( apps, - self.factory.keep_alive, - settings, - AcceptNotify::default(), - Arc::new(AtomicUsize::new(0)), - Arc::new(AtomicUsize::new(0)), - ))); + self.keep_alive, + srv_settings, + Connections::default(), + ); // start server - let signals = self.subscribe_to_signals(); - let addr = HttpServer::create(move |ctx| { + HttpIncoming::create(move |ctx| { ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn { io: WrapperStream::new(t), + handler: Token::new(0), token: Token::new(0), peer: None, })); - self + HttpIncoming { settings } }); - - if let Some(signals) = signals { - signals.do_send(signal::Subscribe(addr.clone().recipient())) - } - addr } } -/// Signals support -/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system -/// message to `System` actor. 
-impl Handler for HttpServer { - type Result = (); - - fn handle(&mut self, msg: signal::Signal, ctx: &mut Context) { - match msg.0 { - signal::SignalType::Int => { - info!("SIGINT received, exiting"); - self.exit = true; - Handler::::handle(self, StopServer { graceful: false }, ctx); - } - signal::SignalType::Term => { - info!("SIGTERM received, stopping"); - self.exit = true; - Handler::::handle(self, StopServer { graceful: true }, ctx); - } - signal::SignalType::Quit => { - info!("SIGQUIT received, exiting"); - self.exit = true; - Handler::::handle(self, StopServer { graceful: false }, ctx); - } - _ => (), - } - } +struct HttpIncoming { + settings: Rc>, } -/// Commands from accept threads -impl StreamHandler for HttpServer { - fn finished(&mut self, _: &mut Context) {} - - fn handle(&mut self, msg: ServerCommand, _: &mut Context) { - match msg { - ServerCommand::WorkerDied(idx) => { - let mut found = false; - for i in 0..self.workers.len() { - if self.workers[i].0 == idx { - self.workers.swap_remove(i); - found = true; - break; - } - } - - if found { - error!("Worker has died {:?}, restarting", idx); - - let mut new_idx = self.workers.len(); - 'found: loop { - for i in 0..self.workers.len() { - if self.workers[i].0 == new_idx { - new_idx += 1; - continue 'found; - } - } - break; - } - - let (worker, addr) = - self.factory.start(new_idx, self.accept.get_notify()); - self.workers.push((new_idx, addr)); - self.accept.send(Command::Worker(worker)); - } - } - } - } +impl Actor for HttpIncoming +where + H: HttpHandler, +{ + type Context = Context; } -impl Handler> for HttpServer +impl Handler> for HttpIncoming where T: IoStream, - H: IntoHttpHandler, + H: HttpHandler, { type Result = (); fn handle(&mut self, msg: Conn, _: &mut Context) -> Self::Result { Arbiter::spawn(HttpChannel::new( - Rc::clone(self.settings.as_ref().unwrap()), + Rc::clone(&self.settings), msg.io, msg.peer, )); } } -impl Handler for HttpServer { - type Result = (); - - fn handle(&mut self, _: PauseServer, _: &mut Context) { - self.accept.send(Command::Pause); - } +struct HttpServiceHandler +where + H: HttpHandler + 'static, +{ + settings: Rc>, + handlers: Vec>>, + tcp_ka: Option, } -impl Handler for HttpServer { - type Result = (); - - fn handle(&mut self, _: ResumeServer, _: &mut Context) { - self.accept.send(Command::Resume); - } -} - -impl Handler for HttpServer { - type Result = Response<(), ()>; - - fn handle(&mut self, msg: StopServer, ctx: &mut Context) -> Self::Result { - // stop accept threads - self.accept.send(Command::Stop); - - // stop workers - let (tx, rx) = mpsc::channel(1); - - let dur = if msg.graceful { - Some(Duration::new(u64::from(self.shutdown_timeout), 0)) +impl HttpServiceHandler { + fn new( + apps: Vec, handlers: Vec>>, + keep_alive: KeepAlive, settings: ServerSettings, conns: Connections, + ) -> HttpServiceHandler { + let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { + Some(time::Duration::new(val as u64, 0)) } else { None }; - for worker in &self.workers { - let tx2 = tx.clone(); - ctx.spawn( - worker - .1 - .send(StopWorker { graceful: dur }) - .into_actor(self) - .then(move |_, slf, ctx| { - slf.workers.pop(); - if slf.workers.is_empty() { - let _ = tx2.send(()); + let settings = WorkerSettings::create(apps, keep_alive, settings, conns); - // we need to stop system if server was spawned - if slf.exit { - ctx.run_later(Duration::from_millis(300), |_, _| { - System::current().stop(); - }); - } - } - - fut::ok(()) - }), - ); - } - - if !self.workers.is_empty() { - 
Response::async(rx.into_future().map(|_| ()).map_err(|_| ())) - } else { - // we need to stop system if server was spawned - if self.exit { - ctx.run_later(Duration::from_millis(300), |_, _| { - System::current().stop(); - }); - } - Response::reply(Ok(())) + HttpServiceHandler { + handlers, + tcp_ka, + settings, } } -} \ No newline at end of file +} + +impl ServiceHandler for HttpServiceHandler +where + H: HttpHandler + 'static, +{ + fn handle( + &mut self, token: Token, io: net::TcpStream, peer: Option, + ) { + if self.tcp_ka.is_some() && io.set_keepalive(self.tcp_ka).is_err() { + error!("Can not set socket keep-alive option"); + } + self.handlers[token.0].handle(Rc::clone(&self.settings), io, peer); + } + + fn shutdown(&self, force: bool) { + if force { + self.settings.head().traverse::(); + } + } +} + +struct SimpleHandler { + addr: net::SocketAddr, + io: PhantomData, +} + +impl Clone for SimpleHandler { + fn clone(&self) -> Self { + SimpleHandler { + addr: self.addr, + io: PhantomData, + } + } +} + +impl SimpleHandler { + fn new(addr: net::SocketAddr) -> Self { + SimpleHandler { + addr, + io: PhantomData, + } + } +} + +impl IoStreamHandler for SimpleHandler +where + H: HttpHandler, + Io: IntoAsyncIo + Send + 'static, + Io::Io: IoStream, +{ + fn addr(&self) -> net::SocketAddr { + self.addr + } + + fn clone(&self) -> Box> { + Box::new(Clone::clone(self)) + } + + fn scheme(&self) -> &'static str { + "http" + } + + fn handle(&self, h: Rc>, io: Io, peer: Option) { + let mut io = match io.into_async_io() { + Ok(io) => io, + Err(err) => { + trace!("Failed to create async io: {}", err); + return; + } + }; + let _ = io.set_nodelay(true); + + current_thread::spawn(HttpChannel::new(h, io, peer)); + } +} + +struct StreamHandler { + acceptor: A, + addr: net::SocketAddr, + io: PhantomData, +} + +impl> StreamHandler { + fn new(addr: net::SocketAddr, acceptor: A) -> Self { + StreamHandler { + addr, + acceptor, + io: PhantomData, + } + } +} + +impl> Clone for StreamHandler { + fn clone(&self) -> Self { + StreamHandler { + addr: self.addr, + acceptor: self.acceptor.clone(), + io: PhantomData, + } + } +} + +impl IoStreamHandler for StreamHandler +where + H: HttpHandler, + Io: IntoAsyncIo + Send + 'static, + Io::Io: IoStream, + A: AcceptorService + Send + 'static, +{ + fn addr(&self) -> net::SocketAddr { + self.addr + } + + fn clone(&self) -> Box> { + Box::new(Clone::clone(self)) + } + + fn scheme(&self) -> &'static str { + self.acceptor.scheme() + } + + fn handle(&self, h: Rc>, io: Io, peer: Option) { + let mut io = match io.into_async_io() { + Ok(io) => io, + Err(err) => { + trace!("Failed to create async io: {}", err); + return; + } + }; + let _ = io.set_nodelay(true); + + let rate = h.connection_rate(); + current_thread::spawn(self.acceptor.accept(io).then(move |res| { + drop(rate); + match res { + Ok(io) => current_thread::spawn(HttpChannel::new(h, io, peer)), + Err(err) => trace!("Can not establish connection: {}", err), + } + Ok(()) + })) + } +} + +impl IoStreamHandler for Box> +where + H: HttpHandler, + Io: IntoAsyncIo, +{ + fn addr(&self) -> net::SocketAddr { + self.as_ref().addr() + } + + fn clone(&self) -> Box> { + self.as_ref().clone() + } + + fn scheme(&self) -> &'static str { + self.as_ref().scheme() + } + + fn handle(&self, h: Rc>, io: Io, peer: Option) { + self.as_ref().handle(h, io, peer) + } +} + +trait IoStreamHandler: Send +where + H: HttpHandler, +{ + fn clone(&self) -> Box>; + + fn addr(&self) -> net::SocketAddr; + + fn scheme(&self) -> &'static str; + + fn handle(&self, h: Rc>, io: 
Io, peer: Option); +} + +fn create_tcp_listener( + addr: net::SocketAddr, backlog: i32, +) -> io::Result { + let builder = match addr { + net::SocketAddr::V4(_) => TcpBuilder::new_v4()?, + net::SocketAddr::V6(_) => TcpBuilder::new_v6()?, + }; + builder.reuse_address(true)?; + builder.bind(addr)?; + Ok(builder.listen(backlog)?) +} diff --git a/src/server/worker.rs b/src/server/worker.rs index 168382e64..77128adc0 100644 --- a/src/server/worker.rs +++ b/src/server/worker.rs @@ -1,216 +1,41 @@ -use std::marker::PhantomData; -use std::rc::Rc; -use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; -use std::{io, mem, net, time}; +use std::{net, time}; -use futures::sync::mpsc::{unbounded, SendError, UnboundedSender}; +use futures::sync::mpsc::{SendError, UnboundedSender}; use futures::sync::oneshot; use futures::Future; -use net2::{TcpBuilder, TcpStreamExt}; -use tokio::executor::current_thread; -use tokio_tcp::TcpStream; use actix::msgs::StopArbiter; -use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, Message, Response}; +use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message, Response}; -use super::accept::AcceptNotify; -use super::channel::HttpChannel; -use super::settings::{ServerSettings, WorkerSettings}; -use super::{ - AcceptorService, HttpHandler, IntoAsyncIo, IntoHttpHandler, IoStream, KeepAlive, -}; +use super::server::{Connections, ServiceHandler}; +use super::Token; #[derive(Message)] pub(crate) struct Conn { pub io: T, + pub handler: Token, pub token: Token, pub peer: Option, } -#[derive(Clone, Copy)] -pub struct Token(usize); - -impl Token { - pub(crate) fn new(val: usize) -> Token { - Token(val) - } -} - pub(crate) struct Socket { pub lst: net::TcpListener, pub addr: net::SocketAddr, pub token: Token, } -pub(crate) struct WorkerFactory { - pub factory: Arc Vec + Send + Sync>, - pub host: Option, - pub keep_alive: KeepAlive, - pub backlog: i32, - sockets: Vec, - handlers: Vec>>, -} - -impl WorkerFactory { - pub fn new(factory: F) -> Self - where - F: Fn() -> Vec + Send + Sync + 'static, - { - WorkerFactory { - factory: Arc::new(factory), - host: None, - backlog: 2048, - keep_alive: KeepAlive::Os, - sockets: Vec::new(), - handlers: Vec::new(), - } - } - - pub fn addrs(&self) -> Vec { - self.sockets.iter().map(|s| s.addr).collect() - } - - pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> { - self.handlers - .iter() - .map(|s| (s.addr(), s.scheme())) - .collect() - } - - pub fn take_sockets(&mut self) -> Vec { - mem::replace(&mut self.sockets, Vec::new()) - } - - pub fn listen(&mut self, lst: net::TcpListener) { - let token = Token(self.handlers.len()); - let addr = lst.local_addr().unwrap(); - self.handlers - .push(Box::new(SimpleHandler::new(lst.local_addr().unwrap()))); - self.sockets.push(Socket { lst, addr, token }) - } - - pub fn listen_with(&mut self, lst: net::TcpListener, acceptor: A) - where - A: AcceptorService + Send + 'static, - { - let token = Token(self.handlers.len()); - let addr = lst.local_addr().unwrap(); - self.handlers.push(Box::new(StreamHandler::new( - lst.local_addr().unwrap(), - acceptor, - ))); - self.sockets.push(Socket { lst, addr, token }) - } - - pub fn bind(&mut self, addr: S) -> io::Result<()> - where - S: net::ToSocketAddrs, - { - let sockets = self.bind2(addr)?; - - for lst in sockets { - let token = Token(self.handlers.len()); - let addr = lst.local_addr().unwrap(); - self.handlers - .push(Box::new(SimpleHandler::new(lst.local_addr().unwrap()))); - self.sockets.push(Socket { lst, addr, token }) - } - 
Ok(()) - } - - pub fn bind_with(&mut self, addr: S, acceptor: &A) -> io::Result<()> - where - S: net::ToSocketAddrs, - A: AcceptorService + Send + 'static, - { - let sockets = self.bind2(addr)?; - - for lst in sockets { - let token = Token(self.handlers.len()); - let addr = lst.local_addr().unwrap(); - self.handlers.push(Box::new(StreamHandler::new( - lst.local_addr().unwrap(), - acceptor.clone(), - ))); - self.sockets.push(Socket { lst, addr, token }) - } - Ok(()) - } - - fn bind2( - &self, addr: S, - ) -> io::Result> { - let mut err = None; - let mut succ = false; - let mut sockets = Vec::new(); - for addr in addr.to_socket_addrs()? { - match create_tcp_listener(addr, self.backlog) { - Ok(lst) => { - succ = true; - sockets.push(lst); - } - Err(e) => err = Some(e), - } - } - - if !succ { - if let Some(e) = err.take() { - Err(e) - } else { - Err(io::Error::new( - io::ErrorKind::Other, - "Can not bind to address.", - )) - } - } else { - Ok(sockets) - } - } - - pub fn start( - &mut self, idx: usize, notify: AcceptNotify, - ) -> (WorkerClient, Addr) { - let host = self.host.clone(); - let addr = self.handlers[0].addr(); - let factory = Arc::clone(&self.factory); - let ka = self.keep_alive; - let (tx, rx) = unbounded::>(); - let client = WorkerClient::new(idx, tx); - let conn = client.conn.clone(); - let sslrate = client.sslrate.clone(); - let handlers: Vec<_> = self.handlers.iter().map(|v| v.clone()).collect(); - - let addr = Arbiter::start(move |ctx: &mut Context<_>| { - let s = ServerSettings::new(Some(addr), &host, false); - let apps: Vec<_> = - (*factory)().into_iter().map(|h| h.into_handler()).collect(); - ctx.add_message_stream(rx); - let inner = WorkerInner::new(apps, handlers, ka, s, conn, sslrate, notify); - Worker { - inner: Box::new(inner), - } - }); - - (client, addr) - } -} - #[derive(Clone)] pub(crate) struct WorkerClient { pub idx: usize, tx: UnboundedSender>, - pub conn: Arc, - pub sslrate: Arc, + conns: Connections, } impl WorkerClient { - fn new(idx: usize, tx: UnboundedSender>) -> Self { - WorkerClient { - idx, - tx, - conn: Arc::new(AtomicUsize::new(0)), - sslrate: Arc::new(AtomicUsize::new(0)), - } + pub fn new( + idx: usize, tx: UnboundedSender>, conns: Connections, + ) -> Self { + WorkerClient { idx, tx, conns } } pub fn send( @@ -219,12 +44,8 @@ impl WorkerClient { self.tx.unbounded_send(msg) } - pub fn available(&self, maxconn: usize, maxsslrate: usize) -> bool { - if maxsslrate <= self.sslrate.load(Ordering::Relaxed) { - false - } else { - maxconn > self.conn.load(Ordering::Relaxed) - } + pub fn available(&self) -> bool { + self.conns.available() } } @@ -243,21 +64,21 @@ impl Message for StopWorker { /// Worker accepts Socket objects via unbounded channel and start requests /// processing. 
pub(crate) struct Worker { - inner: Box, + conns: Connections, + handlers: Vec>, } impl Actor for Worker { type Context = Context; - - fn started(&mut self, ctx: &mut Self::Context) { - self.update_date(ctx); - } } impl Worker { - fn update_date(&self, ctx: &mut Context) { - self.inner.update_date(); - ctx.run_later(time::Duration::new(1, 0), |slf, ctx| slf.update_date(ctx)); + pub(crate) fn new(conns: Connections, handlers: Vec>) -> Self { + Worker { conns, handlers } + } + + fn shutdown(&self, force: bool) { + self.handlers.iter().for_each(|h| h.shutdown(force)); } fn shutdown_timeout( @@ -265,7 +86,7 @@ impl Worker { ) { // sleep for 1 second and then check again ctx.run_later(time::Duration::new(1, 0), move |slf, ctx| { - let num = slf.inner.num_channels(); + let num = slf.conns.num_connections(); if num == 0 { let _ = tx.send(true); Arbiter::current().do_send(StopArbiter(0)); @@ -273,7 +94,7 @@ impl Worker { slf.shutdown_timeout(ctx, tx, d); } else { info!("Force shutdown http worker, {} connections", num); - slf.inner.force_shutdown(); + slf.shutdown(true); let _ = tx.send(false); Arbiter::current().do_send(StopArbiter(0)); } @@ -285,7 +106,7 @@ impl Handler> for Worker { type Result = (); fn handle(&mut self, msg: Conn, _: &mut Context) { - self.inner.handle_connect(msg) + self.handlers[msg.handler.0].handle(msg.token, msg.io, msg.peer) } } @@ -294,253 +115,25 @@ impl Handler for Worker { type Result = Response; fn handle(&mut self, msg: StopWorker, ctx: &mut Context) -> Self::Result { - let num = self.inner.num_channels(); + let num = self.conns.num_connections(); if num == 0 { info!("Shutting down http worker, 0 connections"); Response::reply(Ok(true)) } else if let Some(dur) = msg.graceful { - info!("Graceful http worker shutdown, {} connections", num); + self.shutdown(false); let (tx, rx) = oneshot::channel(); - self.shutdown_timeout(ctx, tx, dur); - Response::async(rx.map_err(|_| ())) + let num = self.conns.num_connections(); + if num != 0 { + info!("Graceful http worker shutdown, {} connections", num); + self.shutdown_timeout(ctx, tx, dur); + Response::reply(Ok(true)) + } else { + Response::async(rx.map_err(|_| ())) + } } else { info!("Force shutdown http worker, {} connections", num); - self.inner.force_shutdown(); + self.shutdown(true); Response::reply(Ok(false)) } } } - -trait WorkerHandler { - fn update_date(&self); - - fn handle_connect(&mut self, Conn); - - fn force_shutdown(&self); - - fn num_channels(&self) -> usize; -} - -struct WorkerInner -where - H: HttpHandler + 'static, -{ - settings: Rc>, - socks: Vec>>, - tcp_ka: Option, -} - -impl WorkerInner { - pub(crate) fn new( - h: Vec, socks: Vec>>, - keep_alive: KeepAlive, settings: ServerSettings, conn: Arc, - sslrate: Arc, notify: AcceptNotify, - ) -> WorkerInner { - let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { - Some(time::Duration::new(val as u64, 0)) - } else { - None - }; - - WorkerInner { - settings: Rc::new(WorkerSettings::new( - h, keep_alive, settings, notify, conn, sslrate, - )), - socks, - tcp_ka, - } - } -} - -impl WorkerHandler for WorkerInner -where - H: HttpHandler + 'static, -{ - fn update_date(&self) { - self.settings.update_date(); - } - - fn handle_connect(&mut self, msg: Conn) { - if self.tcp_ka.is_some() && msg.io.set_keepalive(self.tcp_ka).is_err() { - error!("Can not set socket keep-alive option"); - } - self.socks[msg.token.0].handle(Rc::clone(&self.settings), msg.io, msg.peer); - } - - fn num_channels(&self) -> usize { - self.settings.num_channels() - } - - fn force_shutdown(&self) { 
- self.settings.head().traverse::(); - } -} - -struct SimpleHandler { - addr: net::SocketAddr, - io: PhantomData, -} - -impl Clone for SimpleHandler { - fn clone(&self) -> Self { - SimpleHandler { - addr: self.addr, - io: PhantomData, - } - } -} - -impl SimpleHandler { - fn new(addr: net::SocketAddr) -> Self { - SimpleHandler { - addr, - io: PhantomData, - } - } -} - -impl IoStreamHandler for SimpleHandler -where - H: HttpHandler, - Io: IntoAsyncIo + Send + 'static, - Io::Io: IoStream, -{ - fn addr(&self) -> net::SocketAddr { - self.addr - } - - fn clone(&self) -> Box> { - Box::new(Clone::clone(self)) - } - - fn scheme(&self) -> &'static str { - "http" - } - - fn handle(&self, h: Rc>, io: Io, peer: Option) { - let mut io = match io.into_async_io() { - Ok(io) => io, - Err(err) => { - trace!("Failed to create async io: {}", err); - return; - } - }; - let _ = io.set_nodelay(true); - - current_thread::spawn(HttpChannel::new(h, io, peer)); - } -} - -struct StreamHandler { - acceptor: A, - addr: net::SocketAddr, - io: PhantomData, -} - -impl> StreamHandler { - fn new(addr: net::SocketAddr, acceptor: A) -> Self { - StreamHandler { - addr, - acceptor, - io: PhantomData, - } - } -} - -impl> Clone for StreamHandler { - fn clone(&self) -> Self { - StreamHandler { - addr: self.addr, - acceptor: self.acceptor.clone(), - io: PhantomData, - } - } -} - -impl IoStreamHandler for StreamHandler -where - H: HttpHandler, - Io: IntoAsyncIo + Send + 'static, - Io::Io: IoStream, - A: AcceptorService + Send + 'static, -{ - fn addr(&self) -> net::SocketAddr { - self.addr - } - - fn clone(&self) -> Box> { - Box::new(Clone::clone(self)) - } - - fn scheme(&self) -> &'static str { - self.acceptor.scheme() - } - - fn handle(&self, h: Rc>, io: Io, peer: Option) { - let mut io = match io.into_async_io() { - Ok(io) => io, - Err(err) => { - trace!("Failed to create async io: {}", err); - return; - } - }; - let _ = io.set_nodelay(true); - - h.conn_rate_add(); - current_thread::spawn(self.acceptor.accept(io).then(move |res| { - h.conn_rate_del(); - match res { - Ok(io) => current_thread::spawn(HttpChannel::new(h, io, peer)), - Err(err) => trace!("Can not establish connection: {}", err), - } - Ok(()) - })) - } -} - -impl IoStreamHandler for Box> -where - H: HttpHandler, - Io: IntoAsyncIo, -{ - fn addr(&self) -> net::SocketAddr { - self.as_ref().addr() - } - - fn clone(&self) -> Box> { - self.as_ref().clone() - } - - fn scheme(&self) -> &'static str { - self.as_ref().scheme() - } - - fn handle(&self, h: Rc>, io: Io, peer: Option) { - self.as_ref().handle(h, io, peer) - } -} - -pub(crate) trait IoStreamHandler: Send -where - H: HttpHandler, -{ - fn clone(&self) -> Box>; - - fn addr(&self) -> net::SocketAddr; - - fn scheme(&self) -> &'static str; - - fn handle(&self, h: Rc>, io: Io, peer: Option); -} - -fn create_tcp_listener( - addr: net::SocketAddr, backlog: i32, -) -> io::Result { - let builder = match addr { - net::SocketAddr::V4(_) => TcpBuilder::new_v4()?, - net::SocketAddr::V6(_) => TcpBuilder::new_v6()?, - }; - builder.reuse_address(true)?; - builder.bind(addr)?; - Ok(builder.listen(backlog)?) 
-} diff --git a/src/test.rs b/src/test.rs index 42f511749..92aa6c8d2 100644 --- a/src/test.rs +++ b/src/test.rs @@ -17,6 +17,8 @@ use tokio::runtime::current_thread::Runtime; use openssl::ssl::SslAcceptorBuilder; #[cfg(feature = "rust-tls")] use rustls::ServerConfig; +#[cfg(feature = "alpn")] +use server::OpensslAcceptor; #[cfg(feature = "rust-tls")] use server::RustlsAcceptor; @@ -326,7 +328,7 @@ impl TestServerBuilder { config(&mut app); vec![app] }).workers(1) - .disable_signals(); + .disable_signals(); tx.send((System::current(), addr, TestServer::get_conn())) .unwrap(); @@ -336,7 +338,7 @@ impl TestServerBuilder { let ssl = self.ssl.take(); if let Some(ssl) = ssl { let tcp = net::TcpListener::bind(addr).unwrap(); - srv = srv.listen_ssl(tcp, ssl).unwrap(); + srv = srv.listen_with(tcp, OpensslAcceptor::new(ssl).unwrap()); } } #[cfg(feature = "rust-tls")] @@ -344,7 +346,7 @@ impl TestServerBuilder { let ssl = self.rust_ssl.take(); if let Some(ssl) = ssl { let tcp = net::TcpListener::bind(addr).unwrap(); - srv = srv.listen_with(tcp, RustlsAcceptor::new(ssl)).unwrap(); + srv = srv.listen_with(tcp, RustlsAcceptor::new(ssl)); } } if !has_ssl { @@ -722,8 +724,9 @@ impl TestRequest { /// This method generates `HttpRequest` instance and executes handler pub fn execute(self, f: F) -> Result - where F: FnOnce(&HttpRequest) -> R, - R: Responder + 'static, + where + F: FnOnce(&HttpRequest) -> R, + R: Responder + 'static, { let req = self.finish(); let resp = f(&req); From 2e8d67e2aecfb850c341146bce5ccf60ff04f73b Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 9 Aug 2018 13:08:59 -0700 Subject: [PATCH 053/219] upgrade native-tls package --- CHANGES.md | 4 ++ Cargo.toml | 5 +- src/lib.rs | 2 - src/server/{srv.rs => http.rs} | 2 +- src/server/mod.rs | 7 ++- src/server/ssl/mod.rs | 2 +- src/server/ssl/nativetls.rs | 111 +++++++++++++++++++++++++++------ 7 files changed, 104 insertions(+), 29 deletions(-) rename src/server/{srv.rs => http.rs} (99%) diff --git a/CHANGES.md b/CHANGES.md index bfd86a1a3..3dbb3795f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,6 +10,10 @@ * Allow to customize connection handshake process via `HttpServer::listen_with()` and `HttpServer::bind_with()` methods +### Changed + +* native-tls - 0.2 + ### Fixed * Use zlib instead of raw deflate for decoding and encoding payloads with diff --git a/Cargo.toml b/Cargo.toml index 86cb53d10..3bfac16c1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ path = "src/lib.rs" default = ["session", "brotli", "flate2-c"] # tls -tls = ["native-tls", "tokio-tls"] +tls = ["native-tls"] # openssl alpn = ["openssl", "tokio-openssl"] @@ -100,8 +100,7 @@ tokio-timer = "0.2" tokio-reactor = "0.1" # native-tls -native-tls = { version="0.1", optional = true } -tokio-tls = { version="0.1", optional = true } +native-tls = { version="0.2", optional = true } # openssl openssl = { version="0.10", optional = true } diff --git a/src/lib.rs b/src/lib.rs index 626bb95f8..ed02b1b69 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -143,8 +143,6 @@ extern crate serde_derive; #[cfg(feature = "tls")] extern crate native_tls; -#[cfg(feature = "tls")] -extern crate tokio_tls; #[cfg(feature = "openssl")] extern crate openssl; diff --git a/src/server/srv.rs b/src/server/http.rs similarity index 99% rename from src/server/srv.rs rename to src/server/http.rs index eaf7802c7..5deaf029b 100644 --- a/src/server/srv.rs +++ b/src/server/http.rs @@ -242,7 +242,7 @@ where pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { use 
super::NativeTlsAcceptor; - Ok(self.listen_with(lst, NativeTlsAcceptor::new(acceptor))) + self.listen_with(lst, NativeTlsAcceptor::new(acceptor)) } #[cfg(feature = "alpn")] diff --git a/src/server/mod.rs b/src/server/mod.rs index f34497936..67952e433 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -23,22 +23,23 @@ pub(crate) mod message; pub(crate) mod output; mod server; pub(crate) mod settings; -mod srv; +mod http; mod ssl; mod worker; +use actix::Message; + pub use self::message::Request; pub use self::server::{ ConnectionRateTag, ConnectionTag, Connections, Server, Service, ServiceHandler, }; pub use self::settings::ServerSettings; -pub use self::srv::HttpServer; +pub use self::http::HttpServer; pub use self::ssl::*; #[doc(hidden)] pub use self::helpers::write_content_length; -use actix::Message; use body::Binary; use error::Error; use extensions::Extensions; diff --git a/src/server/ssl/mod.rs b/src/server/ssl/mod.rs index d99c4a584..b29a7d4a6 100644 --- a/src/server/ssl/mod.rs +++ b/src/server/ssl/mod.rs @@ -6,7 +6,7 @@ pub use self::openssl::OpensslAcceptor; #[cfg(feature = "tls")] mod nativetls; #[cfg(feature = "tls")] -pub use self::nativetls::NativeTlsAcceptor; +pub use self::nativetls::{TlsStream, NativeTlsAcceptor}; #[cfg(feature = "rust-tls")] mod rustls; diff --git a/src/server/ssl/nativetls.rs b/src/server/ssl/nativetls.rs index 8749599e9..c3f2c38d4 100644 --- a/src/server/ssl/nativetls.rs +++ b/src/server/ssl/nativetls.rs @@ -1,9 +1,9 @@ use std::net::Shutdown; use std::{io, time}; -use futures::{Future, Poll}; -use native_tls::TlsAcceptor; -use tokio_tls::{AcceptAsync, TlsAcceptorExt, TlsStream}; +use futures::{Async, Future, Poll}; +use native_tls::{self, TlsAcceptor, HandshakeError}; +use tokio_io::{AsyncRead, AsyncWrite}; use server::{AcceptorService, IoStream}; @@ -15,36 +15,41 @@ pub struct NativeTlsAcceptor { acceptor: TlsAcceptor, } +/// A wrapper around an underlying raw stream which implements the TLS or SSL +/// protocol. +/// +/// A `TlsStream` represents a handshake that has been completed successfully +/// and both the server and the client are ready for receiving and sending +/// data. Bytes read from a `TlsStream` are decrypted from `S` and bytes written +/// to a `TlsStream` are encrypted when passing through to `S`. +#[derive(Debug)] +pub struct TlsStream { + inner: native_tls::TlsStream, +} + +/// Future returned from `NativeTlsAcceptor::accept` which will resolve +/// once the accept handshake has finished. 
+pub struct Accept{ + inner: Option, HandshakeError>>, +} + impl NativeTlsAcceptor { /// Create `NativeTlsAcceptor` instance pub fn new(acceptor: TlsAcceptor) -> Self { - NativeTlsAcceptor { acceptor } - } -} - -pub struct AcceptorFut(AcceptAsync); - -impl Future for AcceptorFut { - type Item = TlsStream; - type Error = io::Error; - - fn poll(&mut self) -> Poll { - self.0 - .poll() - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + NativeTlsAcceptor { acceptor: acceptor.into() } } } impl AcceptorService for NativeTlsAcceptor { type Accepted = TlsStream; - type Future = AcceptorFut; + type Future = Accept; fn scheme(&self) -> &'static str { "https" } fn accept(&self, io: Io) -> Self::Future { - AcceptorFut(TlsAcceptorExt::accept_async(&self.acceptor, io)) + Accept { inner: Some(self.acceptor.accept(io)) } } } @@ -65,3 +70,71 @@ impl IoStream for TlsStream { self.get_mut().get_mut().set_linger(dur) } } + +impl Future for Accept { + type Item = TlsStream; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + match self.inner.take().expect("cannot poll MidHandshake twice") { + Ok(stream) => Ok(TlsStream { inner: stream }.into()), + Err(HandshakeError::Failure(e)) => Err(io::Error::new(io::ErrorKind::Other, e)), + Err(HandshakeError::WouldBlock(s)) => { + match s.handshake() { + Ok(stream) => Ok(TlsStream { inner: stream }.into()), + Err(HandshakeError::Failure(e)) => + Err(io::Error::new(io::ErrorKind::Other, e)), + Err(HandshakeError::WouldBlock(s)) => { + self.inner = Some(Err(HandshakeError::WouldBlock(s))); + Ok(Async::NotReady) + } + } + } + } + } +} + +impl TlsStream { + /// Get access to the internal `native_tls::TlsStream` stream which also + /// transitively allows access to `S`. + pub fn get_ref(&self) -> &native_tls::TlsStream { + &self.inner + } + + /// Get mutable access to the internal `native_tls::TlsStream` stream which + /// also transitively allows mutable access to `S`. 
+ pub fn get_mut(&mut self) -> &mut native_tls::TlsStream { + &mut self.inner + } +} + +impl io::Read for TlsStream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.inner.read(buf) + } +} + +impl io::Write for TlsStream { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + + +impl AsyncRead for TlsStream { +} + +impl AsyncWrite for TlsStream { + fn shutdown(&mut self) -> Poll<(), io::Error> { + match self.inner.shutdown() { + Ok(_) => (), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (), + Err(e) => return Err(e), + } + self.inner.get_mut().shutdown() + } +} \ No newline at end of file From 2ab7dbadce151bf9b3b7e24b6694ae8f52021b12 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 9 Aug 2018 13:38:10 -0700 Subject: [PATCH 054/219] better ergonomics for Server::service() method --- src/server/http.rs | 21 +++++++++++---------- src/server/server.rs | 9 +++++---- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/src/server/http.rs b/src/server/http.rs index 5deaf029b..edf8aef60 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -427,14 +427,19 @@ where } } -impl Into> for HttpServer { - fn into(self) -> Box { - Box::new(HttpService { +impl Into<(Box, Vec<(Token, net::TcpListener)>)> for HttpServer { + fn into(mut self) -> (Box, Vec<(Token, net::TcpListener)>) { + let sockets: Vec<_> = mem::replace(&mut self.sockets, Vec::new()) + .into_iter() + .map(|item| (item.token, item.lst)) + .collect(); + + (Box::new(HttpService { factory: self.factory, host: self.host, keep_alive: self.keep_alive, handlers: self.handlers, - }) + }), sockets) } } @@ -500,7 +505,7 @@ impl HttpServer { /// sys.run(); // <- Run actix system, this method starts all async processes /// } /// ``` - pub fn start(mut self) -> Addr { + pub fn start(self) -> Addr { let mut srv = Server::new() .workers(self.threads) .maxconn(self.maxconn) @@ -514,11 +519,7 @@ impl HttpServer { srv }; - let sockets: Vec<_> = mem::replace(&mut self.sockets, Vec::new()) - .into_iter() - .map(|item| (item.token, item.lst)) - .collect(); - srv.service(self, sockets).start() + srv.service(self).start() } /// Spawn new thread and start listening for incoming connections. 
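A minimal sketch of what this ergonomics change amounts to at the only internal call site, `HttpServer::start()`: because `HttpServer` now converts into a `(Box<Service>, Vec<(Token, net::TcpListener)>)` pair, the bound sockets no longer have to be collected and handed to `Server::service()` separately (both call forms below are the ones shown in this series):

    // previously: sockets gathered by hand and passed alongside the service
    srv.service(self, sockets).start()

    // with this change: the `Into` conversion carries the sockets
    srv.service(self).start()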
diff --git a/src/server/server.rs b/src/server/server.rs index ff88040fe..bef1ed165 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -145,11 +145,12 @@ impl Server { } /// Add new service to server - pub fn service(mut self, srv: T, sockets: Vec<(Token, net::TcpListener)>) -> Self - where - T: Into> + pub fn service(mut self, srv: T) -> Self + where + T: Into<(Box, Vec<(Token, net::TcpListener)>)> { - self.services.push(srv.into()); + let (srv, sockets) = srv.into(); + self.services.push(srv); self.sockets.push(sockets); self } From 26629aafa589f8fc6a85a9bad1cf561664c36421 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 9 Aug 2018 13:41:13 -0700 Subject: [PATCH 055/219] explicit use --- src/server/server.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server/server.rs b/src/server/server.rs index bef1ed165..9e25efc56 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -2,6 +2,7 @@ use std::{mem, net}; use std::time::Duration; use std::sync::{Arc, atomic::{AtomicUsize, Ordering}}; +use num_cpus; use futures::{Future, Stream, Sink}; use futures::sync::{mpsc, mpsc::unbounded}; From cc3fbd27e05723c5004ae302263ad78fc2a53d39 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 9 Aug 2018 17:25:23 -0700 Subject: [PATCH 056/219] better ergonomics --- src/handler.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/handler.rs b/src/handler.rs index 3ac0c2ab2..661cd0285 100644 --- a/src/handler.rs +++ b/src/handler.rs @@ -353,13 +353,16 @@ impl> From> for AsyncResult { } } -impl> From>, E>> - for AsyncResult +impl From>, E>> for AsyncResult +where T: 'static, + E: Into + 'static { #[inline] - fn from(res: Result>, E>) -> Self { + fn from(res: Result>, E>) -> Self { match res { - Ok(fut) => AsyncResult(Some(AsyncResultItem::Future(fut))), + Ok(fut) => AsyncResult( + Some(AsyncResultItem::Future( + Box::new(fut.map_err(|e| e.into()))))), Err(err) => AsyncResult(Some(AsyncResultItem::Err(err.into()))), } } From bf7779a9a35c5b49f56904b644a6d033c2e59928 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 9 Aug 2018 18:58:14 -0700 Subject: [PATCH 057/219] add TestRequest::run_async_result helper method --- src/test.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/test.rs b/src/test.rs index 92aa6c8d2..64aef6638 100644 --- a/src/test.rs +++ b/src/test.rs @@ -26,7 +26,7 @@ use application::{App, HttpApplication}; use body::Binary; use client::{ClientConnector, ClientRequest, ClientRequestBuilder}; use error::Error; -use handler::{AsyncResultItem, Handler, Responder}; +use handler::{AsyncResult, AsyncResultItem, Handler, Responder}; use header::{Header, IntoHeaderValue}; use httprequest::HttpRequest; use httpresponse::HttpResponse; @@ -722,6 +722,25 @@ impl TestRequest { } } + /// This method generates `HttpRequest` instance and executes handler + pub fn run_async_result(self, f: F) -> Result + where + F: FnOnce(&HttpRequest) -> R, + R: Into>, + { + let req = self.finish(); + let res = f(&req); + + match res.into().into() { + AsyncResultItem::Ok(resp) => Ok(resp), + AsyncResultItem::Err(err) => Err(err), + AsyncResultItem::Future(fut) => { + let mut sys = System::new("test"); + sys.block_on(fut) + } + } + } + /// This method generates `HttpRequest` instance and executes handler pub fn execute(self, f: F) -> Result where From d9c7cd96a6d9e1fef0f38b7410cf954cb3e6b38c Mon Sep 17 00:00:00 2001 From: Gowee Date: Mon, 13 Aug 2018 22:34:05 +0800 Subject: [PATCH 058/219] Rework 
Content-Disposition parsing totally (#461) --- src/fs.rs | 10 +- src/header/common/content_disposition.rs | 927 +++++++++++++++++++---- src/header/mod.rs | 6 +- src/multipart.rs | 4 +- 4 files changed, 791 insertions(+), 156 deletions(-) diff --git a/src/fs.rs b/src/fs.rs index f23ba12cd..4c8192126 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -164,11 +164,7 @@ impl NamedFile { let disposition_type = C::content_disposition_map(ct.type_()); let cd = ContentDisposition { disposition: disposition_type, - parameters: vec![DispositionParam::Filename( - header::Charset::Ext("UTF-8".to_owned()), - None, - filename.as_bytes().to_vec(), - )], + parameters: vec![DispositionParam::Filename(filename.into_owned())], }; (ct, cd) }; @@ -991,9 +987,7 @@ mod tests { let cd = ContentDisposition { disposition: DispositionType::Attachment, parameters: vec![DispositionParam::Filename( - header::Charset::Ext("UTF-8".to_owned()), - None, - "test.png".as_bytes().to_vec(), + String::from("test.png") )], }; let mut file = NamedFile::open("tests/test.png") diff --git a/src/header/common/content_disposition.rs b/src/header/common/content_disposition.rs index ff04ef565..686cf9c67 100644 --- a/src/header/common/content_disposition.rs +++ b/src/header/common/content_disposition.rs @@ -2,17 +2,35 @@ // // "The Content-Disposition Header Field" https://www.ietf.org/rfc/rfc2183.txt // "The Content-Disposition Header Field in the Hypertext Transfer Protocol (HTTP)" https://www.ietf.org/rfc/rfc6266.txt -// "Returning Values from Forms: multipart/form-data" https://www.ietf.org/rfc/rfc2388.txt +// "Returning Values from Forms: multipart/form-data" https://www.ietf.org/rfc/rfc7578.txt // Browser conformance tests at: http://greenbytes.de/tech/tc2231/ // IANA assignment: http://www.iana.org/assignments/cont-disp/cont-disp.xhtml -use language_tags::LanguageTag; use header; +use header::ExtendedValue; use header::{Header, IntoHeaderValue, Writer}; -use header::shared::Charset; +use regex::Regex; use std::fmt::{self, Write}; +/// Split at the index of the first `needle` if it exists or at the end. +fn split_once<'a>(haystack: &'a str, needle: char) -> (&'a str, &'a str) { + haystack.find(needle).map_or_else( + || (haystack, ""), + |sc| { + let (first, last) = haystack.split_at(sc); + (first, last.split_at(1).1) + }, + ) +} + +/// Split at the index of the first `needle` if it exists or at the end, trim the right of the +/// first part and the left of the last part. +fn split_once_and_trim<'a>(haystack: &'a str, needle: char) -> (&'a str, &'a str) { + let (first, last) = split_once(haystack, needle); + (first.trim_right(), last.trim_left()) +} + /// The implied disposition of the content of the HTTP body. #[derive(Clone, Debug, PartialEq)] pub enum DispositionType { @@ -21,27 +39,166 @@ pub enum DispositionType { /// Attachment implies that the recipient should prompt the user to save the response locally, /// rather than process it normally (as per its media type). Attachment, - /// Extension type. Should be handled by recipients the same way as Attachment - Ext(String) + /// Used in *multipart/form-data* as defined in + /// [RFC7578](https://tools.ietf.org/html/rfc7578) to carry the field name and the file name. + FormData, + /// Extension type. Should be handled by recipients the same way as Attachment + Ext(String), } -/// A parameter to the disposition type. 
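// A quick worked example of the two private `split_once` helpers added near the
// top of this file (expected results assuming the definitions shown in this
// patch; the helpers are not exported, so this is illustration only):
//
//   split_once("inline; filename=a.txt", ';')          == ("inline", " filename=a.txt")
//   split_once_and_trim("inline; filename=a.txt", ';') == ("inline", "filename=a.txt")
//   split_once("inline", ';')                           == ("inline", "")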
+impl<'a> From<&'a str> for DispositionType {
+    fn from(origin: &'a str) -> DispositionType {
+        if origin.eq_ignore_ascii_case("inline") {
+            DispositionType::Inline
+        } else if origin.eq_ignore_ascii_case("attachment") {
+            DispositionType::Attachment
+        } else if origin.eq_ignore_ascii_case("form-data") {
+            DispositionType::FormData
+        } else {
+            DispositionType::Ext(origin.to_owned())
+        }
+    }
+}
+
+/// Parameter in [`ContentDisposition`].
+///
+/// # Examples
+/// ```
+/// use actix_web::http::header::DispositionParam;
+///
+/// let param = DispositionParam::Filename(String::from("sample.txt"));
+/// assert!(param.is_filename());
+/// assert_eq!(param.as_filename().unwrap(), "sample.txt");
+/// ```
 #[derive(Clone, Debug, PartialEq)]
 pub enum DispositionParam {
-    /// A Filename consisting of a Charset, an optional LanguageTag, and finally a sequence of
-    /// bytes representing the filename
-    Filename(Charset, Option<LanguageTag>, Vec<u8>),
-    /// Extension type consisting of token and value. Recipients should ignore unrecognized
-    /// parameters.
-    Ext(String, String)
+    /// For [`DispositionType::FormData`] (i.e. *multipart/form-data*), the name of a field from
+    /// the form.
+    Name(String),
+    /// A plain file name.
+    Filename(String),
+    /// An extended file name. It must not exist for `DispositionType::FormData` according to
+    /// [RFC7578 Section 4.2](https://tools.ietf.org/html/rfc7578#section-4.2).
+    FilenameExt(ExtendedValue),
+    /// An unrecognized regular parameter as defined in
+    /// [RFC5987](https://tools.ietf.org/html/rfc5987) as *reg-parameter*, in
+    /// [RFC6266](https://tools.ietf.org/html/rfc6266) as *token "=" value*. Recipients should
+    /// ignore unrecognizable parameters.
+    Unknown(String, String),
+    /// An unrecognized extended parameter as defined in
+    /// [RFC5987](https://tools.ietf.org/html/rfc5987) as *ext-parameter*, in
+    /// [RFC6266](https://tools.ietf.org/html/rfc6266) as *ext-token "=" ext-value*. The single
+    /// trailing asterisk is not included. Recipients should ignore unrecognizable parameters.
+    UnknownExt(String, ExtendedValue),
 }
 
-/// A `Content-Disposition` header, (re)defined in [RFC6266](https://tools.ietf.org/html/rfc6266).
+impl DispositionParam {
+    /// Returns `true` if the parameter is [`Name`](DispositionParam::Name).
+    #[inline]
+    pub fn is_name(&self) -> bool {
+        self.as_name().is_some()
+    }
+
+    /// Returns `true` if the parameter is [`Filename`](DispositionParam::Filename).
+    #[inline]
+    pub fn is_filename(&self) -> bool {
+        self.as_filename().is_some()
+    }
+
+    /// Returns `true` if the parameter is [`FilenameExt`](DispositionParam::FilenameExt).
+    #[inline]
+    pub fn is_filename_ext(&self) -> bool {
+        self.as_filename_ext().is_some()
+    }
+
+    /// Returns `true` if the parameter is [`Unknown`](DispositionParam::Unknown) and the `name`
+    /// matches.
+    #[inline]
+    pub fn is_unknown<'a, T: AsRef<str>>(&self, name: T) -> bool {
+        self.as_unknown(name).is_some()
+    }
+
+    /// Returns `true` if the parameter is [`UnknownExt`](DispositionParam::UnknownExt) and the
+    /// `name` matches.
+    #[inline]
+    pub fn is_unknown_ext<'a, T: AsRef<str>>(&self, name: T) -> bool {
+        self.as_unknown_ext(name).is_some()
+    }
+
+    /// Returns the name if applicable.
+    #[inline]
+    pub fn as_name<'a>(&'a self) -> Option<&'a str> {
+        match self {
+            DispositionParam::Name(ref name) => Some(name.as_str()),
+            _ => None,
+        }
+    }
+
+    /// Returns the filename if applicable.
+ #[inline] + pub fn as_filename<'a>(&'a self) -> Option<&'a str> { + match self { + &DispositionParam::Filename(ref filename) => Some(filename.as_str()), + _ => None, + } + } + + /// Returns the filename* if applicable. + #[inline] + pub fn as_filename_ext<'a>(&'a self) -> Option<&'a ExtendedValue> { + match self { + &DispositionParam::FilenameExt(ref value) => Some(value), + _ => None, + } + } + + /// Returns the value of the unrecognized regular parameter if it is + /// [`Unknown`](DispositionParam::Unknown) and the `name` matches. + #[inline] + pub fn as_unknown<'a, T: AsRef>(&'a self, name: T) -> Option<&'a str> { + match self { + &DispositionParam::Unknown(ref ext_name, ref value) + if ext_name.eq_ignore_ascii_case(name.as_ref()) => + { + Some(value.as_str()) + } + _ => None, + } + } + + /// Returns the value of the unrecognized extended parameter if it is + /// [`Unknown`](DispositionParam::Unknown) and the `name` matches. + #[inline] + pub fn as_unknown_ext<'a, T: AsRef>( + &'a self, name: T, + ) -> Option<&'a ExtendedValue> { + match self { + &DispositionParam::UnknownExt(ref ext_name, ref value) + if ext_name.eq_ignore_ascii_case(name.as_ref()) => + { + Some(value) + } + _ => None, + } + } +} + +/// A *Content-Disposition* header. It is compatible to be used either as +/// [a response header for the main body](https://mdn.io/Content-Disposition#As_a_response_header_for_the_main_body) +/// as (re)defined in [RFC6266](https://tools.ietf.org/html/rfc6266), or as +/// [a header for a multipart body](https://mdn.io/Content-Disposition#As_a_header_for_a_multipart_body) +/// as (re)defined in [RFC7587](https://tools.ietf.org/html/rfc7578). /// -/// The Content-Disposition response header field is used to convey -/// additional information about how to process the response payload, and -/// also can be used to attach additional metadata, such as the filename -/// to use when saving the response payload locally. +/// In a regular HTTP response, the *Content-Disposition* response header is a header indicating if +/// the content is expected to be displayed *inline* in the browser, that is, as a Web page or as +/// part of a Web page, or as an attachment, that is downloaded and saved locally, and also can be +/// used to attach additional metadata, such as the filename to use when saving the response payload +/// locally. +/// +/// In a *multipart/form-data* body, the HTTP *Content-Disposition* general header is a header that +/// can be used on the subpart of a multipart body to give information about the field it applies to. +/// The subpart is delimited by the boundary defined in the *Content-Type* header. Used on the body +/// itself, *Content-Disposition* has no effect. /// /// # ABNF @@ -65,88 +222,219 @@ pub enum DispositionParam { /// ext-token = /// ``` /// +/// **Note**: filename* [must not](https://tools.ietf.org/html/rfc7578#section-4.2) be used within +/// *multipart/form-data*. 
+/// /// # Example /// /// ``` -/// use actix_web::http::header::{ContentDisposition, DispositionType, DispositionParam, Charset}; +/// use actix_web::http::header::{ +/// Charset, ContentDisposition, DispositionParam, DispositionType, +/// ExtendedValue, +/// }; /// /// let cd1 = ContentDisposition { /// disposition: DispositionType::Attachment, -/// parameters: vec![DispositionParam::Filename( -/// Charset::Iso_8859_1, // The character set for the bytes of the filename -/// None, // The optional language tag (see `language-tag` crate) -/// b"\xa9 Copyright 1989.txt".to_vec() // the actual bytes of the filename -/// )] +/// parameters: vec![DispositionParam::FilenameExt(ExtendedValue { +/// charset: Charset::Iso_8859_1, // The character set for the bytes of the filename +/// language_tag: None, // The optional language tag (see `language-tag` crate) +/// value: b"\xa9 Copyright 1989.txt".to_vec(), // the actual bytes of the filename +/// })], /// }; +/// assert!(cd1.is_attachment()); +/// assert!(cd1.get_filename_ext().is_some()); /// /// let cd2 = ContentDisposition { -/// disposition: DispositionType::Inline, -/// parameters: vec![DispositionParam::Filename( -/// Charset::Ext("UTF-8".to_owned()), -/// None, -/// "\u{2764}".as_bytes().to_vec() -/// )] +/// disposition: DispositionType::FormData, +/// parameters: vec![ +/// DispositionParam::Name(String::from("file")), +/// DispositionParam::Filename(String::from("bill.odt")), +/// ], /// }; +/// assert_eq!(cd2.get_name(), Some("file")); // field name +/// assert_eq!(cd2.get_filename(), Some("bill.odt")); /// ``` +/// +/// # WARN +/// If "filename" parameter is supplied, do not use the file name blindly, check and possibly +/// change to match local file system conventions if applicable, and do not use directory path +/// information that may be present. See [RFC2183](https://tools.ietf.org/html/rfc2183#section-2.3) +/// . #[derive(Clone, Debug, PartialEq)] pub struct ContentDisposition { - /// The disposition + /// The disposition type pub disposition: DispositionType, /// Disposition parameters pub parameters: Vec, } + impl ContentDisposition { - /// Parse a raw Content-Disposition header value + /// Parse a raw Content-Disposition header value. pub fn from_raw(hv: &header::HeaderValue) -> Result { - header::from_one_raw_str(Some(hv)).and_then(|s: String| { - let mut sections = s.split(';'); - let disposition = match sections.next() { - Some(s) => s.trim(), - None => return Err(::error::ParseError::Header), - }; + // `header::from_one_raw_str` invokes `hv.to_str` which assumes `hv` contains only visible + // ASCII characters. So `hv.as_bytes` is necessary here. 
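+        // Bytes that are not valid UTF-8 are rejected with `ParseError::Header` just below.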
+ let hv = String::from_utf8(hv.as_bytes().to_vec()) + .map_err(|_| ::error::ParseError::Header)?; + let (disp_type, mut left) = split_once_and_trim(hv.as_str().trim(), ';'); + if disp_type.len() == 0 { + return Err(::error::ParseError::Header); + } + let mut cd = ContentDisposition { + disposition: disp_type.into(), + parameters: Vec::new(), + }; - let mut cd = ContentDisposition { - disposition: if disposition.eq_ignore_ascii_case("inline") { - DispositionType::Inline - } else if disposition.eq_ignore_ascii_case("attachment") { - DispositionType::Attachment - } else { - DispositionType::Ext(disposition.to_owned()) - }, - parameters: Vec::new(), - }; - - for section in sections { - let mut parts = section.splitn(2, '='); - - let key = if let Some(key) = parts.next() { - key.trim() - } else { - return Err(::error::ParseError::Header); - }; - - let val = if let Some(val) = parts.next() { - val.trim() - } else { - return Err(::error::ParseError::Header); - }; - - cd.parameters.push( - if key.eq_ignore_ascii_case("filename") { - DispositionParam::Filename( - Charset::Ext("UTF-8".to_owned()), None, - val.trim_matches('"').as_bytes().to_owned()) - } else if key.eq_ignore_ascii_case("filename*") { - let extended_value = try!(header::parse_extended_value(val)); - DispositionParam::Filename(extended_value.charset, extended_value.language_tag, extended_value.value) - } else { - DispositionParam::Ext(key.to_owned(), val.trim_matches('"').to_owned()) - } - ); + while left.len() > 0 { + let (param_name, new_left) = split_once_and_trim(left, '='); + if param_name.len() == 0 || param_name == "*" || new_left.len() == 0 { + return Err(::error::ParseError::Header); } + left = new_left; + if param_name.ends_with('*') { + // extended parameters + let param_name = ¶m_name[..param_name.len() - 1]; // trim asterisk + let (ext_value, new_left) = split_once_and_trim(left, ';'); + left = new_left; + let ext_value = header::parse_extended_value(ext_value)?; - Ok(cd) - }) + let param = if param_name.eq_ignore_ascii_case("filename") { + DispositionParam::FilenameExt(ext_value) + } else { + DispositionParam::UnknownExt(param_name.to_owned(), ext_value) + }; + cd.parameters.push(param); + } else { + // regular parameters + let value = if left.starts_with('\"') { + // quoted-string: defined in RFC6266 -> RFC2616 Section 3.6 + let mut escaping = false; + let mut quoted_string = vec![]; + let mut end = None; + // search for closing quote + for (i, &c) in left.as_bytes().iter().skip(1).enumerate() { + if escaping { + escaping = false; + quoted_string.push(c); + } else { + if c == 0x5c + // backslash + { + escaping = true; + } else if c == 0x22 + // double quote + { + end = Some(i + 1); // cuz skipped 1 for the leading quote + break; + } else { + quoted_string.push(c); + } + } + } + left = &left[end.ok_or(::error::ParseError::Header)? + 1..]; + left = split_once(left, ';').1.trim_left(); + // In fact, it should not be Err if the above code is correct. 
+ let quoted_string = String::from_utf8(quoted_string) + .map_err(|_| ::error::ParseError::Header)?; + quoted_string + } else { + // token: won't contains semicolon according to RFC 2616 Section 2.2 + let (token, new_left) = split_once_and_trim(left, ';'); + left = new_left; + token.to_owned() + }; + if value.len() == 0 { + return Err(::error::ParseError::Header); + } + + let param = if param_name.eq_ignore_ascii_case("name") { + DispositionParam::Name(value) + } else if param_name.eq_ignore_ascii_case("filename") { + DispositionParam::Filename(value) + } else { + DispositionParam::Unknown(param_name.to_owned(), value) + }; + cd.parameters.push(param); + } + } + + Ok(cd) + } + + /// Returns `true` if it is [`Inline`](DispositionType::Inline). + pub fn is_inline(&self) -> bool { + match self.disposition { + DispositionType::Inline => true, + _ => false, + } + } + + /// Returns `true` if it is [`Attachment`](DispositionType::Attachment). + pub fn is_attachment(&self) -> bool { + match self.disposition { + DispositionType::Attachment => true, + _ => false, + } + } + + /// Returns `true` if it is [`FormData`](DispositionType::FormData). + pub fn is_form_data(&self) -> bool { + match self.disposition { + DispositionType::FormData => true, + _ => false, + } + } + + /// Returns `true` if it is [`Ext`](DispositionType::Ext) and the `disp_type` matches. + pub fn is_ext>(&self, disp_type: T) -> bool { + match self.disposition { + DispositionType::Ext(ref t) + if t.eq_ignore_ascii_case(disp_type.as_ref()) => + { + true + } + _ => false, + } + } + + /// Return the value of *name* if exists. + pub fn get_name<'a>(&'a self) -> Option<&'a str> { + self.parameters.iter().filter_map(|p| p.as_name()).nth(0) + } + + /// Return the value of *filename* if exists. + pub fn get_filename<'a>(&'a self) -> Option<&'a str> { + self.parameters + .iter() + .filter_map(|p| p.as_filename()) + .nth(0) + } + + /// Return the value of *filename\** if exists. + pub fn get_filename_ext<'a>(&'a self) -> Option<&'a ExtendedValue> { + self.parameters + .iter() + .filter_map(|p| p.as_filename_ext()) + .nth(0) + } + + /// Return the value of the parameter which the `name` matches. + pub fn get_unknown<'a, T: AsRef>(&'a self, name: T) -> Option<&'a str> { + let name = name.as_ref(); + self.parameters + .iter() + .filter_map(|p| p.as_unknown(name)) + .nth(0) + } + + /// Return the value of the extended parameter which the `name` matches. 
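+    ///
+    /// A rough sketch (the parameter name `x-meta` and its value are made up
+    /// purely for illustration):
+    ///
+    /// ```
+    /// use actix_web::http::header::{
+    ///     Charset, ContentDisposition, DispositionParam, DispositionType, ExtendedValue,
+    /// };
+    ///
+    /// let cd = ContentDisposition {
+    ///     disposition: DispositionType::Attachment,
+    ///     parameters: vec![DispositionParam::UnknownExt(
+    ///         String::from("x-meta"),
+    ///         ExtendedValue {
+    ///             charset: Charset::Ext(String::from("UTF-8")),
+    ///             language_tag: None,
+    ///             value: b"tag".to_vec(),
+    ///         },
+    ///     )],
+    /// };
+    /// assert!(cd.get_unknown_ext("X-Meta").is_some()); // name matching is case-insensitive
+    /// assert!(cd.get_unknown_ext("other").is_none());
+    /// ```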
+ pub fn get_unknown_ext<'a, T: AsRef>( + &'a self, name: T, + ) -> Option<&'a ExtendedValue> { + let name = name.as_ref(); + self.parameters + .iter() + .filter_map(|p| p.as_unknown_ext(name)) + .nth(0) } } @@ -174,67 +462,76 @@ impl Header for ContentDisposition { } } -impl fmt::Display for ContentDisposition { +impl fmt::Display for DispositionType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.disposition { - DispositionType::Inline => try!(write!(f, "inline")), - DispositionType::Attachment => try!(write!(f, "attachment")), - DispositionType::Ext(ref s) => try!(write!(f, "{}", s)), + match self { + DispositionType::Inline => write!(f, "inline"), + DispositionType::Attachment => write!(f, "attachment"), + DispositionType::FormData => write!(f, "form-data"), + DispositionType::Ext(ref s) => write!(f, "{}", s), } - for param in &self.parameters { - match *param { - DispositionParam::Filename(ref charset, ref opt_lang, ref bytes) => { - let mut use_simple_format: bool = false; - if opt_lang.is_none() { - if let Charset::Ext(ref ext) = *charset { - if ext.eq_ignore_ascii_case("utf-8") { - use_simple_format = true; - } - } - } - if use_simple_format { - use std::str; - try!(write!(f, "; filename=\"{}\"", - match str::from_utf8(bytes) { - Ok(s) => s, - Err(_) => return Err(fmt::Error), - })); - } else { - try!(write!(f, "; filename*={}'", charset)); - if let Some(ref lang) = *opt_lang { - try!(write!(f, "{}", lang)); - }; - try!(write!(f, "'")); - try!(header::http_percent_encode(f, bytes)) - } - }, - DispositionParam::Ext(ref k, ref v) => try!(write!(f, "; {}=\"{}\"", k, v)), + } +} + +impl fmt::Display for DispositionParam { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // All ASCII control charaters (0-30, 127) excepting horizontal tab, double quote, and + // backslash should be escaped in quoted-string (i.e. "foobar"). + // Ref: RFC6266 S4.1 -> RFC2616 S2.2; RFC 7578 S4.2 -> RFC2183 S2 -> ... . + lazy_static! 
{ + static ref RE: Regex = Regex::new("[\x01-\x08\x10\x1F\x7F\"\\\\]").unwrap(); + } + match self { + DispositionParam::Name(ref value) => write!(f, "name={}", value), + DispositionParam::Filename(ref value) => { + write!(f, "filename=\"{}\"", RE.replace_all(value, "\\$0").as_ref()) + } + DispositionParam::Unknown(ref name, ref value) => write!( + f, + "{}=\"{}\"", + name, + &RE.replace_all(value, "\\$0").as_ref() + ), + DispositionParam::FilenameExt(ref ext_value) => { + write!(f, "filename*={}", ext_value) + } + DispositionParam::UnknownExt(ref name, ref ext_value) => { + write!(f, "{}*={}", name, ext_value) } } - Ok(()) + } +} + +impl fmt::Display for ContentDisposition { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.disposition)?; + self.parameters + .iter() + .map(|param| write!(f, "; {}", param)) + .collect() } } #[cfg(test)] mod tests { - use super::{ContentDisposition,DispositionType,DispositionParam}; - use header::HeaderValue; + use super::{ContentDisposition, DispositionParam, DispositionType}; use header::shared::Charset; + use header::{ExtendedValue, HeaderValue}; #[test] - fn test_from_raw() { + fn test_from_raw_basic() { assert!(ContentDisposition::from_raw(&HeaderValue::from_static("")).is_err()); - let a = HeaderValue::from_static("form-data; dummy=3; name=upload; filename=\"sample.png\""); + let a = HeaderValue::from_static( + "form-data; dummy=3; name=upload; filename=\"sample.png\"", + ); let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); let b = ContentDisposition { - disposition: DispositionType::Ext("form-data".to_owned()), + disposition: DispositionType::FormData, parameters: vec![ - DispositionParam::Ext("dummy".to_owned(), "3".to_owned()), - DispositionParam::Ext("name".to_owned(), "upload".to_owned()), - DispositionParam::Filename( - Charset::Ext("UTF-8".to_owned()), - None, - "sample.png".bytes().collect()) ] + DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()), + DispositionParam::Name("upload".to_owned()), + DispositionParam::Filename("sample.png".to_owned()), + ], }; assert_eq!(a, b); @@ -242,44 +539,386 @@ mod tests { let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); let b = ContentDisposition { disposition: DispositionType::Attachment, - parameters: vec![ - DispositionParam::Filename( - Charset::Ext("UTF-8".to_owned()), - None, - "image.jpg".bytes().collect()) ] + parameters: vec![DispositionParam::Filename("image.jpg".to_owned())], }; assert_eq!(a, b); - let a = HeaderValue::from_static("attachment; filename*=UTF-8''%c2%a3%20and%20%e2%82%ac%20rates"); + let a = HeaderValue::from_static("inline; filename=image.jpg"); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::Inline, + parameters: vec![DispositionParam::Filename("image.jpg".to_owned())], + }; + assert_eq!(a, b); + + let a = HeaderValue::from_static( + "attachment; creation-date=\"Wed, 12 Feb 1997 16:29:51 -0500\"", + ); let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); let b = ContentDisposition { disposition: DispositionType::Attachment, - parameters: vec![ - DispositionParam::Filename( - Charset::Ext("UTF-8".to_owned()), - None, - vec![0xc2, 0xa3, 0x20, b'a', b'n', b'd', 0x20, - 0xe2, 0x82, 0xac, 0x20, b'r', b'a', b't', b'e', b's']) ] + parameters: vec![DispositionParam::Unknown( + String::from("creation-date"), + "Wed, 12 Feb 1997 16:29:51 -0500".to_owned(), + )], }; assert_eq!(a, b); } #[test] - fn 
test_display() { - let as_string = "attachment; filename*=UTF-8'en'%C2%A3%20and%20%E2%82%AC%20rates"; + fn test_from_raw_extended() { + let a = HeaderValue::from_static( + "attachment; filename*=UTF-8''%c2%a3%20and%20%e2%82%ac%20rates", + ); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::Attachment, + parameters: vec![DispositionParam::FilenameExt(ExtendedValue { + charset: Charset::Ext(String::from("UTF-8")), + language_tag: None, + value: vec![ + 0xc2, 0xa3, 0x20, b'a', b'n', b'd', 0x20, 0xe2, 0x82, 0xac, 0x20, + b'r', b'a', b't', b'e', b's', + ], + })], + }; + assert_eq!(a, b); + + let a = HeaderValue::from_static( + "attachment; filename*=UTF-8''%c2%a3%20and%20%e2%82%ac%20rates", + ); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::Attachment, + parameters: vec![DispositionParam::FilenameExt(ExtendedValue { + charset: Charset::Ext(String::from("UTF-8")), + language_tag: None, + value: vec![ + 0xc2, 0xa3, 0x20, b'a', b'n', b'd', 0x20, 0xe2, 0x82, 0xac, 0x20, + b'r', b'a', b't', b'e', b's', + ], + })], + }; + assert_eq!(a, b); + } + + #[test] + fn test_from_raw_extra_whitespace() { + let a = HeaderValue::from_static( + "form-data ; du-mmy= 3 ; name =upload ; filename = \"sample.png\" ; ", + ); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::FormData, + parameters: vec![ + DispositionParam::Unknown("du-mmy".to_owned(), "3".to_owned()), + DispositionParam::Name("upload".to_owned()), + DispositionParam::Filename("sample.png".to_owned()), + ], + }; + assert_eq!(a, b); + } + + #[test] + fn test_from_raw_unordered() { + let a = HeaderValue::from_static( + "form-data; dummy=3; filename=\"sample.png\" ; name=upload;", + // Actually, a trailling semolocon is not compliant. But it is fine to accept. 
+ ); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::FormData, + parameters: vec![ + DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()), + DispositionParam::Filename("sample.png".to_owned()), + DispositionParam::Name("upload".to_owned()), + ], + }; + assert_eq!(a, b); + + let a = HeaderValue::from_str( + "attachment; filename*=iso-8859-1''foo-%E4.html; filename=\"foo-ä.html\"", + ).unwrap(); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::Attachment, + parameters: vec![ + DispositionParam::FilenameExt(ExtendedValue { + charset: Charset::Iso_8859_1, + language_tag: None, + value: b"foo-\xe4.html".to_vec(), + }), + DispositionParam::Filename("foo-ä.html".to_owned()), + ], + }; + assert_eq!(a, b); + } + + #[test] + fn test_from_raw_only_disp() { + let a = ContentDisposition::from_raw(&HeaderValue::from_static("attachment")) + .unwrap(); + let b = ContentDisposition { + disposition: DispositionType::Attachment, + parameters: vec![], + }; + assert_eq!(a, b); + + let a = + ContentDisposition::from_raw(&HeaderValue::from_static("inline ;")).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::Inline, + parameters: vec![], + }; + assert_eq!(a, b); + + let a = ContentDisposition::from_raw(&HeaderValue::from_static( + "unknown-disp-param", + )).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::Ext(String::from("unknown-disp-param")), + parameters: vec![], + }; + assert_eq!(a, b); + } + + #[test] + fn from_raw_with_mixed_case() { + let a = HeaderValue::from_str( + "InLInE; fIlenAME*=iso-8859-1''foo-%E4.html; filEName=\"foo-ä.html\"", + ).unwrap(); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::Inline, + parameters: vec![ + DispositionParam::FilenameExt(ExtendedValue { + charset: Charset::Iso_8859_1, + language_tag: None, + value: b"foo-\xe4.html".to_vec(), + }), + DispositionParam::Filename("foo-ä.html".to_owned()), + ], + }; + assert_eq!(a, b); + } + + #[test] + fn from_raw_with_unicode() { + /* RFC7578 Section 4.2: + Some commonly deployed systems use multipart/form-data with file names directly encoded + including octets outside the US-ASCII range. The encoding used for the file names is + typically UTF-8, although HTML forms will use the charset associated with the form. + + Mainstream browsers like Firefox (gecko) and Chrome use UTF-8 directly as above. + (And now, only UTF-8 is handled by this implementation.) 
+ */ + let a = + HeaderValue::from_str("form-data; name=upload; filename=\"文件.webp\"") + .unwrap(); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::FormData, + parameters: vec![ + DispositionParam::Name(String::from("upload")), + DispositionParam::Filename(String::from("文件.webp")), + ], + }; + assert_eq!(a, b); + + let a = + HeaderValue::from_str("form-data; name=upload; filename=\"余固知謇謇之為患兮,忍而不能舍也.pptx\"").unwrap(); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::FormData, + parameters: vec![ + DispositionParam::Name(String::from("upload")), + DispositionParam::Filename(String::from( + "余固知謇謇之為患兮,忍而不能舍也.pptx", + )), + ], + }; + assert_eq!(a, b); + } + + #[test] + fn test_from_raw_escape() { + let a = HeaderValue::from_static( + "form-data; dummy=3; name=upload; filename=\"s\\amp\\\"le.png\"", + ); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::FormData, + parameters: vec![ + DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()), + DispositionParam::Name("upload".to_owned()), + DispositionParam::Filename( + ['s', 'a', 'm', 'p', '\"', 'l', 'e', '.', 'p', 'n', 'g'] + .iter() + .collect(), + ), + ], + }; + assert_eq!(a, b); + } + + #[test] + fn test_from_raw_semicolon() { + let a = + HeaderValue::from_static("form-data; filename=\"A semicolon here;.pdf\""); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::FormData, + parameters: vec![DispositionParam::Filename(String::from( + "A semicolon here;.pdf", + ))], + }; + assert_eq!(a, b); + } + + #[test] + fn test_from_raw_uncessary_percent_decode() { + let a = HeaderValue::from_static( + "form-data; name=photo; filename=\"%74%65%73%74%2e%70%6e%67\"", // Should not be decoded! 
+ ); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::FormData, + parameters: vec![ + DispositionParam::Name("photo".to_owned()), + DispositionParam::Filename(String::from("%74%65%73%74%2e%70%6e%67")), + ], + }; + assert_eq!(a, b); + + let a = HeaderValue::from_static( + "form-data; name=photo; filename=\"%74%65%73%74.png\"", + ); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let b = ContentDisposition { + disposition: DispositionType::FormData, + parameters: vec![ + DispositionParam::Name("photo".to_owned()), + DispositionParam::Filename(String::from("%74%65%73%74.png")), + ], + }; + assert_eq!(a, b); + } + + #[test] + fn test_from_raw_param_value_missing() { + let a = HeaderValue::from_static("form-data; name=upload ; filename="); + assert!(ContentDisposition::from_raw(&a).is_err()); + + let a = HeaderValue::from_static("attachment; dummy=; filename=invoice.pdf"); + assert!(ContentDisposition::from_raw(&a).is_err()); + + let a = HeaderValue::from_static("inline; filename= "); + assert!(ContentDisposition::from_raw(&a).is_err()); + } + + #[test] + fn test_from_raw_param_name_missing() { + let a = HeaderValue::from_static("inline; =\"test.txt\""); + assert!(ContentDisposition::from_raw(&a).is_err()); + + let a = HeaderValue::from_static("inline; =diary.odt"); + assert!(ContentDisposition::from_raw(&a).is_err()); + + let a = HeaderValue::from_static("inline; ="); + assert!(ContentDisposition::from_raw(&a).is_err()); + } + + #[test] + fn test_display_extended() { + let as_string = + "attachment; filename*=UTF-8'en'%C2%A3%20and%20%E2%82%AC%20rates"; let a = HeaderValue::from_static(as_string); let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); - let display_rendered = format!("{}",a); + let display_rendered = format!("{}", a); assert_eq!(as_string, display_rendered); - let a = HeaderValue::from_static("attachment; filename*=UTF-8''black%20and%20white.csv"); - let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); - let display_rendered = format!("{}",a); - assert_eq!("attachment; filename=\"black and white.csv\"".to_owned(), display_rendered); - let a = HeaderValue::from_static("attachment; filename=colourful.csv"); let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); - let display_rendered = format!("{}",a); - assert_eq!("attachment; filename=\"colourful.csv\"".to_owned(), display_rendered); + let display_rendered = format!("{}", a); + assert_eq!( + "attachment; filename=\"colourful.csv\"".to_owned(), + display_rendered + ); + } + + #[test] + fn test_display_quote() { + let as_string = "form-data; name=upload; filename=\"Quote\\\"here.png\""; + as_string + .find(['\\', '\"'].iter().collect::().as_str()) + .unwrap(); // ensure `\"` is there + let a = HeaderValue::from_static(as_string); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let display_rendered = format!("{}", a); + assert_eq!(as_string, display_rendered); + } + + #[test] + fn test_display_space_tab() { + let as_string = "form-data; name=upload; filename=\"Space here.png\""; + let a = HeaderValue::from_static(as_string); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let display_rendered = format!("{}", a); + assert_eq!(as_string, display_rendered); + + let a: ContentDisposition = ContentDisposition { + disposition: DispositionType::Inline, + parameters: 
vec![DispositionParam::Filename(String::from("Tab\there.png"))], + }; + let display_rendered = format!("{}", a); + assert_eq!("inline; filename=\"Tab\x09here.png\"", display_rendered); + } + + #[test] + fn test_display_control_characters() { + /* let a = "attachment; filename=\"carriage\rreturn.png\""; + let a = HeaderValue::from_static(a); + let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap(); + let display_rendered = format!("{}", a); + assert_eq!( + "attachment; filename=\"carriage\\\rreturn.png\"", + display_rendered + );*/ + // No way to create a HeaderValue containing a carriage return. + + let a: ContentDisposition = ContentDisposition { + disposition: DispositionType::Inline, + parameters: vec![DispositionParam::Filename(String::from("bell\x07.png"))], + }; + let display_rendered = format!("{}", a); + assert_eq!("inline; filename=\"bell\\\x07.png\"", display_rendered); + } + + #[test] + fn test_param_methods() { + let param = DispositionParam::Filename(String::from("sample.txt")); + assert!(param.is_filename()); + assert_eq!(param.as_filename().unwrap(), "sample.txt"); + + let param = DispositionParam::Unknown(String::from("foo"), String::from("bar")); + assert!(param.is_unknown("foo")); + assert_eq!(param.as_unknown("fOo"), Some("bar")); + } + + #[test] + fn test_disposition_methods() { + let cd = ContentDisposition { + disposition: DispositionType::FormData, + parameters: vec![ + DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()), + DispositionParam::Name("upload".to_owned()), + DispositionParam::Filename("sample.png".to_owned()), + ], + }; + assert_eq!(cd.get_name(), Some("upload")); + assert_eq!(cd.get_unknown("dummy"), Some("3")); + assert_eq!(cd.get_filename(), Some("sample.png")); + assert_eq!(cd.get_unknown_ext("dummy"), None); + assert_eq!(cd.get_unknown("duMMy"), Some("3")); } } diff --git a/src/header/mod.rs b/src/header/mod.rs index 291bc6eac..cdd2ad200 100644 --- a/src/header/mod.rs +++ b/src/header/mod.rs @@ -263,8 +263,10 @@ where // From hyper v0.11.27 src/header/parsing.rs -/// An extended header parameter value (i.e., tagged with a character set and optionally, -/// a language), as defined in [RFC 5987](https://tools.ietf.org/html/rfc5987#section-3.2). +/// The value part of an extended parameter consisting of three parts: +/// the REQUIRED character set name (`charset`), the OPTIONAL language information (`language_tag`), +/// and a character sequence representing the actual value (`value`), separated by single quote +/// characters. It is defined in [RFC 5987](https://tools.ietf.org/html/rfc5987#section-3.2). #[derive(Clone, Debug, PartialEq)] pub struct ExtendedValue { /// The character set that is used to encode the `value` to a string. 
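For reference, a minimal sketch of the three-part value described in the reworded `ExtendedValue` docs above, using only the `Charset` and `ExtendedValue` types that the doc examples in this patch already import from `actix_web::http::header`; the concrete bytes are an arbitrary example, and the rendered form follows the display tests shown earlier in the patch:

```rust
extern crate actix_web;

use actix_web::http::header::{Charset, ExtendedValue};

fn main() {
    // charset 'language-tag' percent-encoded-value: three parts separated by single quotes
    let ev = ExtendedValue {
        charset: Charset::Ext("UTF-8".to_owned()),
        language_tag: None,                 // no language information
        value: b"\xc2\xa3 rates".to_vec(),  // "£ rates" as raw UTF-8 bytes
    };

    // Rendered in the RFC 5987 wire form used for parameters such as `filename*`.
    assert_eq!(format!("{}", ev), "UTF-8''%C2%A3%20rates");
}
```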
diff --git a/src/multipart.rs b/src/multipart.rs index d4b6059f2..dbf3d179e 100644 --- a/src/multipart.rs +++ b/src/multipart.rs @@ -758,11 +758,11 @@ mod tests { let cd = field.content_disposition().unwrap(); assert_eq!( cd.disposition, - DispositionType::Ext("form-data".into()) + DispositionType::FormData ); assert_eq!( cd.parameters[0], - DispositionParam::Ext("name".into(), "file".into()) + DispositionParam::Name("file".into()) ); } assert_eq!(field.content_type().type_(), mime::TEXT); From 9f5641c85b5c44eec0b4b9ab3e5c29c8e65c8682 Mon Sep 17 00:00:00 2001 From: Douman Date: Mon, 13 Aug 2018 17:37:00 +0300 Subject: [PATCH 059/219] Add mention of reworked Content-Disposition --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 3dbb3795f..ac302ed0a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -14,6 +14,8 @@ * native-tls - 0.2 +* `Content-Disposition` is re-worked. Its parser is now more robust and handles quoted content better. See #461 + ### Fixed * Use zlib instead of raw deflate for decoding and encoding payloads with From 248bd388cadf30003e77b9167a8751653cde08ad Mon Sep 17 00:00:00 2001 From: Douman Date: Thu, 16 Aug 2018 16:11:15 +0300 Subject: [PATCH 060/219] Improve HTTP server docs (#470) --- src/lib.rs | 3 +- src/pipeline.rs | 4 +- src/server/http.rs | 4 ++ src/server/mod.rs | 112 ++++++++++++++++++++++++++++++++++++++++++- src/server/server.rs | 28 ++++++----- 5 files changed, 136 insertions(+), 15 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index ed02b1b69..c6d3453a2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -251,7 +251,8 @@ pub mod dev { pub use context::Drain; pub use extractor::{FormConfig, PayloadConfig}; pub use handler::{AsyncResult, Handler}; - pub use httpmessage::{MessageBody, UrlEncoded}; + pub use httpmessage::{MessageBody, Readlines, UrlEncoded}; + pub use pipeline::Pipeline; pub use httpresponse::HttpResponseBuilder; pub use info::ConnectionInfo; pub use json::{JsonBody, JsonConfig}; diff --git a/src/pipeline.rs b/src/pipeline.rs index 09c5e49d2..7f206a9fd 100644 --- a/src/pipeline.rs +++ b/src/pipeline.rs @@ -83,7 +83,7 @@ impl PipelineInfo { } impl> Pipeline { - pub fn new( + pub(crate) fn new( req: HttpRequest, mws: Rc>>>, handler: Rc, ) -> Pipeline { let mut info = PipelineInfo { @@ -475,7 +475,7 @@ impl ProcessResponse { } } } - Ok(Async::Ready(None)) => + Ok(Async::Ready(None)) => return Some(FinishingMiddlewares::init( info, mws, self.resp.take().unwrap(), )), diff --git a/src/server/http.rs b/src/server/http.rs index edf8aef60..e3740d955 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -31,6 +31,10 @@ use super::{ }; /// An HTTP Server +/// +/// By default it serves HTTP2 when HTTPs is enabled, +/// in order to change it, use `ServerFlags` that can be provided +/// to acceptor service. pub struct HttpServer where H: IntoHttpHandler + 'static, diff --git a/src/server/mod.rs b/src/server/mod.rs index 67952e433..f33a345e5 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -1,4 +1,111 @@ -//! Http server +//! Http server module +//! +//! The module contains everything necessary to setup +//! HTTP server. +//! +//! In order to start HTTP server, first you need to create and configure it +//! using factory that can be supplied to [new](fn.new.html). +//! +//! ## Factory +//! +//! Factory is a function that returns Application, describing how +//! to serve incoming HTTP requests. +//! +//! As the server uses worker pool, the factory function is restricted to trait bounds +//! 
`Sync + Send + 'static` so that each worker would be able to accept Application +//! without a need for synchronization. +//! +//! If you wish to share part of state among all workers you should +//! wrap it in `Arc` and potentially synchronization primitive like +//! [RwLock](https://doc.rust-lang.org/std/sync/struct.RwLock.html) +//! If the wrapped type is not thread safe. +//! +//! Note though that locking is not advisable for asynchronous programming +//! and you should minimize all locks in your request handlers +//! +//! ## HTTPS Support +//! +//! Actix-web provides support for major crates that provides TLS. +//! Each TLS implementation is provided with [AcceptorService](trait.AcceptorService.html) +//! that describes how HTTP Server accepts connections. +//! +//! For `bind` and `listen` there are corresponding `bind_with` and `listen_with` that accepts +//! these services. +//! +//! By default, acceptor would work with both HTTP2 and HTTP1 protocols. +//! But it can be controlled using [ServerFlags](struct.ServerFlags.html) which +//! can be supplied when creating `AcceptorService`. +//! +//! **NOTE:** `native-tls` doesn't support `HTTP2` yet +//! +//! ## Signal handling and shutdown +//! +//! By default HTTP Server listens for system signals +//! and, gracefully shuts down at most after 30 seconds. +//! +//! Both signal handling and shutdown timeout can be controlled +//! using corresponding methods. +//! +//! If worker, for some reason, unable to shut down within timeout +//! it is forcibly dropped. +//! +//! ## Example +//! +//! ```rust,ignore +//!extern crate actix; +//!extern crate actix_web; +//!extern crate rustls; +//! +//!use actix_web::{http, middleware, server, App, Error, HttpRequest, HttpResponse, Responder}; +//!use std::io::BufReader; +//!use rustls::internal::pemfile::{certs, rsa_private_keys}; +//!use rustls::{NoClientAuth, ServerConfig}; +//! +//!fn index(req: &HttpRequest) -> Result { +//! Ok(HttpResponse::Ok().content_type("text/plain").body("Welcome!")) +//!} +//! +//!fn load_ssl() -> ServerConfig { +//! use std::io::BufReader; +//! +//! const CERT: &'static [u8] = include_bytes!("../cert.pem"); +//! const KEY: &'static [u8] = include_bytes!("../key.pem"); +//! +//! let mut cert = BufReader::new(CERT); +//! let mut key = BufReader::new(KEY); +//! +//! let mut config = ServerConfig::new(NoClientAuth::new()); +//! let cert_chain = certs(&mut cert).unwrap(); +//! let mut keys = rsa_private_keys(&mut key).unwrap(); +//! config.set_single_cert(cert_chain, keys.remove(0)).unwrap(); +//! +//! config +//!} +//! +//!fn main() { +//! let sys = actix::System::new("http-server"); +//! // load ssl keys +//! let config = load_ssl(); +//! +//! // Create acceptor service for only HTTP1 protocol +//! // You can use ::new(config) to leave defaults +//! let acceptor = server::RustlsAcceptor::with_flags(config, actix_web::server::ServerFlags::HTTP1); +//! +//! // create and start server at once +//! server::new(|| { +//! App::new() +//! // register simple handler, handle all methods +//! .resource("/index.html", |r| r.f(index)) +//! })) +//! }).bind_with("127.0.0.1:8080", acceptor) +//! .unwrap() +//! .start(); +//! +//! println!("Started http server: 127.0.0.1:8080"); +//! //Run system so that server would start accepting connections +//! let _ = sys.run(); +//!} +//! ``` use std::net::Shutdown; use std::rc::Rc; use std::{io, net, time}; @@ -83,8 +190,11 @@ where } bitflags! { + ///Flags that can be used to configure HTTP Server. 
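+    ///
+    ///A rough sketch of combining flags (mirroring the `with_flags` call in the
+    ///module-level example above):
+    ///
+    ///```
+    ///use actix_web::server::ServerFlags;
+    ///
+    ///let both = ServerFlags::HTTP1 | ServerFlags::HTTP2;
+    ///assert!(both.contains(ServerFlags::HTTP1));
+    ///```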
pub struct ServerFlags: u8 { + ///Use HTTP1 protocol const HTTP1 = 0b0000_0001; + ///Use HTTP2 protocol const HTTP2 = 0b0000_0010; } } diff --git a/src/server/server.rs b/src/server/server.rs index 9e25efc56..552ba8ee2 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -13,6 +13,8 @@ use super::accept::{AcceptLoop, AcceptNotify, Command}; use super::worker::{StopWorker, Worker, WorkerClient, Conn}; use super::{PauseServer, ResumeServer, StopServer, Token}; +///Describes service that could be used +///with [Server](struct.Server.html) pub trait Service: Send + 'static { /// Clone service fn clone(&self) -> Box; @@ -31,6 +33,8 @@ impl Service for Box { } } +///Describes the way serivce handles incoming +///TCP connections. pub trait ServiceHandler { /// Handle incoming stream fn handle(&mut self, token: Token, io: net::TcpStream, peer: Option); @@ -43,6 +47,7 @@ pub(crate) enum ServerCommand { WorkerDied(usize), } +///Server pub struct Server { threads: usize, workers: Vec<(usize, Addr)>, @@ -80,7 +85,7 @@ impl Server { maxconnrate: 256, } } - + /// Set number of workers to start. /// /// By default http server uses number of available logical cpu as threads @@ -108,7 +113,7 @@ impl Server { /// /// By default max connections is set to a 256. pub fn maxconnrate(mut self, num: usize) -> Self { - self.maxconnrate= num; + self.maxconnrate = num; self } @@ -146,7 +151,7 @@ impl Server { } /// Add new service to server - pub fn service(mut self, srv: T) -> Self + pub fn service(mut self, srv: T) -> Self where T: Into<(Box, Vec<(Token, net::TcpListener)>)> { @@ -171,7 +176,7 @@ impl Server { /// /// fn main() { /// Server::new(). - /// .service( + /// .service( /// HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok()))) /// .bind("127.0.0.1:0") /// .expect("Can not bind to 127.0.0.1:0")) @@ -184,7 +189,7 @@ impl Server { sys.run(); } - /// Start + /// Starts Server Actor and returns its address pub fn start(mut self) -> Addr { if self.sockets.is_empty() { panic!("Service should have at least one bound socket"); @@ -393,7 +398,8 @@ impl StreamHandler for Server { } #[derive(Clone, Default)] -pub struct Connections (Arc); +///Contains information about connection. +pub struct Connections(Arc); impl Connections { fn new(notify: AcceptNotify, maxconn: usize, maxconnrate: usize) -> Self { @@ -458,7 +464,7 @@ impl ConnectionsInner { self.notify.notify(); } } - + fn notify_maxconnrate(&self, connrate: usize) { if connrate > self.maxconnrate_low && connrate <= self.maxconnrate { self.notify.notify(); @@ -468,8 +474,8 @@ impl ConnectionsInner { } /// Type responsible for max connection stat. -/// -/// Max connections stat get updated on drop. +/// +/// Max connections stat get updated on drop. pub struct ConnectionTag(Arc); impl ConnectionTag { @@ -487,8 +493,8 @@ impl Drop for ConnectionTag { } /// Type responsible for max connection rate stat. -/// -/// Max connections rate stat get updated on drop. +/// +/// Max connections rate stat get updated on drop. 
pub struct ConnectionRateTag (Arc); impl ConnectionRateTag { From eb1e9a785f72e9702a773395dfff8e437ab74635 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 16 Aug 2018 20:29:06 -0700 Subject: [PATCH 061/219] allow to use fn with multiple arguments with .with()/.with_async() --- CHANGES.md | 4 +- MIGRATION.md | 6 ++ src/application.rs | 3 +- src/extractor.rs | 4 +- src/json.rs | 2 +- src/resource.rs | 3 +- src/route.rs | 27 +++---- src/router.rs | 3 +- src/scope.rs | 3 +- src/with.rs | 156 +++++++++++++++++++++++++++++++++-------- tests/test_handlers.rs | 4 +- tests/test_server.rs | 9 +-- 12 files changed, 169 insertions(+), 55 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ac302ed0a..e73b929aa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.4] - 2018-08-xx +## [0.8.0] - 2018-08-xx ### Added @@ -12,6 +12,8 @@ ### Changed +* It is allowed to use function with up to 10 parameters for handler with `extractor parameters`. + * native-tls - 0.2 * `Content-Disposition` is re-worked. Its parser is now more robust and handles quoted content better. See #461 diff --git a/MIGRATION.md b/MIGRATION.md index 29bf0c348..910e99a4a 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,3 +1,9 @@ +## 0.8 + +* `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple + even for handler with one parameter. + + ## 0.7 * `HttpRequest` does not implement `Stream` anymore. If you need to read request payload diff --git a/src/application.rs b/src/application.rs index 6885185f2..4c8946c4e 100644 --- a/src/application.rs +++ b/src/application.rs @@ -12,6 +12,7 @@ use resource::Resource; use router::{ResourceDef, Router}; use scope::Scope; use server::{HttpHandler, HttpHandlerTask, IntoHttpHandler, Request}; +use with::WithFactory; /// Application pub struct HttpApplication { @@ -249,7 +250,7 @@ where /// ``` pub fn route(mut self, path: &str, method: Method, f: F) -> App where - F: Fn(T) -> R + 'static, + F: WithFactory, R: Responder + 'static, T: FromRequest + 'static, { diff --git a/src/extractor.rs b/src/extractor.rs index 233ad6ce5..6d156d47a 100644 --- a/src/extractor.rs +++ b/src/extractor.rs @@ -332,7 +332,7 @@ impl fmt::Display for Form { /// |r| { /// r.method(http::Method::GET) /// // register form handler and change form extractor configuration -/// .with_config(index, |cfg| {cfg.limit(4096);}) +/// .with_config(index, |cfg| {cfg.0.limit(4096);}) /// }, /// ); /// } @@ -427,7 +427,7 @@ impl FromRequest for Bytes { /// let app = App::new().resource("/index.html", |r| { /// r.method(http::Method::GET) /// .with_config(index, |cfg| { // <- register handler with extractor params -/// cfg.limit(4096); // <- limit size of the payload +/// cfg.0.limit(4096); // <- limit size of the payload /// }) /// }); /// } diff --git a/src/json.rs b/src/json.rs index c76aeaa7d..86eefca96 100644 --- a/src/json.rs +++ b/src/json.rs @@ -172,7 +172,7 @@ where /// let app = App::new().resource("/index.html", |r| { /// r.method(http::Method::POST) /// .with_config(index, |cfg| { -/// cfg.limit(4096) // <- change json extractor configuration +/// cfg.0.limit(4096) // <- change json extractor configuration /// .error_handler(|err, req| { // <- create custom error response /// error::InternalError::from_response( /// err, HttpResponse::Conflict().finish()).into() diff --git a/src/resource.rs b/src/resource.rs index 1bf8d88fa..d884dd447 100644 --- a/src/resource.rs +++ b/src/resource.rs @@ -13,6 +13,7 @@ use middleware::Middleware; use pred; use route::Route; 
use router::ResourceDef; +use with::WithFactory; #[derive(Copy, Clone)] pub(crate) struct RouteId(usize); @@ -217,7 +218,7 @@ impl Resource { /// ``` pub fn with(&mut self, handler: F) where - F: Fn(T) -> R + 'static, + F: WithFactory, R: Responder + 'static, T: FromRequest + 'static, { diff --git a/src/route.rs b/src/route.rs index d383d90be..e2635aa65 100644 --- a/src/route.rs +++ b/src/route.rs @@ -16,7 +16,7 @@ use middleware::{ Started as MiddlewareStarted, }; use pred::Predicate; -use with::{With, WithAsync}; +use with::{WithAsyncFactory, WithFactory}; /// Resource route definition /// @@ -166,15 +166,15 @@ impl Route { /// ``` pub fn with(&mut self, handler: F) where - F: Fn(T) -> R + 'static, + F: WithFactory + 'static, R: Responder + 'static, T: FromRequest + 'static, { - self.h(With::new(handler, ::default())); + self.h(handler.create()); } /// Set handler function. Same as `.with()` but it allows to configure - /// extractor. + /// extractor. Configuration closure accepts config objects as tuple. /// /// ```rust /// # extern crate bytes; @@ -192,21 +192,21 @@ impl Route { /// let app = App::new().resource("/index.html", |r| { /// r.method(http::Method::GET) /// .with_config(index, |cfg| { // <- register handler - /// cfg.limit(4096); // <- limit size of the payload + /// cfg.0.limit(4096); // <- limit size of the payload /// }) /// }); /// } /// ``` pub fn with_config(&mut self, handler: F, cfg_f: C) where - F: Fn(T) -> R + 'static, + F: WithFactory, R: Responder + 'static, T: FromRequest + 'static, C: FnOnce(&mut T::Config), { let mut cfg = ::default(); cfg_f(&mut cfg); - self.h(With::new(handler, cfg)); + self.h(handler.create_with_config(cfg)); } /// Set async handler function, use request extractor for parameters. @@ -240,17 +240,18 @@ impl Route { /// ``` pub fn with_async(&mut self, handler: F) where - F: Fn(T) -> R + 'static, + F: WithAsyncFactory, R: Future + 'static, I: Responder + 'static, E: Into + 'static, T: FromRequest + 'static, { - self.h(WithAsync::new(handler, ::default())); + self.h(handler.create()); } /// Set async handler function, use request extractor for parameters. - /// This method allows to configure extractor. + /// This method allows to configure extractor. Configuration closure + /// accepts config objects as tuple. 
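+    /// For a handler that takes several extractors there is one config entry per
+    /// extractor, addressed positionally as `cfg.0`, `cfg.1`, and so on.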
/// /// ```rust /// # extern crate bytes; @@ -275,14 +276,14 @@ impl Route { /// "/{username}/index.html", // <- define path parameters /// |r| r.method(http::Method::GET) /// .with_async_config(index, |cfg| { - /// cfg.limit(4096); + /// cfg.0.limit(4096); /// }), /// ); // <- use `with` extractor /// } /// ``` pub fn with_async_config(&mut self, handler: F, cfg: C) where - F: Fn(T) -> R + 'static, + F: WithAsyncFactory, R: Future + 'static, I: Responder + 'static, E: Into + 'static, @@ -291,7 +292,7 @@ impl Route { { let mut extractor_cfg = ::default(); cfg(&mut extractor_cfg); - self.h(WithAsync::new(handler, extractor_cfg)); + self.h(handler.create_with_config(extractor_cfg)); } } diff --git a/src/router.rs b/src/router.rs index ff52eac5f..6dc6224ac 100644 --- a/src/router.rs +++ b/src/router.rs @@ -17,6 +17,7 @@ use pred::Predicate; use resource::{DefaultResource, Resource}; use scope::Scope; use server::Request; +use with::WithFactory; #[derive(Debug, Copy, Clone, PartialEq)] pub(crate) enum ResourceId { @@ -398,7 +399,7 @@ impl Router { pub(crate) fn register_route(&mut self, path: &str, method: Method, f: F) where - F: Fn(T) -> R + 'static, + F: WithFactory, R: Responder + 'static, T: FromRequest + 'static, { diff --git a/src/scope.rs b/src/scope.rs index d8a0a81ad..baf891c36 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -17,6 +17,7 @@ use pred::Predicate; use resource::{DefaultResource, Resource}; use router::{ResourceDef, Router}; use server::Request; +use with::WithFactory; /// Resources scope /// @@ -222,7 +223,7 @@ impl Scope { /// ``` pub fn route(mut self, path: &str, method: Method, f: F) -> Scope where - F: Fn(T) -> R + 'static, + F: WithFactory, R: Responder + 'static, T: FromRequest + 'static, { diff --git a/src/with.rs b/src/with.rs index 0af626c8b..caffe0acb 100644 --- a/src/with.rs +++ b/src/with.rs @@ -7,24 +7,74 @@ use handler::{AsyncResult, AsyncResultItem, FromRequest, Handler, Responder}; use httprequest::HttpRequest; use httpresponse::HttpResponse; -pub(crate) struct With +trait FnWith: 'static { + fn call_with(self: &Self, T) -> R; +} + +impl R + 'static> FnWith for F { + #[cfg_attr(feature = "cargo-clippy", allow(boxed_local))] + fn call_with(self: &Self, arg: T) -> R { + (*self)(arg) + } +} + +#[doc(hidden)] +pub trait WithFactory: 'static +where T: FromRequest, + R: Responder, +{ + fn create(self) -> With; + + fn create_with_config(self, T::Config) -> With; +} + +#[doc(hidden)] +pub trait WithAsyncFactory: 'static +where T: FromRequest, + R: Future, + I: Responder, + E: Into, +{ + fn create(self) -> WithAsync; + + fn create_with_config(self, T::Config) -> WithAsync; +} + +// impl WithFactory<(T1, T2, T3), S, R> for F +// where F: Fn(T1, T2, T3) -> R + 'static, +// T1: FromRequest + 'static, +// T2: FromRequest + 'static, +// T3: FromRequest + 'static, +// R: Responder + 'static, +// S: 'static, +// { +// fn create(self) -> With<(T1, T2, T3), S, R> { +// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), ( +// T1::Config::default(), T2::Config::default(), T3::Config::default())) +// } + +// fn create_with_config(self, cfg: (T1::Config, T2::Config, T3::Config,)) -> With<(T1, T2, T3), S, R> { +// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), cfg) +// } +// } + +#[doc(hidden)] +pub struct With where - F: Fn(T) -> R, T: FromRequest, S: 'static, { - hnd: Rc, + hnd: Rc>, cfg: Rc, _s: PhantomData, } -impl With +impl With where - F: Fn(T) -> R, T: FromRequest, S: 'static, { - pub fn new(f: F, cfg: T::Config) -> Self { + pub fn new R + 'static>(f: F, cfg: 
T::Config) -> Self { With { cfg: Rc::new(cfg), hnd: Rc::new(f), @@ -33,9 +83,8 @@ where } } -impl Handler for With +impl Handler for With where - F: Fn(T) -> R + 'static, R: Responder + 'static, T: FromRequest + 'static, S: 'static, @@ -60,24 +109,22 @@ where } } -struct WithHandlerFut +struct WithHandlerFut where - F: Fn(T) -> R, R: Responder, T: FromRequest + 'static, S: 'static, { started: bool, - hnd: Rc, + hnd: Rc>, cfg: Rc, req: HttpRequest, fut1: Option>>, fut2: Option>>, } -impl Future for WithHandlerFut +impl Future for WithHandlerFut where - F: Fn(T) -> R, R: Responder + 'static, T: FromRequest + 'static, S: 'static, @@ -108,7 +155,7 @@ where } }; - let item = match (*self.hnd)(item).respond_to(&self.req) { + let item = match self.hnd.as_ref().call_with(item).respond_to(&self.req) { Ok(item) => item.into(), Err(e) => return Err(e.into()), }; @@ -124,30 +171,29 @@ where } } -pub(crate) struct WithAsync +#[doc(hidden)] +pub struct WithAsync where - F: Fn(T) -> R, R: Future, I: Responder, E: Into, T: FromRequest, S: 'static, { - hnd: Rc, + hnd: Rc>, cfg: Rc, _s: PhantomData, } -impl WithAsync +impl WithAsync where - F: Fn(T) -> R, R: Future, I: Responder, E: Into, T: FromRequest, S: 'static, { - pub fn new(f: F, cfg: T::Config) -> Self { + pub fn new R + 'static>(f: F, cfg: T::Config) -> Self { WithAsync { cfg: Rc::new(cfg), hnd: Rc::new(f), @@ -156,9 +202,8 @@ where } } -impl Handler for WithAsync +impl Handler for WithAsync where - F: Fn(T) -> R + 'static, R: Future + 'static, I: Responder + 'static, E: Into + 'static, @@ -186,9 +231,8 @@ where } } -struct WithAsyncHandlerFut +struct WithAsyncHandlerFut where - F: Fn(T) -> R, R: Future + 'static, I: Responder + 'static, E: Into + 'static, @@ -196,7 +240,7 @@ where S: 'static, { started: bool, - hnd: Rc, + hnd: Rc>, cfg: Rc, req: HttpRequest, fut1: Option>>, @@ -204,9 +248,8 @@ where fut3: Option>>, } -impl Future for WithAsyncHandlerFut +impl Future for WithAsyncHandlerFut where - F: Fn(T) -> R, R: Future + 'static, I: Responder + 'static, E: Into + 'static, @@ -257,7 +300,64 @@ where } }; - self.fut2 = Some((*self.hnd)(item)); + self.fut2 = Some(self.hnd.as_ref().call_with(item)); self.poll() } } + + +macro_rules! with_factory_tuple ({$(($n:tt, $T:ident)),+} => { + impl<$($T,)+ State, Func, Res> WithFactory<($($T,)+), State, Res> for Func + where Func: Fn($($T,)+) -> Res + 'static, + $($T: FromRequest + 'static,)+ + Res: Responder + 'static, + State: 'static, + { + fn create(self) -> With<($($T,)+), State, Res> { + With::new(move |($($n,)+)| (self)($($n,)+), ($($T::Config::default(),)+)) + } + + fn create_with_config(self, cfg: ($($T::Config,)+)) -> With<($($T,)+), State, Res> { + With::new(move |($($n,)+)| (self)($($n,)+), cfg) + } + } +}); + +macro_rules! 
with_async_factory_tuple ({$(($n:tt, $T:ident)),+} => { + impl<$($T,)+ State, Func, Res, Item, Err> WithAsyncFactory<($($T,)+), State, Res, Item, Err> for Func + where Func: Fn($($T,)+) -> Res + 'static, + $($T: FromRequest + 'static,)+ + Res: Future, + Item: Responder + 'static, + Err: Into, + State: 'static, + { + fn create(self) -> WithAsync<($($T,)+), State, Res, Item, Err> { + WithAsync::new(move |($($n,)+)| (self)($($n,)+), ($($T::Config::default(),)+)) + } + + fn create_with_config(self, cfg: ($($T::Config,)+)) -> WithAsync<($($T,)+), State, Res, Item, Err> { + WithAsync::new(move |($($n,)+)| (self)($($n,)+), cfg) + } + } +}); + +with_factory_tuple!((a, A)); +with_factory_tuple!((a, A), (b, B)); +with_factory_tuple!((a, A), (b, B), (c, C)); +with_factory_tuple!((a, A), (b, B), (c, C), (d, D)); +with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E)); +with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F)); +with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G)); +with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G), (h, H)); +with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G), (h, H), (i, I)); + +with_async_factory_tuple!((a, A)); +with_async_factory_tuple!((a, A), (b, B)); +with_async_factory_tuple!((a, A), (b, B), (c, C)); +with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D)); +with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E)); +with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F)); +with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G)); +with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G), (h, H)); +with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G), (h, H), (i, I)); diff --git a/tests/test_handlers.rs b/tests/test_handlers.rs index c86a3e9c0..4243cd3a8 100644 --- a/tests/test_handlers.rs +++ b/tests/test_handlers.rs @@ -208,7 +208,7 @@ fn test_form_extractor2() { r.route().with_config( |form: Form| format!("{}", form.username), |cfg| { - cfg.error_handler(|err, _| { + cfg.0.error_handler(|err, _| { error::InternalError::from_response( err, HttpResponse::Conflict().finish(), @@ -423,7 +423,7 @@ fn test_path_and_query_extractor2_async3() { let mut srv = test::TestServer::new(|app| { app.resource("/{username}/index.html", |r| { r.route().with( - |(data, p, _q): (Json, Path, Query)| { + |data: Json, p: Path, _: Query| { Delay::new(Instant::now() + Duration::from_millis(10)) .and_then(move |_| { Ok(format!("Welcome {} - {}!", p.username, data.0)) diff --git a/tests/test_server.rs b/tests/test_server.rs index 5c4385680..8739b4f71 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -13,8 +13,8 @@ extern crate tokio_reactor; extern crate tokio_tcp; use std::io::{Read, Write}; -use std::sync::{mpsc, Arc}; -use std::{net, thread, time}; +use std::sync::Arc; +use std::{thread, time}; #[cfg(feature = "brotli")] use brotli2::write::{BrotliDecoder, BrotliEncoder}; @@ -32,7 +32,6 @@ use tokio::executor::current_thread; use tokio::runtime::current_thread::Runtime; use tokio_tcp::TcpStream; -use actix::System; use actix_web::*; const STR: &str = "Hello World Hello World Hello World Hello World Hello World \ @@ -60,11 +59,13 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \ #[test] #[cfg(unix)] fn test_start() { + use std::{mpsc, net}; + let _ = test::TestServer::unused_addr(); let (tx, rx) = mpsc::channel(); thread::spawn(|| { - 
System::run(move || { + actix::System::run(move || { let srv = server::new(|| { vec![App::new().resource("/", |r| { r.method(http::Method::GET).f(|_| HttpResponse::Ok()) From a8405d0686c2e4fd85c173e61d3ac8617a8a2cc2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kornel=20Lesin=CC=81ski?= Date: Fri, 17 Aug 2018 13:12:47 +0100 Subject: [PATCH 062/219] Fix tests on Unix --- tests/test_server.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/test_server.rs b/tests/test_server.rs index 8739b4f71..36c1b6e6b 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -59,13 +59,14 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \ #[test] #[cfg(unix)] fn test_start() { - use std::{mpsc, net}; + use std::sync::mpsc; + use actix::System; let _ = test::TestServer::unused_addr(); let (tx, rx) = mpsc::channel(); thread::spawn(|| { - actix::System::run(move || { + System::run(move || { let srv = server::new(|| { vec![App::new().resource("/", |r| { r.method(http::Method::GET).f(|_| HttpResponse::Ok()) @@ -118,6 +119,10 @@ fn test_start() { #[test] #[cfg(unix)] fn test_shutdown() { + use std::sync::mpsc; + use std::net; + use actix::System; + let _ = test::TestServer::unused_addr(); let (tx, rx) = mpsc::channel(); @@ -157,6 +162,9 @@ fn test_shutdown() { #[test] #[cfg(unix)] fn test_panic() { + use std::sync::mpsc; + use actix::System; + let _ = test::TestServer::unused_addr(); let (tx, rx) = mpsc::channel(); From bdc9a8bb07afb10df5cef54f4a5f8ab36c2e253a Mon Sep 17 00:00:00 2001 From: Kornel Date: Fri, 17 Aug 2018 17:04:16 +0100 Subject: [PATCH 063/219] Optionally support tokio-uds's UnixStream as IoStream (#472) --- CHANGES.md | 2 ++ Cargo.toml | 6 ++++++ src/client/connector.rs | 4 ++++ src/lib.rs | 4 ++++ src/server/mod.rs | 18 ++++++++++++++++++ tests/test_client.rs | 10 ++++++++++ 6 files changed, 44 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index e73b929aa..9dd908aeb 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,6 +10,8 @@ * Allow to customize connection handshake process via `HttpServer::listen_with()` and `HttpServer::bind_with()` methods +* Support making client connections via `tokio-uds`'s `UnixStream` when "uds" feature is enabled #472 + ### Changed * It is allowed to use function with up to 10 parameters for handler with `extractor parameters`. diff --git a/Cargo.toml b/Cargo.toml index 3bfac16c1..3d72f41c2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,9 @@ alpn = ["openssl", "tokio-openssl"] # rustls rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"] +# unix sockets +uds = ["tokio-uds"] + # sessions feature, session require "ring" crate and c compiler session = ["cookie/secure"] @@ -112,6 +115,9 @@ tokio-rustls = { version = "0.7", optional = true } webpki = { version = "0.18", optional = true } webpki-roots = { version = "0.15", optional = true } +# unix sockets +tokio-uds = { version="0.2", optional = true } + # forked url_encoded itoa = "0.4" dtoa = "0.4" diff --git a/src/client/connector.rs b/src/client/connector.rs index ef66cd734..75b2e149f 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -1287,6 +1287,10 @@ impl Connection { } /// Create a new connection from an IO Stream + /// + /// The stream can be a `UnixStream` if the Unix-only "uds" feature is enabled. + /// + /// See also `ClientRequestBuilder::with_connection()`. 
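+    ///
+    /// A rough sketch with the "uds" feature enabled (mirroring the test added
+    /// in this patch):
+    ///
+    /// ```ignore
+    /// let (stream, _) = tokio_uds::UnixStream::pair().unwrap();
+    /// let conn = client::Connection::from_stream(stream);
+    /// ```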
pub fn from_stream(io: T) -> Connection { Connection::new(Key::empty(), None, Box::new(io)) } diff --git a/src/lib.rs b/src/lib.rs index c6d3453a2..3f1dafc16 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -66,6 +66,8 @@ //! * `tls` - enables ssl support via `native-tls` crate //! * `alpn` - enables ssl support via `openssl` crate, require for `http/2` //! support +//! * `uds` - enables support for making client requests via Unix Domain Sockets. +//! Unix only. Not necessary for *serving* requests. //! * `session` - enables session support, includes `ring` crate as //! dependency //! * `brotli` - enables `brotli` compression support, requires `c` @@ -120,6 +122,8 @@ extern crate tokio_io; extern crate tokio_reactor; extern crate tokio_tcp; extern crate tokio_timer; +#[cfg(all(unix, feature = "uds"))] +extern crate tokio_uds; extern crate url; #[macro_use] extern crate serde; diff --git a/src/server/mod.rs b/src/server/mod.rs index f33a345e5..cccdf8267 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -421,6 +421,24 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static { } } +#[cfg(all(unix, feature = "uds"))] +impl IoStream for ::tokio_uds::UnixStream { + #[inline] + fn shutdown(&mut self, how: Shutdown) -> io::Result<()> { + ::tokio_uds::UnixStream::shutdown(self, how) + } + + #[inline] + fn set_nodelay(&mut self, _nodelay: bool) -> io::Result<()> { + Ok(()) + } + + #[inline] + fn set_linger(&mut self, _dur: Option) -> io::Result<()> { + Ok(()) + } +} + impl IoStream for TcpStream { #[inline] fn shutdown(&mut self, how: Shutdown) -> io::Result<()> { diff --git a/tests/test_client.rs b/tests/test_client.rs index 5e6856998..16d95bf29 100644 --- a/tests/test_client.rs +++ b/tests/test_client.rs @@ -5,6 +5,8 @@ extern crate bytes; extern crate flate2; extern crate futures; extern crate rand; +#[cfg(all(unix, feature = "uds"))] +extern crate tokio_uds; use std::io::Read; @@ -198,6 +200,14 @@ fn test_client_gzip_encoding_large_random() { assert_eq!(bytes, Bytes::from(data)); } + +#[cfg(all(unix, feature = "uds"))] +#[test] +fn test_compatible_with_unix_socket_stream() { + let (stream, _) = tokio_uds::UnixStream::pair().unwrap(); + let _ = client::Connection::from_stream(stream); +} + #[cfg(feature = "brotli")] #[test] fn test_client_brotli_encoding() { From 56bc900a82e955fd58e2039695b4887d30386982 Mon Sep 17 00:00:00 2001 From: Douman Date: Fri, 17 Aug 2018 19:53:16 +0300 Subject: [PATCH 064/219] Set minimum rustls version that fixes corruption (#474) --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 3d72f41c2..ff8571ba6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,8 +110,8 @@ openssl = { version="0.10", optional = true } tokio-openssl = { version="0.2", optional = true } #rustls -rustls = { version = "0.13", optional = true } -tokio-rustls = { version = "0.7", optional = true } +rustls = { version = "^0.13.1", optional = true } +tokio-rustls = { version = "^0.7.2", optional = true } webpki = { version = "0.18", optional = true } webpki-roots = { version = "0.15", optional = true } From e680541e10aff1fc6a4d271ab308516e835a73a0 Mon Sep 17 00:00:00 2001 From: Franz Gregor Date: Sat, 18 Aug 2018 19:32:28 +0200 Subject: [PATCH 065/219] Made extensions constructor public --- src/extensions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/extensions.rs b/src/extensions.rs index da7b5ba24..3e3f24a24 100644 --- a/src/extensions.rs +++ b/src/extensions.rs @@ -39,7 +39,7 @@ pub struct Extensions { impl 
Extensions { /// Create an empty `Extensions`. #[inline] - pub(crate) fn new() -> Extensions { + pub fn new() -> Extensions { Extensions { map: HashMap::default(), } From 986f19af8655b95ebbba3a2b8fe2829eca1c85c5 Mon Sep 17 00:00:00 2001 From: Douman Date: Tue, 21 Aug 2018 22:23:17 +0300 Subject: [PATCH 066/219] Revert back to serde_urlencoded dependecy (#479) --- Cargo.toml | 4 +- src/lib.rs | 2 +- src/serde_urlencoded/de.rs | 305 ------------------- src/serde_urlencoded/mod.rs | 121 -------- src/serde_urlencoded/ser/key.rs | 74 ----- src/serde_urlencoded/ser/mod.rs | 490 ------------------------------ src/serde_urlencoded/ser/pair.rs | 239 --------------- src/serde_urlencoded/ser/part.rs | 201 ------------ src/serde_urlencoded/ser/value.rs | 59 ---- 9 files changed, 2 insertions(+), 1493 deletions(-) delete mode 100644 src/serde_urlencoded/de.rs delete mode 100644 src/serde_urlencoded/mod.rs delete mode 100644 src/serde_urlencoded/ser/key.rs delete mode 100644 src/serde_urlencoded/ser/mod.rs delete mode 100644 src/serde_urlencoded/ser/pair.rs delete mode 100644 src/serde_urlencoded/ser/part.rs delete mode 100644 src/serde_urlencoded/ser/value.rs diff --git a/Cargo.toml b/Cargo.toml index ff8571ba6..6437ec268 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,9 +118,7 @@ webpki-roots = { version = "0.15", optional = true } # unix sockets tokio-uds = { version="0.2", optional = true } -# forked url_encoded -itoa = "0.4" -dtoa = "0.4" +serde_urlencoded = "^0.5.3" [dev-dependencies] env_logger = "0.5" diff --git a/src/lib.rs b/src/lib.rs index 3f1dafc16..72fe26c10 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -127,6 +127,7 @@ extern crate tokio_uds; extern crate url; #[macro_use] extern crate serde; +extern crate serde_urlencoded; #[cfg(feature = "brotli")] extern crate brotli2; extern crate encoding; @@ -184,7 +185,6 @@ mod resource; mod route; mod router; mod scope; -mod serde_urlencoded; mod uri; mod with; diff --git a/src/serde_urlencoded/de.rs b/src/serde_urlencoded/de.rs deleted file mode 100644 index ae14afbf5..000000000 --- a/src/serde_urlencoded/de.rs +++ /dev/null @@ -1,305 +0,0 @@ -//! Deserialization support for the `application/x-www-form-urlencoded` format. - -use serde::de::Error as de_Error; -use serde::de::{ - self, DeserializeSeed, EnumAccess, IntoDeserializer, VariantAccess, Visitor, -}; - -use serde::de::value::MapDeserializer; -use std::borrow::Cow; -use std::io::Read; -use url::form_urlencoded::parse; -use url::form_urlencoded::Parse as UrlEncodedParse; - -#[doc(inline)] -pub use serde::de::value::Error; - -/// Deserializes a `application/x-wwww-url-encoded` value from a `&[u8]`. -/// -/// ```ignore -/// let meal = vec![ -/// ("bread".to_owned(), "baguette".to_owned()), -/// ("cheese".to_owned(), "comté".to_owned()), -/// ("meat".to_owned(), "ham".to_owned()), -/// ("fat".to_owned(), "butter".to_owned()), -/// ]; -/// -/// assert_eq!( -/// serde_urlencoded::from_bytes::>( -/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter"), -/// Ok(meal)); -/// ``` -pub fn from_bytes<'de, T>(input: &'de [u8]) -> Result -where - T: de::Deserialize<'de>, -{ - T::deserialize(Deserializer::new(parse(input))) -} - -/// Deserializes a `application/x-wwww-url-encoded` value from a `&str`. 
-/// -/// ```ignore -/// let meal = vec![ -/// ("bread".to_owned(), "baguette".to_owned()), -/// ("cheese".to_owned(), "comté".to_owned()), -/// ("meat".to_owned(), "ham".to_owned()), -/// ("fat".to_owned(), "butter".to_owned()), -/// ]; -/// -/// assert_eq!( -/// serde_urlencoded::from_str::>( -/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter"), -/// Ok(meal)); -/// ``` -pub fn from_str<'de, T>(input: &'de str) -> Result -where - T: de::Deserialize<'de>, -{ - from_bytes(input.as_bytes()) -} - -#[allow(dead_code)] -/// Convenience function that reads all bytes from `reader` and deserializes -/// them with `from_bytes`. -pub fn from_reader(mut reader: R) -> Result -where - T: de::DeserializeOwned, - R: Read, -{ - let mut buf = vec![]; - reader - .read_to_end(&mut buf) - .map_err(|e| de::Error::custom(format_args!("could not read input: {}", e)))?; - from_bytes(&buf) -} - -/// A deserializer for the `application/x-www-form-urlencoded` format. -/// -/// * Supported top-level outputs are structs, maps and sequences of pairs, -/// with or without a given length. -/// -/// * Main `deserialize` methods defers to `deserialize_map`. -/// -/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size` -/// defers to `deserialize`. -pub struct Deserializer<'de> { - inner: MapDeserializer<'de, PartIterator<'de>, Error>, -} - -impl<'de> Deserializer<'de> { - /// Returns a new `Deserializer`. - pub fn new(parser: UrlEncodedParse<'de>) -> Self { - Deserializer { - inner: MapDeserializer::new(PartIterator(parser)), - } - } -} - -impl<'de> de::Deserializer<'de> for Deserializer<'de> { - type Error = Error; - - fn deserialize_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_map(visitor) - } - - fn deserialize_map(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_map(self.inner) - } - - fn deserialize_seq(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_seq(self.inner) - } - - fn deserialize_unit(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.inner.end()?; - visitor.visit_unit() - } - - forward_to_deserialize_any! { - bool - u8 - u16 - u32 - u64 - i8 - i16 - i32 - i64 - f32 - f64 - char - str - string - option - bytes - byte_buf - unit_struct - newtype_struct - tuple_struct - struct - identifier - tuple - enum - ignored_any - } -} - -struct PartIterator<'de>(UrlEncodedParse<'de>); - -impl<'de> Iterator for PartIterator<'de> { - type Item = (Part<'de>, Part<'de>); - - fn next(&mut self) -> Option { - self.0.next().map(|(k, v)| (Part(k), Part(v))) - } -} - -struct Part<'de>(Cow<'de, str>); - -impl<'de> IntoDeserializer<'de> for Part<'de> { - type Deserializer = Self; - - fn into_deserializer(self) -> Self::Deserializer { - self - } -} - -macro_rules! 
forward_parsed_value { - ($($ty:ident => $method:ident,)*) => { - $( - fn $method(self, visitor: V) -> Result - where V: de::Visitor<'de> - { - match self.0.parse::<$ty>() { - Ok(val) => val.into_deserializer().$method(visitor), - Err(e) => Err(de::Error::custom(e)) - } - } - )* - } -} - -impl<'de> de::Deserializer<'de> for Part<'de> { - type Error = Error; - - fn deserialize_any(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.0.into_deserializer().deserialize_any(visitor) - } - - fn deserialize_option(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_some(self) - } - - fn deserialize_enum( - self, _name: &'static str, _variants: &'static [&'static str], visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - visitor.visit_enum(ValueEnumAccess { value: self.0 }) - } - - forward_to_deserialize_any! { - char - str - string - unit - bytes - byte_buf - unit_struct - newtype_struct - tuple_struct - struct - identifier - tuple - ignored_any - seq - map - } - - forward_parsed_value! { - bool => deserialize_bool, - u8 => deserialize_u8, - u16 => deserialize_u16, - u32 => deserialize_u32, - u64 => deserialize_u64, - i8 => deserialize_i8, - i16 => deserialize_i16, - i32 => deserialize_i32, - i64 => deserialize_i64, - f32 => deserialize_f32, - f64 => deserialize_f64, - } -} - -/// Provides access to a keyword which can be deserialized into an enum variant. The enum variant -/// must be a unit variant, otherwise deserialization will fail. -struct ValueEnumAccess<'de> { - value: Cow<'de, str>, -} - -impl<'de> EnumAccess<'de> for ValueEnumAccess<'de> { - type Error = Error; - type Variant = UnitOnlyVariantAccess; - - fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error> - where - V: DeserializeSeed<'de>, - { - let variant = seed.deserialize(self.value.into_deserializer())?; - Ok((variant, UnitOnlyVariantAccess)) - } -} - -/// A visitor for deserializing the contents of the enum variant. As we only support -/// `unit_variant`, all other variant types will return an error. -struct UnitOnlyVariantAccess; - -impl<'de> VariantAccess<'de> for UnitOnlyVariantAccess { - type Error = Error; - - fn unit_variant(self) -> Result<(), Self::Error> { - Ok(()) - } - - fn newtype_variant_seed(self, _seed: T) -> Result - where - T: DeserializeSeed<'de>, - { - Err(Error::custom("expected unit variant")) - } - - fn tuple_variant(self, _len: usize, _visitor: V) -> Result - where - V: Visitor<'de>, - { - Err(Error::custom("expected unit variant")) - } - - fn struct_variant( - self, _fields: &'static [&'static str], _visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - Err(Error::custom("expected unit variant")) - } -} diff --git a/src/serde_urlencoded/mod.rs b/src/serde_urlencoded/mod.rs deleted file mode 100644 index 7e2cf33ae..000000000 --- a/src/serde_urlencoded/mod.rs +++ /dev/null @@ -1,121 +0,0 @@ -//! 
`x-www-form-urlencoded` meets Serde - -extern crate dtoa; -extern crate itoa; - -pub mod de; -pub mod ser; - -#[doc(inline)] -pub use self::de::{from_bytes, from_reader, from_str, Deserializer}; -#[doc(inline)] -pub use self::ser::{to_string, Serializer}; - -#[cfg(test)] -mod tests { - #[test] - fn deserialize_bytes() { - let result = vec![("first".to_owned(), 23), ("last".to_owned(), 42)]; - - assert_eq!(super::from_bytes(b"first=23&last=42"), Ok(result)); - } - - #[test] - fn deserialize_str() { - let result = vec![("first".to_owned(), 23), ("last".to_owned(), 42)]; - - assert_eq!(super::from_str("first=23&last=42"), Ok(result)); - } - - #[test] - fn deserialize_reader() { - let result = vec![("first".to_owned(), 23), ("last".to_owned(), 42)]; - - assert_eq!(super::from_reader(b"first=23&last=42" as &[_]), Ok(result)); - } - - #[test] - fn deserialize_option() { - let result = vec![ - ("first".to_owned(), Some(23)), - ("last".to_owned(), Some(42)), - ]; - assert_eq!(super::from_str("first=23&last=42"), Ok(result)); - } - - #[test] - fn deserialize_unit() { - assert_eq!(super::from_str(""), Ok(())); - assert_eq!(super::from_str("&"), Ok(())); - assert_eq!(super::from_str("&&"), Ok(())); - assert!(super::from_str::<()>("first=23").is_err()); - } - - #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] - enum X { - A, - B, - C, - } - - #[test] - fn deserialize_unit_enum() { - let result = vec![ - ("one".to_owned(), X::A), - ("two".to_owned(), X::B), - ("three".to_owned(), X::C), - ]; - - assert_eq!(super::from_str("one=A&two=B&three=C"), Ok(result)); - } - - #[test] - fn serialize_option_map_int() { - let params = &[("first", Some(23)), ("middle", None), ("last", Some(42))]; - - assert_eq!(super::to_string(params), Ok("first=23&last=42".to_owned())); - } - - #[test] - fn serialize_option_map_string() { - let params = &[ - ("first", Some("hello")), - ("middle", None), - ("last", Some("world")), - ]; - - assert_eq!( - super::to_string(params), - Ok("first=hello&last=world".to_owned()) - ); - } - - #[test] - fn serialize_option_map_bool() { - let params = &[("one", Some(true)), ("two", Some(false))]; - - assert_eq!( - super::to_string(params), - Ok("one=true&two=false".to_owned()) - ); - } - - #[test] - fn serialize_map_bool() { - let params = &[("one", true), ("two", false)]; - - assert_eq!( - super::to_string(params), - Ok("one=true&two=false".to_owned()) - ); - } - - #[test] - fn serialize_unit_enum() { - let params = &[("one", X::A), ("two", X::B), ("three", X::C)]; - assert_eq!( - super::to_string(params), - Ok("one=A&two=B&three=C".to_owned()) - ); - } -} diff --git a/src/serde_urlencoded/ser/key.rs b/src/serde_urlencoded/ser/key.rs deleted file mode 100644 index 48497a558..000000000 --- a/src/serde_urlencoded/ser/key.rs +++ /dev/null @@ -1,74 +0,0 @@ -use super::super::ser::part::Sink; -use super::super::ser::Error; -use serde::Serialize; -use std::borrow::Cow; -use std::ops::Deref; - -pub enum Key<'key> { - Static(&'static str), - Dynamic(Cow<'key, str>), -} - -impl<'key> Deref for Key<'key> { - type Target = str; - - fn deref(&self) -> &str { - match *self { - Key::Static(key) => key, - Key::Dynamic(ref key) => key, - } - } -} - -impl<'key> From> for Cow<'static, str> { - fn from(key: Key<'key>) -> Self { - match key { - Key::Static(key) => key.into(), - Key::Dynamic(key) => key.into_owned().into(), - } - } -} - -pub struct KeySink { - end: End, -} - -impl KeySink -where - End: for<'key> FnOnce(Key<'key>) -> Result, -{ - pub fn new(end: End) -> Self { - KeySink { end } - } -} - 
-impl Sink for KeySink -where - End: for<'key> FnOnce(Key<'key>) -> Result, -{ - type Ok = Ok; - - fn serialize_static_str(self, value: &'static str) -> Result { - (self.end)(Key::Static(value)) - } - - fn serialize_str(self, value: &str) -> Result { - (self.end)(Key::Dynamic(value.into())) - } - - fn serialize_string(self, value: String) -> Result { - (self.end)(Key::Dynamic(value.into())) - } - - fn serialize_none(self) -> Result { - Err(self.unsupported()) - } - - fn serialize_some(self, _value: &T) -> Result { - Err(self.unsupported()) - } - - fn unsupported(self) -> Error { - Error::Custom("unsupported key".into()) - } -} diff --git a/src/serde_urlencoded/ser/mod.rs b/src/serde_urlencoded/ser/mod.rs deleted file mode 100644 index b4022d563..000000000 --- a/src/serde_urlencoded/ser/mod.rs +++ /dev/null @@ -1,490 +0,0 @@ -//! Serialization support for the `application/x-www-form-urlencoded` format. - -mod key; -mod pair; -mod part; -mod value; - -use serde::ser; -use std::borrow::Cow; -use std::error; -use std::fmt; -use std::str; -use url::form_urlencoded::Serializer as UrlEncodedSerializer; -use url::form_urlencoded::Target as UrlEncodedTarget; - -/// Serializes a value into a `application/x-wwww-url-encoded` `String` buffer. -/// -/// ```ignore -/// let meal = &[ -/// ("bread", "baguette"), -/// ("cheese", "comté"), -/// ("meat", "ham"), -/// ("fat", "butter"), -/// ]; -/// -/// assert_eq!( -/// serde_urlencoded::to_string(meal), -/// Ok("bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter".to_owned())); -/// ``` -pub fn to_string(input: T) -> Result { - let mut urlencoder = UrlEncodedSerializer::new("".to_owned()); - input.serialize(Serializer::new(&mut urlencoder))?; - Ok(urlencoder.finish()) -} - -/// A serializer for the `application/x-www-form-urlencoded` format. -/// -/// * Supported top-level inputs are structs, maps and sequences of pairs, -/// with or without a given length. -/// -/// * Supported keys and values are integers, bytes (if convertible to strings), -/// unit structs and unit variants. -/// -/// * Newtype structs defer to their inner values. -pub struct Serializer<'output, Target: 'output + UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer, -} - -impl<'output, Target: 'output + UrlEncodedTarget> Serializer<'output, Target> { - /// Returns a new `Serializer`. - pub fn new(urlencoder: &'output mut UrlEncodedSerializer) -> Self { - Serializer { urlencoder } - } -} - -/// Errors returned during serializing to `application/x-www-form-urlencoded`. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum Error { - Custom(Cow<'static, str>), - Utf8(str::Utf8Error), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Custom(ref msg) => msg.fmt(f), - Error::Utf8(ref err) => write!(f, "invalid UTF-8: {}", err), - } - } -} - -impl error::Error for Error { - fn description(&self) -> &str { - match *self { - Error::Custom(ref msg) => msg, - Error::Utf8(ref err) => error::Error::description(err), - } - } - - /// The lower-level cause of this error, in the case of a `Utf8` error. - fn cause(&self) -> Option<&error::Error> { - match *self { - Error::Custom(_) => None, - Error::Utf8(ref err) => Some(err), - } - } -} - -impl ser::Error for Error { - fn custom(msg: T) -> Self { - Error::Custom(format!("{}", msg).into()) - } -} - -/// Sequence serializer. -pub struct SeqSerializer<'output, Target: 'output + UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer, -} - -/// Tuple serializer. 
-/// -/// Mostly used for arrays. -pub struct TupleSerializer<'output, Target: 'output + UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer, -} - -/// Tuple struct serializer. -/// -/// Never instantiated, tuple structs are not supported. -pub struct TupleStructSerializer<'output, T: 'output + UrlEncodedTarget> { - inner: ser::Impossible<&'output mut UrlEncodedSerializer, Error>, -} - -/// Tuple variant serializer. -/// -/// Never instantiated, tuple variants are not supported. -pub struct TupleVariantSerializer<'output, T: 'output + UrlEncodedTarget> { - inner: ser::Impossible<&'output mut UrlEncodedSerializer, Error>, -} - -/// Map serializer. -pub struct MapSerializer<'output, Target: 'output + UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer, - key: Option>, -} - -/// Struct serializer. -pub struct StructSerializer<'output, Target: 'output + UrlEncodedTarget> { - urlencoder: &'output mut UrlEncodedSerializer, -} - -/// Struct variant serializer. -/// -/// Never instantiated, struct variants are not supported. -pub struct StructVariantSerializer<'output, T: 'output + UrlEncodedTarget> { - inner: ser::Impossible<&'output mut UrlEncodedSerializer, Error>, -} - -impl<'output, Target> ser::Serializer for Serializer<'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer; - type Error = Error; - type SerializeSeq = SeqSerializer<'output, Target>; - type SerializeTuple = TupleSerializer<'output, Target>; - type SerializeTupleStruct = TupleStructSerializer<'output, Target>; - type SerializeTupleVariant = TupleVariantSerializer<'output, Target>; - type SerializeMap = MapSerializer<'output, Target>; - type SerializeStruct = StructSerializer<'output, Target>; - type SerializeStructVariant = StructVariantSerializer<'output, Target>; - - /// Returns an error. - fn serialize_bool(self, _v: bool) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_i8(self, _v: i8) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_i16(self, _v: i16) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_i32(self, _v: i32) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_i64(self, _v: i64) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_u8(self, _v: u8) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_u16(self, _v: u16) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_u32(self, _v: u32) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_u64(self, _v: u64) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_f32(self, _v: f32) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_f64(self, _v: f64) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_char(self, _v: char) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_str(self, _value: &str) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_bytes(self, _value: &[u8]) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_unit(self) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_unit_struct(self, _name: &'static str) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. 
- fn serialize_unit_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - ) -> Result { - Err(Error::top_level()) - } - - /// Serializes the inner value, ignoring the newtype name. - fn serialize_newtype_struct( - self, _name: &'static str, value: &T, - ) -> Result { - value.serialize(self) - } - - /// Returns an error. - fn serialize_newtype_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - _value: &T, - ) -> Result { - Err(Error::top_level()) - } - - /// Returns `Ok`. - fn serialize_none(self) -> Result { - Ok(self.urlencoder) - } - - /// Serializes the given value. - fn serialize_some( - self, value: &T, - ) -> Result { - value.serialize(self) - } - - /// Serialize a sequence, given length (if any) is ignored. - fn serialize_seq(self, _len: Option) -> Result { - Ok(SeqSerializer { - urlencoder: self.urlencoder, - }) - } - - /// Returns an error. - fn serialize_tuple(self, _len: usize) -> Result { - Ok(TupleSerializer { - urlencoder: self.urlencoder, - }) - } - - /// Returns an error. - fn serialize_tuple_struct( - self, _name: &'static str, _len: usize, - ) -> Result { - Err(Error::top_level()) - } - - /// Returns an error. - fn serialize_tuple_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - _len: usize, - ) -> Result { - Err(Error::top_level()) - } - - /// Serializes a map, given length is ignored. - fn serialize_map(self, _len: Option) -> Result { - Ok(MapSerializer { - urlencoder: self.urlencoder, - key: None, - }) - } - - /// Serializes a struct, given length is ignored. - fn serialize_struct( - self, _name: &'static str, _len: usize, - ) -> Result { - Ok(StructSerializer { - urlencoder: self.urlencoder, - }) - } - - /// Returns an error. - fn serialize_struct_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - _len: usize, - ) -> Result { - Err(Error::top_level()) - } -} - -impl<'output, Target> ser::SerializeSeq for SeqSerializer<'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer; - type Error = Error; - - fn serialize_element( - &mut self, value: &T, - ) -> Result<(), Error> { - value.serialize(pair::PairSerializer::new(self.urlencoder)) - } - - fn end(self) -> Result { - Ok(self.urlencoder) - } -} - -impl<'output, Target> ser::SerializeTuple for TupleSerializer<'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer; - type Error = Error; - - fn serialize_element( - &mut self, value: &T, - ) -> Result<(), Error> { - value.serialize(pair::PairSerializer::new(self.urlencoder)) - } - - fn end(self) -> Result { - Ok(self.urlencoder) - } -} - -impl<'output, Target> ser::SerializeTupleStruct - for TupleStructSerializer<'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer; - type Error = Error; - - fn serialize_field( - &mut self, value: &T, - ) -> Result<(), Error> { - self.inner.serialize_field(value) - } - - fn end(self) -> Result { - self.inner.end() - } -} - -impl<'output, Target> ser::SerializeTupleVariant - for TupleVariantSerializer<'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer; - type Error = Error; - - fn serialize_field( - &mut self, value: &T, - ) -> Result<(), Error> { - self.inner.serialize_field(value) - } - - fn end(self) -> Result { - self.inner.end() - } -} - -impl<'output, Target> ser::SerializeMap 
for MapSerializer<'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer; - type Error = Error; - - fn serialize_entry( - &mut self, key: &K, value: &V, - ) -> Result<(), Error> { - let key_sink = key::KeySink::new(|key| { - let value_sink = value::ValueSink::new(self.urlencoder, &key); - value.serialize(part::PartSerializer::new(value_sink))?; - self.key = None; - Ok(()) - }); - let entry_serializer = part::PartSerializer::new(key_sink); - key.serialize(entry_serializer) - } - - fn serialize_key( - &mut self, key: &T, - ) -> Result<(), Error> { - let key_sink = key::KeySink::new(|key| Ok(key.into())); - let key_serializer = part::PartSerializer::new(key_sink); - self.key = Some(key.serialize(key_serializer)?); - Ok(()) - } - - fn serialize_value( - &mut self, value: &T, - ) -> Result<(), Error> { - { - let key = self.key.as_ref().ok_or_else(Error::no_key)?; - let value_sink = value::ValueSink::new(self.urlencoder, &key); - value.serialize(part::PartSerializer::new(value_sink))?; - } - self.key = None; - Ok(()) - } - - fn end(self) -> Result { - Ok(self.urlencoder) - } -} - -impl<'output, Target> ser::SerializeStruct for StructSerializer<'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer; - type Error = Error; - - fn serialize_field( - &mut self, key: &'static str, value: &T, - ) -> Result<(), Error> { - let value_sink = value::ValueSink::new(self.urlencoder, key); - value.serialize(part::PartSerializer::new(value_sink)) - } - - fn end(self) -> Result { - Ok(self.urlencoder) - } -} - -impl<'output, Target> ser::SerializeStructVariant - for StructVariantSerializer<'output, Target> -where - Target: 'output + UrlEncodedTarget, -{ - type Ok = &'output mut UrlEncodedSerializer; - type Error = Error; - - fn serialize_field( - &mut self, key: &'static str, value: &T, - ) -> Result<(), Error> { - self.inner.serialize_field(key, value) - } - - fn end(self) -> Result { - self.inner.end() - } -} - -impl Error { - fn top_level() -> Self { - let msg = "top-level serializer supports only maps and structs"; - Error::Custom(msg.into()) - } - - fn no_key() -> Self { - let msg = "tried to serialize a value before serializing key"; - Error::Custom(msg.into()) - } -} diff --git a/src/serde_urlencoded/ser/pair.rs b/src/serde_urlencoded/ser/pair.rs deleted file mode 100644 index 68db144f9..000000000 --- a/src/serde_urlencoded/ser/pair.rs +++ /dev/null @@ -1,239 +0,0 @@ -use super::super::ser::key::KeySink; -use super::super::ser::part::PartSerializer; -use super::super::ser::value::ValueSink; -use super::super::ser::Error; -use serde::ser; -use std::borrow::Cow; -use std::mem; -use url::form_urlencoded::Serializer as UrlEncodedSerializer; -use url::form_urlencoded::Target as UrlEncodedTarget; - -pub struct PairSerializer<'target, Target: 'target + UrlEncodedTarget> { - urlencoder: &'target mut UrlEncodedSerializer, - state: PairState, -} - -impl<'target, Target> PairSerializer<'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - pub fn new(urlencoder: &'target mut UrlEncodedSerializer) -> Self { - PairSerializer { - urlencoder, - state: PairState::WaitingForKey, - } - } -} - -impl<'target, Target> ser::Serializer for PairSerializer<'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - type Ok = (); - type Error = Error; - type SerializeSeq = ser::Impossible<(), Error>; - type SerializeTuple = Self; - type SerializeTupleStruct = ser::Impossible<(), Error>; - type 
SerializeTupleVariant = ser::Impossible<(), Error>; - type SerializeMap = ser::Impossible<(), Error>; - type SerializeStruct = ser::Impossible<(), Error>; - type SerializeStructVariant = ser::Impossible<(), Error>; - - fn serialize_bool(self, _v: bool) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_i8(self, _v: i8) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_i16(self, _v: i16) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_i32(self, _v: i32) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_i64(self, _v: i64) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_u8(self, _v: u8) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_u16(self, _v: u16) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_u32(self, _v: u32) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_u64(self, _v: u64) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_f32(self, _v: f32) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_f64(self, _v: f64) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_char(self, _v: char) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_str(self, _value: &str) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_bytes(self, _value: &[u8]) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_unit(self) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_unit_struct(self, _name: &'static str) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_unit_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - ) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_newtype_struct( - self, _name: &'static str, value: &T, - ) -> Result<(), Error> { - value.serialize(self) - } - - fn serialize_newtype_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - _value: &T, - ) -> Result<(), Error> { - Err(Error::unsupported_pair()) - } - - fn serialize_none(self) -> Result<(), Error> { - Ok(()) - } - - fn serialize_some(self, value: &T) -> Result<(), Error> { - value.serialize(self) - } - - fn serialize_seq(self, _len: Option) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_tuple(self, len: usize) -> Result { - if len == 2 { - Ok(self) - } else { - Err(Error::unsupported_pair()) - } - } - - fn serialize_tuple_struct( - self, _name: &'static str, _len: usize, - ) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_tuple_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - _len: usize, - ) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_map(self, _len: Option) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_struct( - self, _name: &'static str, _len: usize, - ) -> Result { - Err(Error::unsupported_pair()) - } - - fn serialize_struct_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - _len: usize, - ) -> Result { - Err(Error::unsupported_pair()) - } -} - -impl<'target, Target> ser::SerializeTuple for PairSerializer<'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - type Ok = (); - type Error = Error; - - fn serialize_element( - &mut self, value: &T, - ) -> 
Result<(), Error> { - match mem::replace(&mut self.state, PairState::Done) { - PairState::WaitingForKey => { - let key_sink = KeySink::new(|key| Ok(key.into())); - let key_serializer = PartSerializer::new(key_sink); - self.state = PairState::WaitingForValue { - key: value.serialize(key_serializer)?, - }; - Ok(()) - } - PairState::WaitingForValue { key } => { - let result = { - let value_sink = ValueSink::new(self.urlencoder, &key); - let value_serializer = PartSerializer::new(value_sink); - value.serialize(value_serializer) - }; - if result.is_ok() { - self.state = PairState::Done; - } else { - self.state = PairState::WaitingForValue { key }; - } - result - } - PairState::Done => Err(Error::done()), - } - } - - fn end(self) -> Result<(), Error> { - if let PairState::Done = self.state { - Ok(()) - } else { - Err(Error::not_done()) - } - } -} - -enum PairState { - WaitingForKey, - WaitingForValue { key: Cow<'static, str> }, - Done, -} - -impl Error { - fn done() -> Self { - Error::Custom("this pair has already been serialized".into()) - } - - fn not_done() -> Self { - Error::Custom("this pair has not yet been serialized".into()) - } - - fn unsupported_pair() -> Self { - Error::Custom("unsupported pair".into()) - } -} diff --git a/src/serde_urlencoded/ser/part.rs b/src/serde_urlencoded/ser/part.rs deleted file mode 100644 index 4874dd34b..000000000 --- a/src/serde_urlencoded/ser/part.rs +++ /dev/null @@ -1,201 +0,0 @@ -use serde; - -use super::super::dtoa; -use super::super::itoa; -use super::super::ser::Error; -use std::str; - -pub struct PartSerializer { - sink: S, -} - -impl PartSerializer { - pub fn new(sink: S) -> Self { - PartSerializer { sink } - } -} - -pub trait Sink: Sized { - type Ok; - - fn serialize_static_str(self, value: &'static str) -> Result; - - fn serialize_str(self, value: &str) -> Result; - fn serialize_string(self, value: String) -> Result; - fn serialize_none(self) -> Result; - - fn serialize_some( - self, value: &T, - ) -> Result; - - fn unsupported(self) -> Error; -} - -impl serde::ser::Serializer for PartSerializer { - type Ok = S::Ok; - type Error = Error; - type SerializeSeq = serde::ser::Impossible; - type SerializeTuple = serde::ser::Impossible; - type SerializeTupleStruct = serde::ser::Impossible; - type SerializeTupleVariant = serde::ser::Impossible; - type SerializeMap = serde::ser::Impossible; - type SerializeStruct = serde::ser::Impossible; - type SerializeStructVariant = serde::ser::Impossible; - - fn serialize_bool(self, v: bool) -> Result { - self.sink - .serialize_static_str(if v { "true" } else { "false" }) - } - - fn serialize_i8(self, v: i8) -> Result { - self.serialize_integer(v) - } - - fn serialize_i16(self, v: i16) -> Result { - self.serialize_integer(v) - } - - fn serialize_i32(self, v: i32) -> Result { - self.serialize_integer(v) - } - - fn serialize_i64(self, v: i64) -> Result { - self.serialize_integer(v) - } - - fn serialize_u8(self, v: u8) -> Result { - self.serialize_integer(v) - } - - fn serialize_u16(self, v: u16) -> Result { - self.serialize_integer(v) - } - - fn serialize_u32(self, v: u32) -> Result { - self.serialize_integer(v) - } - - fn serialize_u64(self, v: u64) -> Result { - self.serialize_integer(v) - } - - fn serialize_f32(self, v: f32) -> Result { - self.serialize_floating(v) - } - - fn serialize_f64(self, v: f64) -> Result { - self.serialize_floating(v) - } - - fn serialize_char(self, v: char) -> Result { - self.sink.serialize_string(v.to_string()) - } - - fn serialize_str(self, value: &str) -> Result { - 
self.sink.serialize_str(value) - } - - fn serialize_bytes(self, value: &[u8]) -> Result { - match str::from_utf8(value) { - Ok(value) => self.sink.serialize_str(value), - Err(err) => Err(Error::Utf8(err)), - } - } - - fn serialize_unit(self) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_unit_struct(self, name: &'static str) -> Result { - self.sink.serialize_static_str(name) - } - - fn serialize_unit_variant( - self, _name: &'static str, _variant_index: u32, variant: &'static str, - ) -> Result { - self.sink.serialize_static_str(variant) - } - - fn serialize_newtype_struct( - self, _name: &'static str, value: &T, - ) -> Result { - value.serialize(self) - } - - fn serialize_newtype_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - _value: &T, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_none(self) -> Result { - self.sink.serialize_none() - } - - fn serialize_some( - self, value: &T, - ) -> Result { - self.sink.serialize_some(value) - } - - fn serialize_seq(self, _len: Option) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_tuple(self, _len: usize) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_tuple_struct( - self, _name: &'static str, _len: usize, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_tuple_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - _len: usize, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_map(self, _len: Option) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_struct( - self, _name: &'static str, _len: usize, - ) -> Result { - Err(self.sink.unsupported()) - } - - fn serialize_struct_variant( - self, _name: &'static str, _variant_index: u32, _variant: &'static str, - _len: usize, - ) -> Result { - Err(self.sink.unsupported()) - } -} - -impl PartSerializer { - fn serialize_integer(self, value: I) -> Result - where - I: itoa::Integer, - { - let mut buf = [b'\0'; 20]; - let len = itoa::write(&mut buf[..], value).unwrap(); - let part = unsafe { str::from_utf8_unchecked(&buf[0..len]) }; - serde::ser::Serializer::serialize_str(self, part) - } - - fn serialize_floating(self, value: F) -> Result - where - F: dtoa::Floating, - { - let mut buf = [b'\0'; 24]; - let len = dtoa::write(&mut buf[..], value).unwrap(); - let part = unsafe { str::from_utf8_unchecked(&buf[0..len]) }; - serde::ser::Serializer::serialize_str(self, part) - } -} diff --git a/src/serde_urlencoded/ser/value.rs b/src/serde_urlencoded/ser/value.rs deleted file mode 100644 index 3c47739f3..000000000 --- a/src/serde_urlencoded/ser/value.rs +++ /dev/null @@ -1,59 +0,0 @@ -use super::super::ser::part::{PartSerializer, Sink}; -use super::super::ser::Error; -use serde::ser::Serialize; -use std::str; -use url::form_urlencoded::Serializer as UrlEncodedSerializer; -use url::form_urlencoded::Target as UrlEncodedTarget; - -pub struct ValueSink<'key, 'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - urlencoder: &'target mut UrlEncodedSerializer, - key: &'key str, -} - -impl<'key, 'target, Target> ValueSink<'key, 'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - pub fn new( - urlencoder: &'target mut UrlEncodedSerializer, key: &'key str, - ) -> Self { - ValueSink { urlencoder, key } - } -} - -impl<'key, 'target, Target> Sink for ValueSink<'key, 'target, Target> -where - Target: 'target + UrlEncodedTarget, -{ - type Ok = (); - - fn serialize_str(self, value: &str) -> Result<(), Error> { - 
self.urlencoder.append_pair(self.key, value); - Ok(()) - } - - fn serialize_static_str(self, value: &'static str) -> Result<(), Error> { - self.serialize_str(value) - } - - fn serialize_string(self, value: String) -> Result<(), Error> { - self.serialize_str(&value) - } - - fn serialize_none(self) -> Result { - Ok(()) - } - - fn serialize_some( - self, value: &T, - ) -> Result { - value.serialize(PartSerializer::new(self)) - } - - fn unsupported(self) -> Error { - Error::Custom("unsupported value".into()) - } -} From cf54be2f1792593434021322fcacedf18c635106 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 23 Aug 2018 09:39:11 -0700 Subject: [PATCH 067/219] hide new server api --- CHANGES.md | 4 +++- MIGRATION.md | 2 +- src/server/http.rs | 42 +++++++----------------------------------- src/server/mod.rs | 4 ++++ src/server/server.rs | 13 ++++++++----- 5 files changed, 23 insertions(+), 42 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9dd908aeb..fcaf25545 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.8.0] - 2018-08-xx +## [0.7.4] - 2018-08-xx ### Added @@ -15,6 +15,8 @@ ### Changed * It is allowed to use function with up to 10 parameters for handler with `extractor parameters`. + `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple + even for handler with one parameter. * native-tls - 0.2 diff --git a/MIGRATION.md b/MIGRATION.md index 910e99a4a..3c0bdd943 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,4 +1,4 @@ -## 0.8 +## 0.7.4 * `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple even for handler with one parameter. diff --git a/src/server/http.rs b/src/server/http.rs index e3740d955..f0cbacdb9 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -175,11 +175,11 @@ where } /// Disable `HTTP/2` support - #[doc(hidden)] - #[deprecated( - since = "0.7.4", - note = "please use acceptor service with proper ServerFlags parama" - )] + // #[doc(hidden)] + // #[deprecated( + // since = "0.7.4", + // note = "please use acceptor service with proper ServerFlags parama" + // )] pub fn no_http2(mut self) -> Self { self.no_http2 = true; self @@ -217,6 +217,7 @@ where self } + #[doc(hidden)] /// Use listener for accepting incoming connection requests pub fn listen_with(mut self, lst: net::TcpListener, acceptor: A) -> Self where @@ -234,11 +235,6 @@ where } #[cfg(feature = "tls")] - #[doc(hidden)] - #[deprecated( - since = "0.7.4", - note = "please use `actix_web::HttpServer::listen_with()` and `actix_web::server::NativeTlsAcceptor` instead" - )] /// Use listener for accepting incoming tls connection requests /// /// HttpServer does not change any configuration for TcpListener, @@ -250,11 +246,6 @@ where } #[cfg(feature = "alpn")] - #[doc(hidden)] - #[deprecated( - since = "0.7.4", - note = "please use `actix_web::HttpServer::listen_with()` and `actix_web::server::OpensslAcceptor` instead" - )] /// Use listener for accepting incoming tls connection requests /// /// This method sets alpn protocols to "h2" and "http/1.1" @@ -274,11 +265,6 @@ where } #[cfg(feature = "rust-tls")] - #[doc(hidden)] - #[deprecated( - since = "0.7.4", - note = "please use `actix_web::HttpServer::listen_with()` and `actix_web::server::RustlsAcceptor` instead" - )] /// Use listener for accepting incoming tls connection requests /// /// This method sets alpn protocols to "h2" and "http/1.1" @@ -313,6 +299,7 @@ where } /// Start listening for incoming connections with supplied acceptor. 
+ #[doc(hidden)] #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] pub fn bind_with(mut self, addr: S, acceptor: A) -> io::Result where @@ -365,11 +352,6 @@ where } #[cfg(feature = "tls")] - #[doc(hidden)] - #[deprecated( - since = "0.7.4", - note = "please use `actix_web::HttpServer::bind_with()` and `actix_web::server::NativeTlsAcceptor` instead" - )] /// The ssl socket address to bind /// /// To bind multiple addresses this method can be called multiple times. @@ -382,11 +364,6 @@ where } #[cfg(feature = "alpn")] - #[doc(hidden)] - #[deprecated( - since = "0.7.4", - note = "please use `actix_web::HttpServer::bind_with()` and `actix_web::server::OpensslAcceptor` instead" - )] /// Start listening for incoming tls connections. /// /// This method sets alpn protocols to "h2" and "http/1.1" @@ -407,11 +384,6 @@ where } #[cfg(feature = "rust-tls")] - #[doc(hidden)] - #[deprecated( - since = "0.7.4", - note = "please use `actix_web::HttpServer::bind_with()` and `actix_web::server::RustlsAcceptor` instead" - )] /// Start listening for incoming tls connections. /// /// This method sets alpn protocols to "h2" and "http/1.1" diff --git a/src/server/mod.rs b/src/server/mod.rs index cccdf8267..901260be3 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -137,11 +137,15 @@ mod worker; use actix::Message; pub use self::message::Request; + +#[doc(hidden)] pub use self::server::{ ConnectionRateTag, ConnectionTag, Connections, Server, Service, ServiceHandler, }; pub use self::settings::ServerSettings; pub use self::http::HttpServer; + +#[doc(hidden)] pub use self::ssl::*; #[doc(hidden)] diff --git a/src/server/server.rs b/src/server/server.rs index 552ba8ee2..0646c100c 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -13,8 +13,9 @@ use super::accept::{AcceptLoop, AcceptNotify, Command}; use super::worker::{StopWorker, Worker, WorkerClient, Conn}; use super::{PauseServer, ResumeServer, StopServer, Token}; -///Describes service that could be used -///with [Server](struct.Server.html) +#[doc(hidden)] +/// Describes service that could be used +/// with [Server](struct.Server.html) pub trait Service: Send + 'static { /// Clone service fn clone(&self) -> Box; @@ -33,8 +34,9 @@ impl Service for Box { } } -///Describes the way serivce handles incoming -///TCP connections. +#[doc(hidden)] +/// Describes the way serivce handles incoming +/// TCP connections. pub trait ServiceHandler { /// Handle incoming stream fn handle(&mut self, token: Token, io: net::TcpStream, peer: Option); @@ -47,7 +49,8 @@ pub(crate) enum ServerCommand { WorkerDied(usize), } -///Server +/// Generic server +#[doc(hidden)] pub struct Server { threads: usize, workers: Vec<(usize, Addr)>, From e9c139bdea7519625c491407e86cedb2938ab90f Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 23 Aug 2018 09:47:32 -0700 Subject: [PATCH 068/219] clippy warnings --- src/header/common/content_disposition.rs | 70 ++++++++++-------------- 1 file changed, 30 insertions(+), 40 deletions(-) diff --git a/src/header/common/content_disposition.rs b/src/header/common/content_disposition.rs index 686cf9c67..5e8cbd67a 100644 --- a/src/header/common/content_disposition.rs +++ b/src/header/common/content_disposition.rs @@ -14,7 +14,7 @@ use regex::Regex; use std::fmt::{self, Write}; /// Split at the index of the first `needle` if it exists or at the end. 
-fn split_once<'a>(haystack: &'a str, needle: char) -> (&'a str, &'a str) { +fn split_once(haystack: &str, needle: char) -> (&str, &str) { haystack.find(needle).map_or_else( || (haystack, ""), |sc| { @@ -26,7 +26,7 @@ fn split_once<'a>(haystack: &'a str, needle: char) -> (&'a str, &'a str) { /// Split at the index of the first `needle` if it exists or at the end, trim the right of the /// first part and the left of the last part. -fn split_once_and_trim<'a>(haystack: &'a str, needle: char) -> (&'a str, &'a str) { +fn split_once_and_trim(haystack: &str, needle: char) -> (&str, &str) { let (first, last) = split_once(haystack, needle); (first.trim_right(), last.trim_left()) } @@ -114,20 +114,20 @@ impl DispositionParam { /// Returns `true` if the paramater is [`Unknown`](DispositionParam::Unknown) and the `name` #[inline] /// matches. - pub fn is_unknown<'a, T: AsRef>(&self, name: T) -> bool { + pub fn is_unknown>(&self, name: T) -> bool { self.as_unknown(name).is_some() } /// Returns `true` if the paramater is [`UnknownExt`](DispositionParam::UnknownExt) and the /// `name` matches. #[inline] - pub fn is_unknown_ext<'a, T: AsRef>(&self, name: T) -> bool { + pub fn is_unknown_ext>(&self, name: T) -> bool { self.as_unknown_ext(name).is_some() } /// Returns the name if applicable. #[inline] - pub fn as_name<'a>(&'a self) -> Option<&'a str> { + pub fn as_name(&self) -> Option<&str> { match self { DispositionParam::Name(ref name) => Some(name.as_str()), _ => None, @@ -136,18 +136,18 @@ impl DispositionParam { /// Returns the filename if applicable. #[inline] - pub fn as_filename<'a>(&'a self) -> Option<&'a str> { + pub fn as_filename(&self) -> Option<&str> { match self { - &DispositionParam::Filename(ref filename) => Some(filename.as_str()), + DispositionParam::Filename(ref filename) => Some(filename.as_str()), _ => None, } } /// Returns the filename* if applicable. #[inline] - pub fn as_filename_ext<'a>(&'a self) -> Option<&'a ExtendedValue> { + pub fn as_filename_ext(&self) -> Option<&ExtendedValue> { match self { - &DispositionParam::FilenameExt(ref value) => Some(value), + DispositionParam::FilenameExt(ref value) => Some(value), _ => None, } } @@ -155,9 +155,9 @@ impl DispositionParam { /// Returns the value of the unrecognized regular parameter if it is /// [`Unknown`](DispositionParam::Unknown) and the `name` matches. #[inline] - pub fn as_unknown<'a, T: AsRef>(&'a self, name: T) -> Option<&'a str> { + pub fn as_unknown>(&self, name: T) -> Option<&str> { match self { - &DispositionParam::Unknown(ref ext_name, ref value) + DispositionParam::Unknown(ref ext_name, ref value) if ext_name.eq_ignore_ascii_case(name.as_ref()) => { Some(value.as_str()) @@ -169,11 +169,9 @@ impl DispositionParam { /// Returns the value of the unrecognized extended parameter if it is /// [`Unknown`](DispositionParam::Unknown) and the `name` matches. 
#[inline] - pub fn as_unknown_ext<'a, T: AsRef>( - &'a self, name: T, - ) -> Option<&'a ExtendedValue> { + pub fn as_unknown_ext>(&self, name: T) -> Option<&ExtendedValue> { match self { - &DispositionParam::UnknownExt(ref ext_name, ref value) + DispositionParam::UnknownExt(ref ext_name, ref value) if ext_name.eq_ignore_ascii_case(name.as_ref()) => { Some(value) @@ -276,7 +274,7 @@ impl ContentDisposition { let hv = String::from_utf8(hv.as_bytes().to_vec()) .map_err(|_| ::error::ParseError::Header)?; let (disp_type, mut left) = split_once_and_trim(hv.as_str().trim(), ';'); - if disp_type.len() == 0 { + if disp_type.is_empty() { return Err(::error::ParseError::Header); } let mut cd = ContentDisposition { @@ -284,9 +282,9 @@ impl ContentDisposition { parameters: Vec::new(), }; - while left.len() > 0 { + while !left.is_empty() { let (param_name, new_left) = split_once_and_trim(left, '='); - if param_name.len() == 0 || param_name == "*" || new_left.len() == 0 { + if param_name.is_empty() || param_name == "*" || new_left.is_empty() { return Err(::error::ParseError::Header); } left = new_left; @@ -315,34 +313,28 @@ impl ContentDisposition { if escaping { escaping = false; quoted_string.push(c); - } else { - if c == 0x5c + } else if c == 0x5c { // backslash - { - escaping = true; - } else if c == 0x22 + escaping = true; + } else if c == 0x22 { // double quote - { - end = Some(i + 1); // cuz skipped 1 for the leading quote - break; - } else { - quoted_string.push(c); - } + end = Some(i + 1); // cuz skipped 1 for the leading quote + break; + } else { + quoted_string.push(c); } } left = &left[end.ok_or(::error::ParseError::Header)? + 1..]; left = split_once(left, ';').1.trim_left(); // In fact, it should not be Err if the above code is correct. - let quoted_string = String::from_utf8(quoted_string) - .map_err(|_| ::error::ParseError::Header)?; - quoted_string + String::from_utf8(quoted_string).map_err(|_| ::error::ParseError::Header)? } else { // token: won't contains semicolon according to RFC 2616 Section 2.2 let (token, new_left) = split_once_and_trim(left, ';'); left = new_left; token.to_owned() }; - if value.len() == 0 { + if value.is_empty() { return Err(::error::ParseError::Header); } @@ -397,12 +389,12 @@ impl ContentDisposition { } /// Return the value of *name* if exists. - pub fn get_name<'a>(&'a self) -> Option<&'a str> { + pub fn get_name(&self) -> Option<&str> { self.parameters.iter().filter_map(|p| p.as_name()).nth(0) } /// Return the value of *filename* if exists. - pub fn get_filename<'a>(&'a self) -> Option<&'a str> { + pub fn get_filename(&self) -> Option<&str> { self.parameters .iter() .filter_map(|p| p.as_filename()) @@ -410,7 +402,7 @@ impl ContentDisposition { } /// Return the value of *filename\** if exists. - pub fn get_filename_ext<'a>(&'a self) -> Option<&'a ExtendedValue> { + pub fn get_filename_ext(&self) -> Option<&ExtendedValue> { self.parameters .iter() .filter_map(|p| p.as_filename_ext()) @@ -418,7 +410,7 @@ impl ContentDisposition { } /// Return the value of the parameter which the `name` matches. - pub fn get_unknown<'a, T: AsRef>(&'a self, name: T) -> Option<&'a str> { + pub fn get_unknown>(&self, name: T) -> Option<&str> { let name = name.as_ref(); self.parameters .iter() @@ -427,9 +419,7 @@ impl ContentDisposition { } /// Return the value of the extended parameter which the `name` matches. 
- pub fn get_unknown_ext<'a, T: AsRef>( - &'a self, name: T, - ) -> Option<&'a ExtendedValue> { + pub fn get_unknown_ext>(&self, name: T) -> Option<&ExtendedValue> { let name = name.as_ref(); self.parameters .iter() From 1716380f0890a1e936d84181effeb63906c1e609 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 23 Aug 2018 09:48:01 -0700 Subject: [PATCH 069/219] clippy fmt --- src/application.rs | 3 +- src/client/connector.rs | 295 +++++++++++++++++----------------- src/client/writer.rs | 7 +- src/extractor.rs | 22 +-- src/fs.rs | 16 +- src/handler.rs | 11 +- src/header/mod.rs | 3 +- src/helpers.rs | 3 +- src/httpmessage.rs | 24 ++- src/httprequest.rs | 3 +- src/httpresponse.rs | 6 +- src/info.rs | 3 +- src/json.rs | 23 +-- src/lib.rs | 8 +- src/middleware/cors.rs | 29 ++-- src/middleware/csrf.rs | 5 +- src/middleware/errhandlers.rs | 2 +- src/middleware/session.rs | 6 +- src/multipart.rs | 8 +- src/param.rs | 2 +- src/payload.rs | 24 +-- src/pipeline.rs | 151 +++++++++++------ src/pred.rs | 3 +- src/scope.rs | 66 +++----- src/server/h1decoder.rs | 6 +- src/server/h1writer.rs | 3 +- src/server/h2.rs | 65 ++++---- src/server/h2writer.rs | 4 +- src/server/http.rs | 19 ++- src/server/mod.rs | 4 +- src/server/output.rs | 7 +- src/server/server.rs | 65 +++++--- src/server/ssl/mod.rs | 2 +- src/server/ssl/nativetls.rs | 41 ++--- src/with.rs | 61 +++++-- src/ws/mod.rs | 39 ++--- tests/test_client.rs | 25 +-- tests/test_handlers.rs | 28 ++-- tests/test_middleware.rs | 69 +++----- tests/test_server.rs | 66 ++++---- tests/test_ws.rs | 6 +- 41 files changed, 616 insertions(+), 617 deletions(-) diff --git a/src/application.rs b/src/application.rs index 4c8946c4e..3ef753f5f 100644 --- a/src/application.rs +++ b/src/application.rs @@ -776,8 +776,7 @@ mod tests { .route("/test", Method::GET, |_: HttpRequest| HttpResponse::Ok()) .route("/test", Method::POST, |_: HttpRequest| { HttpResponse::Created() - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/test").method(Method::GET).request(); let resp = app.run(req); diff --git a/src/client/connector.rs b/src/client/connector.rs index 75b2e149f..61347682a 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -768,168 +768,161 @@ impl ClientConnector { ).map_err(move |_, act, _| { act.release_key(&key2); () - }) - .and_then(move |res, act, _| { - #[cfg(feature = "alpn")] - match res { - Err(err) => { - let _ = waiter.tx.send(Err(err.into())); - fut::Either::B(fut::err(())) - } - Ok(stream) => { - act.stats.opened += 1; - if conn.0.ssl { - fut::Either::A( - act.connector - .connect_async(&key.host, stream) - .into_actor(act) - .then(move |res, _, _| { - match res { - Err(e) => { - let _ = waiter.tx.send(Err( - ClientConnectorError::SslError(e), - )); - } - Ok(stream) => { - let _ = - waiter.tx.send(Ok(Connection::new( - conn.0.clone(), - Some(conn), - Box::new(stream), - ))); - } + }).and_then(move |res, act, _| { + #[cfg(feature = "alpn")] + match res { + Err(err) => { + let _ = waiter.tx.send(Err(err.into())); + fut::Either::B(fut::err(())) + } + Ok(stream) => { + act.stats.opened += 1; + if conn.0.ssl { + fut::Either::A( + act.connector + .connect_async(&key.host, stream) + .into_actor(act) + .then(move |res, _, _| { + match res { + Err(e) => { + let _ = waiter.tx.send(Err( + ClientConnectorError::SslError(e), + )); } - fut::ok(()) - }), - ) - } else { - let _ = waiter.tx.send(Ok(Connection::new( - conn.0.clone(), - Some(conn), - Box::new(stream), - ))); - fut::Either::B(fut::ok(())) - } - } - } - - #[cfg(all(feature = 
"tls", not(feature = "alpn")))] - match res { - Err(err) => { - let _ = waiter.tx.send(Err(err.into())); - fut::Either::B(fut::err(())) - } - Ok(stream) => { - act.stats.opened += 1; - if conn.0.ssl { - fut::Either::A( - act.connector - .connect_async(&conn.0.host, stream) - .into_actor(act) - .then(move |res, _, _| { - match res { - Err(e) => { - let _ = waiter.tx.send(Err( - ClientConnectorError::SslError(e), - )); - } - Ok(stream) => { - let _ = - waiter.tx.send(Ok(Connection::new( - conn.0.clone(), - Some(conn), - Box::new(stream), - ))); - } + Ok(stream) => { + let _ = waiter.tx.send(Ok(Connection::new( + conn.0.clone(), + Some(conn), + Box::new(stream), + ))); } - fut::ok(()) - }), - ) - } else { - let _ = waiter.tx.send(Ok(Connection::new( - conn.0.clone(), - Some(conn), - Box::new(stream), - ))); - fut::Either::B(fut::ok(())) - } + } + fut::ok(()) + }), + ) + } else { + let _ = waiter.tx.send(Ok(Connection::new( + conn.0.clone(), + Some(conn), + Box::new(stream), + ))); + fut::Either::B(fut::ok(())) } } + } - #[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) - )] - match res { - Err(err) => { - let _ = waiter.tx.send(Err(err.into())); - fut::Either::B(fut::err(())) - } - Ok(stream) => { - act.stats.opened += 1; - if conn.0.ssl { - let host = - DNSNameRef::try_from_ascii_str(&key.host).unwrap(); - fut::Either::A( - act.connector - .connect_async(host, stream) - .into_actor(act) - .then(move |res, _, _| { - match res { - Err(e) => { - let _ = waiter.tx.send(Err( - ClientConnectorError::SslError(e), - )); - } - Ok(stream) => { - let _ = - waiter.tx.send(Ok(Connection::new( - conn.0.clone(), - Some(conn), - Box::new(stream), - ))); - } + #[cfg(all(feature = "tls", not(feature = "alpn")))] + match res { + Err(err) => { + let _ = waiter.tx.send(Err(err.into())); + fut::Either::B(fut::err(())) + } + Ok(stream) => { + act.stats.opened += 1; + if conn.0.ssl { + fut::Either::A( + act.connector + .connect_async(&conn.0.host, stream) + .into_actor(act) + .then(move |res, _, _| { + match res { + Err(e) => { + let _ = waiter.tx.send(Err( + ClientConnectorError::SslError(e), + )); } - fut::ok(()) - }), - ) - } else { - let _ = waiter.tx.send(Ok(Connection::new( - conn.0.clone(), - Some(conn), - Box::new(stream), - ))); - fut::Either::B(fut::ok(())) - } + Ok(stream) => { + let _ = waiter.tx.send(Ok(Connection::new( + conn.0.clone(), + Some(conn), + Box::new(stream), + ))); + } + } + fut::ok(()) + }), + ) + } else { + let _ = waiter.tx.send(Ok(Connection::new( + conn.0.clone(), + Some(conn), + Box::new(stream), + ))); + fut::Either::B(fut::ok(())) } } + } - #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] - match res { - Err(err) => { - let _ = waiter.tx.send(Err(err.into())); - fut::err(()) - } - Ok(stream) => { - act.stats.opened += 1; - if conn.0.ssl { - let _ = waiter - .tx - .send(Err(ClientConnectorError::SslIsNotSupported)); - } else { - let _ = waiter.tx.send(Ok(Connection::new( - conn.0.clone(), - Some(conn), - Box::new(stream), - ))); - }; - fut::ok(()) + #[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) + )] + match res { + Err(err) => { + let _ = waiter.tx.send(Err(err.into())); + fut::Either::B(fut::err(())) + } + Ok(stream) => { + act.stats.opened += 1; + if conn.0.ssl { + let host = DNSNameRef::try_from_ascii_str(&key.host).unwrap(); + fut::Either::A( + act.connector + .connect_async(host, stream) + .into_actor(act) + .then(move |res, _, _| { + match res { + Err(e) => { + let _ = 
waiter.tx.send(Err( + ClientConnectorError::SslError(e), + )); + } + Ok(stream) => { + let _ = waiter.tx.send(Ok(Connection::new( + conn.0.clone(), + Some(conn), + Box::new(stream), + ))); + } + } + fut::ok(()) + }), + ) + } else { + let _ = waiter.tx.send(Ok(Connection::new( + conn.0.clone(), + Some(conn), + Box::new(stream), + ))); + fut::Either::B(fut::ok(())) } } - }) - .spawn(ctx); + } + + #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] + match res { + Err(err) => { + let _ = waiter.tx.send(Err(err.into())); + fut::err(()) + } + Ok(stream) => { + act.stats.opened += 1; + if conn.0.ssl { + let _ = + waiter.tx.send(Err(ClientConnectorError::SslIsNotSupported)); + } else { + let _ = waiter.tx.send(Ok(Connection::new( + conn.0.clone(), + Some(conn), + Box::new(stream), + ))); + }; + fut::ok(()) + } + } + }).spawn(ctx); } } diff --git a/src/client/writer.rs b/src/client/writer.rs index 81ad96510..45abfb773 100644 --- a/src/client/writer.rs +++ b/src/client/writer.rs @@ -302,10 +302,9 @@ fn content_encoder(buf: BytesMut, req: &mut ClientRequest) -> Output { req.replace_body(body); let enc = match encoding { #[cfg(feature = "flate2")] - ContentEncoding::Deflate => ContentEncoder::Deflate(ZlibEncoder::new( - transfer, - Compression::default(), - )), + ContentEncoding::Deflate => { + ContentEncoder::Deflate(ZlibEncoder::new(transfer, Compression::default())) + } #[cfg(feature = "flate2")] ContentEncoding::Gzip => { ContentEncoder::Gzip(GzEncoder::new(transfer, Compression::default())) diff --git a/src/extractor.rs b/src/extractor.rs index 6d156d47a..7b0b4b003 100644 --- a/src/extractor.rs +++ b/src/extractor.rs @@ -103,7 +103,7 @@ impl Path { impl From for Path { fn from(inner: T) -> Path { - Path{inner} + Path { inner } } } @@ -802,8 +802,8 @@ mod tests { header::CONTENT_TYPE, "application/x-www-form-urlencoded", ).header(header::CONTENT_LENGTH, "11") - .set_payload(Bytes::from_static(b"hello=world")) - .finish(); + .set_payload(Bytes::from_static(b"hello=world")) + .finish(); let mut cfg = FormConfig::default(); cfg.limit(4096); @@ -837,8 +837,8 @@ mod tests { header::CONTENT_TYPE, "application/x-www-form-urlencoded", ).header(header::CONTENT_LENGTH, "9") - .set_payload(Bytes::from_static(b"hello=world")) - .finish(); + .set_payload(Bytes::from_static(b"hello=world")) + .finish(); match Option::>::from_request(&req, &cfg) .poll() @@ -857,8 +857,8 @@ mod tests { header::CONTENT_TYPE, "application/x-www-form-urlencoded", ).header(header::CONTENT_LENGTH, "9") - .set_payload(Bytes::from_static(b"bye=world")) - .finish(); + .set_payload(Bytes::from_static(b"bye=world")) + .finish(); match Option::>::from_request(&req, &cfg) .poll() @@ -875,8 +875,8 @@ mod tests { header::CONTENT_TYPE, "application/x-www-form-urlencoded", ).header(header::CONTENT_LENGTH, "11") - .set_payload(Bytes::from_static(b"hello=world")) - .finish(); + .set_payload(Bytes::from_static(b"hello=world")) + .finish(); match Result::, Error>::from_request(&req, &FormConfig::default()) .poll() @@ -895,8 +895,8 @@ mod tests { header::CONTENT_TYPE, "application/x-www-form-urlencoded", ).header(header::CONTENT_LENGTH, "9") - .set_payload(Bytes::from_static(b"bye=world")) - .finish(); + .set_payload(Bytes::from_static(b"bye=world")) + .finish(); match Result::, Error>::from_request(&req, &FormConfig::default()) .poll() diff --git a/src/fs.rs b/src/fs.rs index 4c8192126..10cdaff7b 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -369,11 +369,7 @@ impl Responder for NamedFile { .body("This resource only supports GET 
and HEAD.")); } - let etag = if C::is_use_etag() { - self.etag() - } else { - None - }; + let etag = if C::is_use_etag() { self.etag() } else { None }; let last_modified = if C::is_use_last_modifier() { self.last_modified() } else { @@ -518,7 +514,8 @@ impl Stream for ChunkedReadFile { max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize; let mut buf = Vec::with_capacity(max_bytes); file.seek(io::SeekFrom::Start(offset))?; - let nbytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?; + let nbytes = + file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?; if nbytes == 0 { return Err(io::ErrorKind::UnexpectedEof.into()); } @@ -869,8 +866,7 @@ impl HttpRange { length: length as u64, })) } - }) - .collect::>()?; + }).collect::>()?; let ranges: Vec = all_ranges.into_iter().filter_map(|x| x).collect(); @@ -986,9 +982,7 @@ mod tests { use header::{ContentDisposition, DispositionParam, DispositionType}; let cd = ContentDisposition { disposition: DispositionType::Attachment, - parameters: vec![DispositionParam::Filename( - String::from("test.png") - )], + parameters: vec![DispositionParam::Filename(String::from("test.png"))], }; let mut file = NamedFile::open("tests/test.png") .unwrap() diff --git a/src/handler.rs b/src/handler.rs index 661cd0285..2b6cc6604 100644 --- a/src/handler.rs +++ b/src/handler.rs @@ -354,15 +354,16 @@ impl> From> for AsyncResult { } impl From>, E>> for AsyncResult -where T: 'static, - E: Into + 'static +where + T: 'static, + E: Into + 'static, { #[inline] fn from(res: Result>, E>) -> Self { match res { - Ok(fut) => AsyncResult( - Some(AsyncResultItem::Future( - Box::new(fut.map_err(|e| e.into()))))), + Ok(fut) => AsyncResult(Some(AsyncResultItem::Future(Box::new( + fut.map_err(|e| e.into()), + )))), Err(err) => AsyncResult(Some(AsyncResultItem::Err(err.into()))), } } diff --git a/src/header/mod.rs b/src/header/mod.rs index cdd2ad200..74e4b03e5 100644 --- a/src/header/mod.rs +++ b/src/header/mod.rs @@ -223,8 +223,7 @@ pub fn from_comma_delimited( .filter_map(|x| match x.trim() { "" => None, y => Some(y), - }) - .filter_map(|x| x.trim().parse().ok()), + }).filter_map(|x| x.trim().parse().ok()), ) } Ok(result) diff --git a/src/helpers.rs b/src/helpers.rs index 400b12253..e82d61616 100644 --- a/src/helpers.rs +++ b/src/helpers.rs @@ -279,8 +279,7 @@ mod tests { true, StatusCode::MOVED_PERMANENTLY, )) - }) - .finish(); + }).finish(); // trailing slashes let params = vec![ diff --git a/src/httpmessage.rs b/src/httpmessage.rs index 5db2f075b..60f77b07e 100644 --- a/src/httpmessage.rs +++ b/src/httpmessage.rs @@ -479,8 +479,7 @@ where body.extend_from_slice(&chunk); Ok(body) } - }) - .map(|body| body.freeze()), + }).map(|body| body.freeze()), )); self.poll() } @@ -588,8 +587,7 @@ where body.extend_from_slice(&chunk); Ok(body) } - }) - .and_then(move |body| { + }).and_then(move |body| { if (encoding as *const Encoding) == UTF_8 { serde_urlencoded::from_bytes::(&body) .map_err(|_| UrlencodedError::Parse) @@ -694,8 +692,7 @@ mod tests { .header( header::TRANSFER_ENCODING, Bytes::from_static(b"some va\xadscc\xacas0xsdasdlue"), - ) - .finish(); + ).finish(); assert!(req.chunked().is_err()); } @@ -734,7 +731,7 @@ mod tests { header::CONTENT_TYPE, "application/x-www-form-urlencoded", ).header(header::CONTENT_LENGTH, "xxxx") - .finish(); + .finish(); assert_eq!( req.urlencoded::().poll().err().unwrap(), UrlencodedError::UnknownLength @@ -744,7 +741,7 @@ mod tests { header::CONTENT_TYPE, "application/x-www-form-urlencoded", 
).header(header::CONTENT_LENGTH, "1000000") - .finish(); + .finish(); assert_eq!( req.urlencoded::().poll().err().unwrap(), UrlencodedError::Overflow @@ -765,8 +762,8 @@ mod tests { header::CONTENT_TYPE, "application/x-www-form-urlencoded", ).header(header::CONTENT_LENGTH, "11") - .set_payload(Bytes::from_static(b"hello=world")) - .finish(); + .set_payload(Bytes::from_static(b"hello=world")) + .finish(); let result = req.urlencoded::().poll().ok().unwrap(); assert_eq!( @@ -780,8 +777,8 @@ mod tests { header::CONTENT_TYPE, "application/x-www-form-urlencoded; charset=utf-8", ).header(header::CONTENT_LENGTH, "11") - .set_payload(Bytes::from_static(b"hello=world")) - .finish(); + .set_payload(Bytes::from_static(b"hello=world")) + .finish(); let result = req.urlencoded().poll().ok().unwrap(); assert_eq!( @@ -830,8 +827,7 @@ mod tests { b"Lorem Ipsum is simply dummy text of the printing and typesetting\n\ industry. Lorem Ipsum has been the industry's standard dummy\n\ Contrary to popular belief, Lorem Ipsum is not simply random text.", - )) - .finish(); + )).finish(); let mut r = Readlines::new(&req); match r.poll().ok().unwrap() { Async::Ready(Some(s)) => assert_eq!( diff --git a/src/httprequest.rs b/src/httprequest.rs index 128dcbf17..f4de81529 100644 --- a/src/httprequest.rs +++ b/src/httprequest.rs @@ -264,7 +264,8 @@ impl HttpRequest { if self.extensions().get::().is_none() { let mut cookies = Vec::new(); for hdr in self.request().inner.headers.get_all(header::COOKIE) { - let s = str::from_utf8(hdr.as_bytes()).map_err(CookieParseError::from)?; + let s = + str::from_utf8(hdr.as_bytes()).map_err(CookieParseError::from)?; for cookie_str in s.split(';').map(|s| s.trim()) { if !cookie_str.is_empty() { cookies.push(Cookie::parse_encoded(cookie_str)?.into_owned()); diff --git a/src/httpresponse.rs b/src/httpresponse.rs index 7700d3523..f02570188 100644 --- a/src/httpresponse.rs +++ b/src/httpresponse.rs @@ -142,8 +142,7 @@ impl HttpResponse { HeaderValue::from_str(&cookie.to_string()) .map(|c| { h.append(header::SET_COOKIE, c); - }) - .map_err(|e| e.into()) + }).map_err(|e| e.into()) } /// Remove all cookies with the given name from this response. 
Returns @@ -1079,8 +1078,7 @@ mod tests { .http_only(true) .max_age(Duration::days(1)) .finish(), - ) - .del_cookie(&cookies[0]) + ).del_cookie(&cookies[0]) .finish(); let mut val: Vec<_> = resp diff --git a/src/info.rs b/src/info.rs index b15ba9886..aeffc5ba2 100644 --- a/src/info.rs +++ b/src/info.rs @@ -174,8 +174,7 @@ mod tests { .header( header::FORWARDED, "for=192.0.2.60; proto=https; by=203.0.113.43; host=rust-lang.org", - ) - .request(); + ).request(); let mut info = ConnectionInfo::default(); info.update(&req); diff --git a/src/json.rs b/src/json.rs index 86eefca96..178143f11 100644 --- a/src/json.rs +++ b/src/json.rs @@ -327,8 +327,7 @@ impl Future for JsonBod body.extend_from_slice(&chunk); Ok(body) } - }) - .and_then(|body| Ok(serde_json::from_slice::(&body)?)); + }).and_then(|body| Ok(serde_json::from_slice::(&body)?)); self.fut = Some(Box::new(fut)); self.poll() } @@ -388,8 +387,7 @@ mod tests { .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/text"), - ) - .finish(); + ).finish(); let mut json = req.json::(); assert_eq!(json.poll().err().unwrap(), JsonPayloadError::ContentType); @@ -397,12 +395,10 @@ mod tests { .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/json"), - ) - .header( + ).header( header::CONTENT_LENGTH, header::HeaderValue::from_static("10000"), - ) - .finish(); + ).finish(); let mut json = req.json::().limit(100); assert_eq!(json.poll().err().unwrap(), JsonPayloadError::Overflow); @@ -410,12 +406,10 @@ mod tests { .header( header::CONTENT_TYPE, header::HeaderValue::from_static("application/json"), - ) - .header( + ).header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), - ) - .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) + ).set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) .finish(); let mut json = req.json::(); @@ -442,9 +436,8 @@ mod tests { ).header( header::CONTENT_LENGTH, header::HeaderValue::from_static("16"), - ) - .set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) - .finish(); + ).set_payload(Bytes::from_static(b"{\"name\": \"test\"}")) + .finish(); assert!(handler.handle(&req).as_err().is_none()) } } diff --git a/src/lib.rs b/src/lib.rs index 72fe26c10..4eeb5adac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -127,7 +127,6 @@ extern crate tokio_uds; extern crate url; #[macro_use] extern crate serde; -extern crate serde_urlencoded; #[cfg(feature = "brotli")] extern crate brotli2; extern crate encoding; @@ -135,6 +134,7 @@ extern crate encoding; extern crate flate2; extern crate h2 as http2; extern crate num_cpus; +extern crate serde_urlencoded; #[macro_use] extern crate percent_encoding; extern crate serde_json; @@ -256,12 +256,12 @@ pub mod dev { pub use extractor::{FormConfig, PayloadConfig}; pub use handler::{AsyncResult, Handler}; pub use httpmessage::{MessageBody, Readlines, UrlEncoded}; - pub use pipeline::Pipeline; pub use httpresponse::HttpResponseBuilder; pub use info::ConnectionInfo; pub use json::{JsonBody, JsonConfig}; pub use param::{FromParam, Params}; pub use payload::{Payload, PayloadBuffer}; + pub use pipeline::Pipeline; pub use resource::Resource; pub use route::Route; pub use router::{ResourceDef, ResourceInfo, ResourceType, Router}; @@ -283,7 +283,9 @@ pub mod http { /// Various http headers pub mod header { pub use header::*; - pub use header::{ContentDisposition, DispositionType, DispositionParam, Charset, LanguageTag}; + pub use header::{ + Charset, ContentDisposition, DispositionParam, DispositionType, LanguageTag, + }; } pub use 
header::ContentEncoding; pub use httpresponse::ConnectionType; diff --git a/src/middleware/cors.rs b/src/middleware/cors.rs index a61727409..e75dc73ee 100644 --- a/src/middleware/cors.rs +++ b/src/middleware/cors.rs @@ -387,12 +387,10 @@ impl Middleware for Cors { header::ACCESS_CONTROL_MAX_AGE, format!("{}", max_age).as_str(), ); - }) - .if_some(headers, |headers, resp| { + }).if_some(headers, |headers, resp| { let _ = resp.header(header::ACCESS_CONTROL_ALLOW_HEADERS, headers); - }) - .if_true(self.inner.origins.is_all(), |resp| { + }).if_true(self.inner.origins.is_all(), |resp| { if self.inner.send_wildcard { resp.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*"); } else { @@ -402,17 +400,14 @@ impl Middleware for Cors { origin.clone(), ); } - }) - .if_true(self.inner.origins.is_some(), |resp| { + }).if_true(self.inner.origins.is_some(), |resp| { resp.header( header::ACCESS_CONTROL_ALLOW_ORIGIN, self.inner.origins_str.as_ref().unwrap().clone(), ); - }) - .if_true(self.inner.supports_credentials, |resp| { + }).if_true(self.inner.supports_credentials, |resp| { resp.header(header::ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"); - }) - .header( + }).header( header::ACCESS_CONTROL_ALLOW_METHODS, &self .inner @@ -420,8 +415,7 @@ impl Middleware for Cors { .iter() .fold(String::new(), |s, v| s + "," + v.as_str()) .as_str()[1..], - ) - .finish(), + ).finish(), )) } else { // Only check requests with a origin header. @@ -838,9 +832,10 @@ impl CorsBuilder { if !self.expose_hdrs.is_empty() { cors.expose_hdrs = Some( - self.expose_hdrs.iter() + self.expose_hdrs + .iter() .fold(String::new(), |s, v| format!("{}, {}", s, v.as_str()))[2..] - .to_owned() + .to_owned(), ); } Cors { @@ -977,8 +972,7 @@ mod tests { .header( header::ACCESS_CONTROL_REQUEST_HEADERS, "AUTHORIZATION,ACCEPT", - ) - .method(Method::OPTIONS) + ).method(Method::OPTIONS) .finish(); let resp = cors.start(&req).unwrap().response(); @@ -1102,7 +1096,8 @@ mod tests { ); { - let headers = resp.headers() + let headers = resp + .headers() .get(header::ACCESS_CONTROL_EXPOSE_HEADERS) .unwrap() .to_str() diff --git a/src/middleware/csrf.rs b/src/middleware/csrf.rs index cda1d324c..02cd150d5 100644 --- a/src/middleware/csrf.rs +++ b/src/middleware/csrf.rs @@ -93,8 +93,7 @@ fn origin(headers: &HeaderMap) -> Option, CsrfError>> { .to_str() .map_err(|_| CsrfError::BadOrigin) .map(|o| o.into()) - }) - .or_else(|| { + }).or_else(|| { headers.get(header::REFERER).map(|referer| { Uri::try_from(Bytes::from(referer.as_bytes())) .ok() @@ -251,7 +250,7 @@ mod tests { "Referer", "https://www.example.com/some/path?query=param", ).method(Method::POST) - .finish(); + .finish(); assert!(csrf.start(&req).is_ok()); } diff --git a/src/middleware/errhandlers.rs b/src/middleware/errhandlers.rs index 83c66aae1..c7d19d334 100644 --- a/src/middleware/errhandlers.rs +++ b/src/middleware/errhandlers.rs @@ -131,7 +131,7 @@ mod tests { ErrorHandlers::new() .handler(StatusCode::INTERNAL_SERVER_ERROR, render_500), ).middleware(MiddlewareOne) - .handler(|_| HttpResponse::Ok()) + .handler(|_| HttpResponse::Ok()) }); let request = srv.get().finish().unwrap(); diff --git a/src/middleware/session.rs b/src/middleware/session.rs index cc7aab6b4..7bf5c0e95 100644 --- a/src/middleware/session.rs +++ b/src/middleware/session.rs @@ -579,8 +579,7 @@ mod tests { App::new() .middleware(SessionStorage::new( CookieSessionBackend::signed(&[0; 32]).secure(false), - )) - .resource("/", |r| { + )).resource("/", |r| { r.f(|req| { let _ = req.session().set("counter", 100); "test" @@ -599,8 +598,7 @@ 
mod tests { App::new() .middleware(SessionStorage::new( CookieSessionBackend::signed(&[0; 32]).secure(false), - )) - .resource("/", |r| { + )).resource("/", |r| { r.with(|ses: Session| { let _ = ses.set("counter", 100); "test" diff --git a/src/multipart.rs b/src/multipart.rs index dbf3d179e..fe809294f 100644 --- a/src/multipart.rs +++ b/src/multipart.rs @@ -756,10 +756,7 @@ mod tests { { use http::header::{DispositionParam, DispositionType}; let cd = field.content_disposition().unwrap(); - assert_eq!( - cd.disposition, - DispositionType::FormData - ); + assert_eq!(cd.disposition, DispositionType::FormData); assert_eq!( cd.parameters[0], DispositionParam::Name("file".into()) @@ -813,7 +810,6 @@ mod tests { let res: Result<(), ()> = Ok(()); result(res) - })) - .unwrap(); + })).unwrap(); } } diff --git a/src/param.rs b/src/param.rs index 2704b60d0..063159d72 100644 --- a/src/param.rs +++ b/src/param.rs @@ -236,7 +236,7 @@ macro_rules! FROM_STR { ($type:ty) => { impl FromParam for $type { type Err = InternalError<<$type as FromStr>::Err>; - + fn from_param(val: &str) -> Result { <$type as FromStr>::from_str(val) .map_err(|e| InternalError::new(e, StatusCode::BAD_REQUEST)) diff --git a/src/payload.rs b/src/payload.rs index b20bec652..1d9281f51 100644 --- a/src/payload.rs +++ b/src/payload.rs @@ -513,8 +513,7 @@ where .fold(BytesMut::new(), |mut b, c| { b.extend_from_slice(c); b - }) - .freeze() + }).freeze() } } @@ -553,8 +552,7 @@ mod tests { let res: Result<(), ()> = Ok(()); result(res) - })) - .unwrap(); + })).unwrap(); } #[test] @@ -578,8 +576,7 @@ mod tests { let res: Result<(), ()> = Ok(()); result(res) - })) - .unwrap(); + })).unwrap(); } #[test] @@ -596,8 +593,7 @@ mod tests { payload.readany().err().unwrap(); let res: Result<(), ()> = Ok(()); result(res) - })) - .unwrap(); + })).unwrap(); } #[test] @@ -625,8 +621,7 @@ mod tests { let res: Result<(), ()> = Ok(()); result(res) - })) - .unwrap(); + })).unwrap(); } #[test] @@ -659,8 +654,7 @@ mod tests { let res: Result<(), ()> = Ok(()); result(res) - })) - .unwrap(); + })).unwrap(); } #[test] @@ -693,8 +687,7 @@ mod tests { let res: Result<(), ()> = Ok(()); result(res) - })) - .unwrap(); + })).unwrap(); } #[test] @@ -715,7 +708,6 @@ mod tests { let res: Result<(), ()> = Ok(()); result(res) - })) - .unwrap(); + })).unwrap(); } } diff --git a/src/pipeline.rs b/src/pipeline.rs index 7f206a9fd..1940f9308 100644 --- a/src/pipeline.rs +++ b/src/pipeline.rs @@ -52,9 +52,7 @@ impl> PipelineState { PipelineState::Finishing(ref mut state) => state.poll(info, mws), PipelineState::Completed(ref mut state) => state.poll(info), PipelineState::Response(ref mut state) => state.poll(info, mws), - PipelineState::None | PipelineState::Error => { - None - } + PipelineState::None | PipelineState::Error => None, } } } @@ -448,10 +446,16 @@ impl ProcessResponse { ) -> Option> { // connection is dead at this point match mem::replace(&mut self.iostate, IOState::Done) { - IOState::Response => - Some(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())), - IOState::Payload(_) => - Some(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())), + IOState::Response => Some(FinishingMiddlewares::init( + info, + mws, + self.resp.take().unwrap(), + )), + IOState::Payload(_) => Some(FinishingMiddlewares::init( + info, + mws, + self.resp.take().unwrap(), + )), IOState::Actor(mut ctx) => { if info.disconnected.take().is_some() { ctx.disconnected(); @@ -467,18 +471,25 @@ impl ProcessResponse { Frame::Chunk(None) => { info.context = Some(ctx); return 
Some(FinishingMiddlewares::init( - info, mws, self.resp.take().unwrap(), - )) + info, + mws, + self.resp.take().unwrap(), + )); } Frame::Chunk(Some(_)) => (), - Frame::Drain(fut) => {let _ = fut.send(());}, + Frame::Drain(fut) => { + let _ = fut.send(()); + } } } } - Ok(Async::Ready(None)) => + Ok(Async::Ready(None)) => { return Some(FinishingMiddlewares::init( - info, mws, self.resp.take().unwrap(), - )), + info, + mws, + self.resp.take().unwrap(), + )) + } Ok(Async::NotReady) => { self.iostate = IOState::Actor(ctx); return None; @@ -486,12 +497,20 @@ impl ProcessResponse { Err(err) => { info.context = Some(ctx); info.error = Some(err); - return Some(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())); + return Some(FinishingMiddlewares::init( + info, + mws, + self.resp.take().unwrap(), + )); } } } } - IOState::Done => Some(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())) + IOState::Done => Some(FinishingMiddlewares::init( + info, + mws, + self.resp.take().unwrap(), + )), } } @@ -505,22 +524,32 @@ impl ProcessResponse { 'inner: loop { let result = match mem::replace(&mut self.iostate, IOState::Done) { IOState::Response => { - let encoding = - self.resp.as_ref().unwrap().content_encoding().unwrap_or(info.encoding); + let encoding = self + .resp + .as_ref() + .unwrap() + .content_encoding() + .unwrap_or(info.encoding); - let result = - match io.start(&info.req, self.resp.as_mut().unwrap(), encoding) { - Ok(res) => res, - Err(err) => { - info.error = Some(err.into()); - return Ok(FinishingMiddlewares::init( - info, mws, self.resp.take().unwrap(), - )); - } - }; + let result = match io.start( + &info.req, + self.resp.as_mut().unwrap(), + encoding, + ) { + Ok(res) => res, + Err(err) => { + info.error = Some(err.into()); + return Ok(FinishingMiddlewares::init( + info, + mws, + self.resp.take().unwrap(), + )); + } + }; if let Some(err) = self.resp.as_ref().unwrap().error() { - if self.resp.as_ref().unwrap().status().is_server_error() { + if self.resp.as_ref().unwrap().status().is_server_error() + { error!( "Error occured during request handling, status: {} {}", self.resp.as_ref().unwrap().status(), err @@ -556,7 +585,9 @@ impl ProcessResponse { if let Err(err) = io.write_eof() { info.error = Some(err.into()); return Ok(FinishingMiddlewares::init( - info, mws, self.resp.take().unwrap(), + info, + mws, + self.resp.take().unwrap(), )); } break; @@ -567,7 +598,9 @@ impl ProcessResponse { Err(err) => { info.error = Some(err.into()); return Ok(FinishingMiddlewares::init( - info, mws, self.resp.take().unwrap(), + info, + mws, + self.resp.take().unwrap(), )); } Ok(result) => result, @@ -580,7 +613,9 @@ impl ProcessResponse { Err(err) => { info.error = Some(err); return Ok(FinishingMiddlewares::init( - info, mws, self.resp.take().unwrap(), + info, + mws, + self.resp.take().unwrap(), )); } }, @@ -603,26 +638,30 @@ impl ProcessResponse { info.error = Some(err.into()); return Ok( FinishingMiddlewares::init( - info, mws, self.resp.take().unwrap(), + info, + mws, + self.resp.take().unwrap(), ), ); } break 'inner; } - Frame::Chunk(Some(chunk)) => { - match io.write(&chunk) { - Err(err) => { - info.context = Some(ctx); - info.error = Some(err.into()); - return Ok( - FinishingMiddlewares::init( - info, mws, self.resp.take().unwrap(), - ), - ); - } - Ok(result) => res = Some(result), + Frame::Chunk(Some(chunk)) => match io + .write(&chunk) + { + Err(err) => { + info.context = Some(ctx); + info.error = Some(err.into()); + return Ok( + FinishingMiddlewares::init( + info, + mws, + 
self.resp.take().unwrap(), + ), + ); } - } + Ok(result) => res = Some(result), + }, Frame::Drain(fut) => self.drain = Some(fut), } } @@ -642,7 +681,9 @@ impl ProcessResponse { info.context = Some(ctx); info.error = Some(err); return Ok(FinishingMiddlewares::init( - info, mws, self.resp.take().unwrap(), + info, + mws, + self.resp.take().unwrap(), )); } } @@ -682,7 +723,11 @@ impl ProcessResponse { info.context = Some(ctx); } info.error = Some(err.into()); - return Ok(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())); + return Ok(FinishingMiddlewares::init( + info, + mws, + self.resp.take().unwrap(), + )); } } } @@ -696,11 +741,19 @@ impl ProcessResponse { Ok(_) => (), Err(err) => { info.error = Some(err.into()); - return Ok(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())); + return Ok(FinishingMiddlewares::init( + info, + mws, + self.resp.take().unwrap(), + )); } } self.resp.as_mut().unwrap().set_response_size(io.written()); - Ok(FinishingMiddlewares::init(info, mws, self.resp.take().unwrap())) + Ok(FinishingMiddlewares::init( + info, + mws, + self.resp.take().unwrap(), + )) } _ => Err(PipelineState::Response(self)), } diff --git a/src/pred.rs b/src/pred.rs index 22f12ac2a..99d6e608b 100644 --- a/src/pred.rs +++ b/src/pred.rs @@ -264,8 +264,7 @@ mod tests { .header( header::HOST, header::HeaderValue::from_static("www.rust-lang.org"), - ) - .finish(); + ).finish(); let pred = Host("www.rust-lang.org"); assert!(pred.check(&req, req.state())); diff --git a/src/scope.rs b/src/scope.rs index baf891c36..8298f534a 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -715,8 +715,7 @@ mod tests { let app = App::new() .scope("/app", |scope| { scope.resource("/path1", |r| r.f(|_| HttpResponse::Ok())) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/path1").request(); let resp = app.run(req); @@ -730,8 +729,7 @@ mod tests { scope .resource("", |r| r.f(|_| HttpResponse::Ok())) .resource("/", |r| r.f(|_| HttpResponse::Created())) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app").request(); let resp = app.run(req); @@ -747,8 +745,7 @@ mod tests { let app = App::new() .scope("/app/", |scope| { scope.resource("", |r| r.f(|_| HttpResponse::Ok())) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app").request(); let resp = app.run(req); @@ -764,8 +761,7 @@ mod tests { let app = App::new() .scope("/app/", |scope| { scope.resource("/", |r| r.f(|_| HttpResponse::Ok())) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app").request(); let resp = app.run(req); @@ -783,12 +779,12 @@ mod tests { scope .route("/path1", Method::GET, |_: HttpRequest<_>| { HttpResponse::Ok() - }) - .route("/path1", Method::DELETE, |_: HttpRequest<_>| { - HttpResponse::Ok() - }) - }) - .finish(); + }).route( + "/path1", + Method::DELETE, + |_: HttpRequest<_>| HttpResponse::Ok(), + ) + }).finish(); let req = TestRequest::with_uri("/app/path1").request(); let resp = app.run(req); @@ -814,8 +810,7 @@ mod tests { scope .filter(pred::Get()) .resource("/path1", |r| r.f(|_| HttpResponse::Ok())) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/path1") .method(Method::POST) @@ -840,8 +835,7 @@ mod tests { .body(format!("project: {}", &r.match_info()["project"])) }) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/ab-project1/path1").request(); let resp = app.run(req); @@ -869,8 +863,7 @@ mod tests { scope.with_state("/t1", State, |scope| { scope.resource("/path1", |r| r.f(|_| 
HttpResponse::Created())) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/t1/path1").request(); let resp = app.run(req); @@ -888,8 +881,7 @@ mod tests { .resource("", |r| r.f(|_| HttpResponse::Ok())) .resource("/", |r| r.f(|_| HttpResponse::Created())) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/t1").request(); let resp = app.run(req); @@ -909,8 +901,7 @@ mod tests { scope.with_state("/t1/", State, |scope| { scope.resource("", |r| r.f(|_| HttpResponse::Ok())) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/t1").request(); let resp = app.run(req); @@ -930,8 +921,7 @@ mod tests { scope.with_state("/t1/", State, |scope| { scope.resource("/", |r| r.f(|_| HttpResponse::Ok())) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/t1").request(); let resp = app.run(req); @@ -953,8 +943,7 @@ mod tests { .filter(pred::Get()) .resource("/path1", |r| r.f(|_| HttpResponse::Ok())) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/t1/path1") .method(Method::POST) @@ -976,8 +965,7 @@ mod tests { scope.nested("/t1", |scope| { scope.resource("/path1", |r| r.f(|_| HttpResponse::Created())) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/t1/path1").request(); let resp = app.run(req); @@ -993,8 +981,7 @@ mod tests { .resource("", |r| r.f(|_| HttpResponse::Ok())) .resource("/", |r| r.f(|_| HttpResponse::Created())) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/t1").request(); let resp = app.run(req); @@ -1014,8 +1001,7 @@ mod tests { .filter(pred::Get()) .resource("/path1", |r| r.f(|_| HttpResponse::Ok())) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/t1/path1") .method(Method::POST) @@ -1044,8 +1030,7 @@ mod tests { }) }) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/project_1/path1").request(); let resp = app.run(req); @@ -1077,8 +1062,7 @@ mod tests { }) }) }) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/test/1/path1").request(); let resp = app.run(req); @@ -1104,8 +1088,7 @@ mod tests { scope .resource("/path1", |r| r.f(|_| HttpResponse::Ok())) .default_resource(|r| r.f(|_| HttpResponse::BadRequest())) - }) - .finish(); + }).finish(); let req = TestRequest::with_uri("/app/path2").request(); let resp = app.run(req); @@ -1121,8 +1104,7 @@ mod tests { let app = App::new() .scope("/app1", |scope| { scope.default_resource(|r| r.f(|_| HttpResponse::BadRequest())) - }) - .scope("/app2", |scope| scope) + }).scope("/app2", |scope| scope) .default_resource(|r| r.f(|_| HttpResponse::MethodNotAllowed())) .finish(); diff --git a/src/server/h1decoder.rs b/src/server/h1decoder.rs index d1948a0d1..084ae8b2f 100644 --- a/src/server/h1decoder.rs +++ b/src/server/h1decoder.rs @@ -166,9 +166,9 @@ impl H1Decoder { { true } else { - version == Version::HTTP_11 - && !(conn.contains("close") - || conn.contains("upgrade")) + version == Version::HTTP_11 && !(conn + .contains("close") + || conn.contains("upgrade")) } } else { false diff --git a/src/server/h1writer.rs b/src/server/h1writer.rs index 8c948471f..8981f9df9 100644 --- a/src/server/h1writer.rs +++ b/src/server/h1writer.rs @@ -152,8 +152,7 @@ impl Writer for H1Writer { let reason = msg.reason().as_bytes(); if let Body::Binary(ref bytes) = body { buffer.reserve( - 256 - + msg.headers().len() * AVERAGE_HEADER_SIZE + 256 + msg.headers().len() * AVERAGE_HEADER_SIZE + bytes.len() + reason.len(), ); diff --git 
a/src/server/h2.rs b/src/server/h2.rs index 0835f5920..986888ff8 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -115,46 +115,51 @@ where if disconnected { item.flags.insert(EntryFlags::EOF); } else { - let retry = item.payload.need_read() == PayloadStatus::Read; - loop { - match item.task.poll_io(&mut item.stream) { - Ok(Async::Ready(ready)) => { - if ready { + let retry = item.payload.need_read() == PayloadStatus::Read; + loop { + match item.task.poll_io(&mut item.stream) { + Ok(Async::Ready(ready)) => { + if ready { + item.flags.insert( + EntryFlags::EOF | EntryFlags::FINISHED, + ); + } else { + item.flags.insert(EntryFlags::EOF); + } + not_ready = false; + } + Ok(Async::NotReady) => { + if item.payload.need_read() + == PayloadStatus::Read + && !retry + { + continue; + } + } + Err(err) => { + error!("Unhandled error: {}", err); item.flags.insert( - EntryFlags::EOF | EntryFlags::FINISHED, + EntryFlags::EOF + | EntryFlags::ERROR + | EntryFlags::WRITE_DONE, ); - } else { - item.flags.insert(EntryFlags::EOF); - } - not_ready = false; - } - Ok(Async::NotReady) => { - if item.payload.need_read() == PayloadStatus::Read - && !retry - { - continue; + item.stream.reset(Reason::INTERNAL_ERROR); } } - Err(err) => { - error!("Unhandled error: {}", err); - item.flags.insert( - EntryFlags::EOF - | EntryFlags::ERROR - | EntryFlags::WRITE_DONE, - ); - item.stream.reset(Reason::INTERNAL_ERROR); - } + break; } - break; - } } } - - if item.flags.contains(EntryFlags::EOF) && !item.flags.contains(EntryFlags::FINISHED) { + + if item.flags.contains(EntryFlags::EOF) + && !item.flags.contains(EntryFlags::FINISHED) + { match item.task.poll_completed() { Ok(Async::NotReady) => (), Ok(Async::Ready(_)) => { - item.flags.insert(EntryFlags::FINISHED | EntryFlags::WRITE_DONE); + item.flags.insert( + EntryFlags::FINISHED | EntryFlags::WRITE_DONE, + ); } Err(err) => { item.flags.insert( diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index ce61b3ed7..398e9817a 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -250,9 +250,7 @@ impl Writer for H2Writer { return Ok(Async::Ready(())); } } - Err(e) => { - return Err(io::Error::new(io::ErrorKind::Other, e)) - } + Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), } } } diff --git a/src/server/http.rs b/src/server/http.rs index f0cbacdb9..05f0b2442 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -403,19 +403,24 @@ where } } -impl Into<(Box, Vec<(Token, net::TcpListener)>)> for HttpServer { +impl Into<(Box, Vec<(Token, net::TcpListener)>)> + for HttpServer +{ fn into(mut self) -> (Box, Vec<(Token, net::TcpListener)>) { let sockets: Vec<_> = mem::replace(&mut self.sockets, Vec::new()) .into_iter() .map(|item| (item.token, item.lst)) .collect(); - (Box::new(HttpService { - factory: self.factory, - host: self.host, - keep_alive: self.keep_alive, - handlers: self.handlers, - }), sockets) + ( + Box::new(HttpService { + factory: self.factory, + host: self.host, + keep_alive: self.keep_alive, + handlers: self.handlers, + }), + sockets, + ) } } diff --git a/src/server/mod.rs b/src/server/mod.rs index 901260be3..2ac933a76 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -125,12 +125,12 @@ mod h1writer; mod h2; mod h2writer; pub(crate) mod helpers; +mod http; pub(crate) mod input; pub(crate) mod message; pub(crate) mod output; mod server; pub(crate) mod settings; -mod http; mod ssl; mod worker; @@ -138,12 +138,12 @@ use actix::Message; pub use self::message::Request; +pub use self::http::HttpServer; #[doc(hidden)] pub 
use self::server::{ ConnectionRateTag, ConnectionTag, Connections, Server, Service, ServiceHandler, }; pub use self::settings::ServerSettings; -pub use self::http::HttpServer; #[doc(hidden)] pub use self::ssl::*; diff --git a/src/server/output.rs b/src/server/output.rs index 970e03d8d..74b083388 100644 --- a/src/server/output.rs +++ b/src/server/output.rs @@ -273,10 +273,9 @@ impl Output { let enc = match encoding { #[cfg(feature = "flate2")] - ContentEncoding::Deflate => ContentEncoder::Deflate(ZlibEncoder::new( - transfer, - Compression::fast(), - )), + ContentEncoding::Deflate => { + ContentEncoder::Deflate(ZlibEncoder::new(transfer, Compression::fast())) + } #[cfg(feature = "flate2")] ContentEncoding::Gzip => { ContentEncoder::Gzip(GzEncoder::new(transfer, Compression::fast())) diff --git a/src/server/server.rs b/src/server/server.rs index 0646c100c..7bab70f03 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -1,16 +1,21 @@ -use std::{mem, net}; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; use std::time::Duration; -use std::sync::{Arc, atomic::{AtomicUsize, Ordering}}; +use std::{mem, net}; -use num_cpus; -use futures::{Future, Stream, Sink}; use futures::sync::{mpsc, mpsc::unbounded}; +use futures::{Future, Sink, Stream}; +use num_cpus; -use actix::{fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, - Context, Handler, Response, System, StreamHandler, WrapFuture}; +use actix::{ + fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, Context, Handler, + Response, StreamHandler, System, WrapFuture, +}; use super::accept::{AcceptLoop, AcceptNotify, Command}; -use super::worker::{StopWorker, Worker, WorkerClient, Conn}; +use super::worker::{Conn, StopWorker, Worker, WorkerClient}; use super::{PauseServer, ResumeServer, StopServer, Token}; #[doc(hidden)] @@ -39,7 +44,9 @@ impl Service for Box { /// TCP connections. 
pub trait ServiceHandler { /// Handle incoming stream - fn handle(&mut self, token: Token, io: net::TcpStream, peer: Option); + fn handle( + &mut self, token: Token, io: net::TcpStream, peer: Option, + ); /// Shutdown open handlers fn shutdown(&self, _: bool) {} @@ -156,7 +163,7 @@ impl Server { /// Add new service to server pub fn service(mut self, srv: T) -> Self where - T: Into<(Box, Vec<(Token, net::TcpListener)>)> + T: Into<(Box, Vec<(Token, net::TcpListener)>)>, { let (srv, sockets) = srv.into(); self.services.push(srv); @@ -213,8 +220,9 @@ impl Server { info!("Starting server on http://{:?}", s.1.local_addr().ok()); } } - let rx = self.accept.start( - mem::replace(&mut self.sockets, Vec::new()), workers); + let rx = self + .accept + .start(mem::replace(&mut self.sockets, Vec::new()), workers); // start http server actor let signals = self.subscribe_to_signals(); @@ -242,7 +250,9 @@ impl Server { } } - fn start_worker(&self, idx: usize, notify: AcceptNotify) -> (Addr, WorkerClient) { + fn start_worker( + &self, idx: usize, notify: AcceptNotify, + ) -> (Addr, WorkerClient) { let (tx, rx) = unbounded::>(); let conns = Connections::new(notify, self.maxconn, self.maxconnrate); let worker = WorkerClient::new(idx, tx, conns.clone()); @@ -250,7 +260,10 @@ impl Server { let addr = Arbiter::start(move |ctx: &mut Context<_>| { ctx.add_message_stream(rx); - let handlers: Vec<_> = services.into_iter().map(|s| s.create(conns.clone())).collect(); + let handlers: Vec<_> = services + .into_iter() + .map(|s| s.create(conns.clone())) + .collect(); Worker::new(conns, handlers) }); @@ -258,8 +271,7 @@ impl Server { } } -impl Actor for Server -{ +impl Actor for Server { type Context = Context; } @@ -391,7 +403,8 @@ impl StreamHandler for Server { break; } - let (addr, worker) = self.start_worker(new_idx, self.accept.get_notify()); + let (addr, worker) = + self.start_worker(new_idx, self.accept.get_notify()); self.workers.push((new_idx, addr)); self.accept.send(Command::Worker(worker)); } @@ -413,14 +426,15 @@ impl Connections { 0 }; - Connections ( - Arc::new(ConnectionsInner { - notify, - maxconn, maxconnrate, - maxconn_low, maxconnrate_low, - conn: AtomicUsize::new(0), - connrate: AtomicUsize::new(0), - })) + Connections(Arc::new(ConnectionsInner { + notify, + maxconn, + maxconnrate, + maxconn_low, + maxconnrate_low, + conn: AtomicUsize::new(0), + connrate: AtomicUsize::new(0), + })) } pub(crate) fn available(&self) -> bool { @@ -473,7 +487,6 @@ impl ConnectionsInner { self.notify.notify(); } } - } /// Type responsible for max connection stat. @@ -498,7 +511,7 @@ impl Drop for ConnectionTag { /// Type responsible for max connection rate stat. /// /// Max connections rate stat get updated on drop. 
-pub struct ConnectionRateTag (Arc); +pub struct ConnectionRateTag(Arc); impl ConnectionRateTag { fn new(inner: Arc) -> Self { diff --git a/src/server/ssl/mod.rs b/src/server/ssl/mod.rs index b29a7d4a6..bd931fb82 100644 --- a/src/server/ssl/mod.rs +++ b/src/server/ssl/mod.rs @@ -6,7 +6,7 @@ pub use self::openssl::OpensslAcceptor; #[cfg(feature = "tls")] mod nativetls; #[cfg(feature = "tls")] -pub use self::nativetls::{TlsStream, NativeTlsAcceptor}; +pub use self::nativetls::{NativeTlsAcceptor, TlsStream}; #[cfg(feature = "rust-tls")] mod rustls; diff --git a/src/server/ssl/nativetls.rs b/src/server/ssl/nativetls.rs index c3f2c38d4..e35f12d2d 100644 --- a/src/server/ssl/nativetls.rs +++ b/src/server/ssl/nativetls.rs @@ -2,7 +2,7 @@ use std::net::Shutdown; use std::{io, time}; use futures::{Async, Future, Poll}; -use native_tls::{self, TlsAcceptor, HandshakeError}; +use native_tls::{self, HandshakeError, TlsAcceptor}; use tokio_io::{AsyncRead, AsyncWrite}; use server::{AcceptorService, IoStream}; @@ -29,14 +29,16 @@ pub struct TlsStream { /// Future returned from `NativeTlsAcceptor::accept` which will resolve /// once the accept handshake has finished. -pub struct Accept{ +pub struct Accept { inner: Option, HandshakeError>>, } impl NativeTlsAcceptor { /// Create `NativeTlsAcceptor` instance pub fn new(acceptor: TlsAcceptor) -> Self { - NativeTlsAcceptor { acceptor: acceptor.into() } + NativeTlsAcceptor { + acceptor: acceptor.into(), + } } } @@ -49,7 +51,9 @@ impl AcceptorService for NativeTlsAcceptor { } fn accept(&self, io: Io) -> Self::Future { - Accept { inner: Some(self.acceptor.accept(io)) } + Accept { + inner: Some(self.acceptor.accept(io)), + } } } @@ -78,18 +82,19 @@ impl Future for Accept { fn poll(&mut self) -> Poll { match self.inner.take().expect("cannot poll MidHandshake twice") { Ok(stream) => Ok(TlsStream { inner: stream }.into()), - Err(HandshakeError::Failure(e)) => Err(io::Error::new(io::ErrorKind::Other, e)), - Err(HandshakeError::WouldBlock(s)) => { - match s.handshake() { - Ok(stream) => Ok(TlsStream { inner: stream }.into()), - Err(HandshakeError::Failure(e)) => - Err(io::Error::new(io::ErrorKind::Other, e)), - Err(HandshakeError::WouldBlock(s)) => { - self.inner = Some(Err(HandshakeError::WouldBlock(s))); - Ok(Async::NotReady) - } - } + Err(HandshakeError::Failure(e)) => { + Err(io::Error::new(io::ErrorKind::Other, e)) } + Err(HandshakeError::WouldBlock(s)) => match s.handshake() { + Ok(stream) => Ok(TlsStream { inner: stream }.into()), + Err(HandshakeError::Failure(e)) => { + Err(io::Error::new(io::ErrorKind::Other, e)) + } + Err(HandshakeError::WouldBlock(s)) => { + self.inner = Some(Err(HandshakeError::WouldBlock(s))); + Ok(Async::NotReady) + } + }, } } } @@ -124,9 +129,7 @@ impl io::Write for TlsStream { } } - -impl AsyncRead for TlsStream { -} +impl AsyncRead for TlsStream {} impl AsyncWrite for TlsStream { fn shutdown(&mut self) -> Poll<(), io::Error> { @@ -137,4 +140,4 @@ impl AsyncWrite for TlsStream { } self.inner.get_mut().shutdown() } -} \ No newline at end of file +} diff --git a/src/with.rs b/src/with.rs index caffe0acb..5e2c01414 100644 --- a/src/with.rs +++ b/src/with.rs @@ -20,8 +20,9 @@ impl R + 'static> FnWith for F { #[doc(hidden)] pub trait WithFactory: 'static -where T: FromRequest, - R: Responder, +where + T: FromRequest, + R: Responder, { fn create(self) -> With; @@ -30,10 +31,11 @@ where T: FromRequest, #[doc(hidden)] pub trait WithAsyncFactory: 'static -where T: FromRequest, - R: Future, - I: Responder, - E: Into, +where + T: FromRequest, + R: 
Future, + I: Responder, + E: Into, { fn create(self) -> WithAsync; @@ -305,7 +307,6 @@ where } } - macro_rules! with_factory_tuple ({$(($n:tt, $T:ident)),+} => { impl<$($T,)+ State, Func, Res> WithFactory<($($T,)+), State, Res> for Func where Func: Fn($($T,)+) -> Res + 'static, @@ -349,8 +350,27 @@ with_factory_tuple!((a, A), (b, B), (c, C), (d, D)); with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E)); with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F)); with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G)); -with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G), (h, H)); -with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G), (h, H), (i, I)); +with_factory_tuple!( + (a, A), + (b, B), + (c, C), + (d, D), + (e, E), + (f, F), + (g, G), + (h, H) +); +with_factory_tuple!( + (a, A), + (b, B), + (c, C), + (d, D), + (e, E), + (f, F), + (g, G), + (h, H), + (i, I) +); with_async_factory_tuple!((a, A)); with_async_factory_tuple!((a, A), (b, B)); @@ -359,5 +379,24 @@ with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D)); with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E)); with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F)); with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G)); -with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G), (h, H)); -with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G), (h, H), (i, I)); +with_async_factory_tuple!( + (a, A), + (b, B), + (c, C), + (d, D), + (e, E), + (f, F), + (g, G), + (h, H) +); +with_async_factory_tuple!( + (a, A), + (b, B), + (c, C), + (d, D), + (e, E), + (f, F), + (g, G), + (h, H), + (i, I) +); diff --git a/src/ws/mod.rs b/src/ws/mod.rs index 6b37bc7e0..c16f8d6d2 100644 --- a/src/ws/mod.rs +++ b/src/ws/mod.rs @@ -387,8 +387,7 @@ mod tests { .header( header::UPGRADE, header::HeaderValue::from_static("websocket"), - ) - .finish(); + ).finish(); assert_eq!( HandshakeError::NoConnectionUpgrade, handshake(&req).err().unwrap() @@ -398,12 +397,10 @@ mod tests { .header( header::UPGRADE, header::HeaderValue::from_static("websocket"), - ) - .header( + ).header( header::CONNECTION, header::HeaderValue::from_static("upgrade"), - ) - .finish(); + ).finish(); assert_eq!( HandshakeError::NoVersionHeader, handshake(&req).err().unwrap() @@ -413,16 +410,13 @@ mod tests { .header( header::UPGRADE, header::HeaderValue::from_static("websocket"), - ) - .header( + ).header( header::CONNECTION, header::HeaderValue::from_static("upgrade"), - ) - .header( + ).header( header::SEC_WEBSOCKET_VERSION, header::HeaderValue::from_static("5"), - ) - .finish(); + ).finish(); assert_eq!( HandshakeError::UnsupportedVersion, handshake(&req).err().unwrap() @@ -432,16 +426,13 @@ mod tests { .header( header::UPGRADE, header::HeaderValue::from_static("websocket"), - ) - .header( + ).header( header::CONNECTION, header::HeaderValue::from_static("upgrade"), - ) - .header( + ).header( header::SEC_WEBSOCKET_VERSION, header::HeaderValue::from_static("13"), - ) - .finish(); + ).finish(); assert_eq!( HandshakeError::BadWebsocketKey, handshake(&req).err().unwrap() @@ -451,20 +442,16 @@ mod tests { .header( header::UPGRADE, header::HeaderValue::from_static("websocket"), - ) - .header( + ).header( header::CONNECTION, header::HeaderValue::from_static("upgrade"), - ) - .header( + ).header( header::SEC_WEBSOCKET_VERSION, header::HeaderValue::from_static("13"), - ) - .header( + ).header( 
header::SEC_WEBSOCKET_KEY, header::HeaderValue::from_static("13"), - ) - .finish(); + ).finish(); assert_eq!( StatusCode::SWITCHING_PROTOCOLS, handshake(&req).unwrap().finish().status() diff --git a/tests/test_client.rs b/tests/test_client.rs index 16d95bf29..d7341ce1f 100644 --- a/tests/test_client.rs +++ b/tests/test_client.rs @@ -118,8 +118,7 @@ fn test_client_gzip_encoding() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Deflate) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -148,8 +147,7 @@ fn test_client_gzip_encoding_large() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Deflate) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -181,8 +179,7 @@ fn test_client_gzip_encoding_large_random() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Deflate) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -200,7 +197,6 @@ fn test_client_gzip_encoding_large_random() { assert_eq!(bytes, Bytes::from(data)); } - #[cfg(all(unix, feature = "uds"))] #[test] fn test_compatible_with_unix_socket_stream() { @@ -218,8 +214,7 @@ fn test_client_brotli_encoding() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Gzip) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -252,8 +247,7 @@ fn test_client_brotli_encoding_large_random() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Gzip) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -282,8 +276,7 @@ fn test_client_deflate_encoding() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Br) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -316,8 +309,7 @@ fn test_client_deflate_encoding_large_random() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Br) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -346,8 +338,7 @@ fn test_client_streaming_explicit() { .chunked() .content_encoding(http::ContentEncoding::Identity) .body(body)) - }) - .responder() + }).responder() }) }); diff --git a/tests/test_handlers.rs b/tests/test_handlers.rs index 4243cd3a8..3ea709c92 100644 --- a/tests/test_handlers.rs +++ b/tests/test_handlers.rs @@ -191,8 +191,7 @@ fn test_form_extractor() { .uri(srv.url("/test1/index.html")) .form(FormData { username: "test".to_string(), - }) - .unwrap(); + }).unwrap(); let response = srv.execute(request.send()).unwrap(); assert!(response.status().is_success()); @@ -306,8 +305,7 @@ fn test_path_and_query_extractor2_async() { Delay::new(Instant::now() + Duration::from_millis(10)) .and_then(move |_| { Ok(format!("Welcome {} - {}!", p.username, data.0)) - }) - .responder() + }).responder() }, ) }); @@ -336,8 +334,7 @@ fn test_path_and_query_extractor3_async() { Delay::new(Instant::now() + Duration::from_millis(10)) .and_then(move |_| { Ok(format!("Welcome {} - {}!", p.username, data.0)) - }) - .responder() + }).responder() }) }); }); @@ -361,8 +358,7 @@ fn test_path_and_query_extractor4_async() { Delay::new(Instant::now() + Duration::from_millis(10)) .and_then(move |_| { Ok(format!("Welcome {} - {}!", p.username, data.0)) - }) - .responder() + }).responder() }) }); }); @@ -387,8 +383,7 @@ fn test_path_and_query_extractor2_async2() { Delay::new(Instant::now() + Duration::from_millis(10)) .and_then(move |_| { Ok(format!("Welcome {} - {}!", p.username, data.0)) - }) - .responder() + }).responder() }, ) }); @@ -422,15 +417,13 @@ fn test_path_and_query_extractor2_async2() { fn test_path_and_query_extractor2_async3() { let mut srv = test::TestServer::new(|app| { 
app.resource("/{username}/index.html", |r| { - r.route().with( - |data: Json, p: Path, _: Query| { + r.route() + .with(|data: Json, p: Path, _: Query| { Delay::new(Instant::now() + Duration::from_millis(10)) .and_then(move |_| { Ok(format!("Welcome {} - {}!", p.username, data.0)) - }) - .responder() - }, - ) + }).responder() + }) }); }); @@ -467,8 +460,7 @@ fn test_path_and_query_extractor2_async4() { Delay::new(Instant::now() + Duration::from_millis(10)) .and_then(move |_| { Ok(format!("Welcome {} - {}!", data.1.username, (data.0).0)) - }) - .responder() + }).responder() }) }); }); diff --git a/tests/test_middleware.rs b/tests/test_middleware.rs index 4fa1c81da..6cb6ee363 100644 --- a/tests/test_middleware.rs +++ b/tests/test_middleware.rs @@ -84,11 +84,10 @@ fn test_middleware_multiple() { response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), }).middleware(MiddlewareTest { - start: Arc::clone(&act_num1), - response: Arc::clone(&act_num2), - finish: Arc::clone(&act_num3), - }) - .handler(|_| HttpResponse::Ok()) + start: Arc::clone(&act_num1), + response: Arc::clone(&act_num2), + finish: Arc::clone(&act_num3), + }).handler(|_| HttpResponse::Ok()) }); let request = srv.get().finish().unwrap(); @@ -143,11 +142,10 @@ fn test_resource_middleware_multiple() { response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), }).middleware(MiddlewareTest { - start: Arc::clone(&act_num1), - response: Arc::clone(&act_num2), - finish: Arc::clone(&act_num3), - }) - .handler(|_| HttpResponse::Ok()) + start: Arc::clone(&act_num1), + response: Arc::clone(&act_num2), + finish: Arc::clone(&act_num3), + }).handler(|_| HttpResponse::Ok()) }); let request = srv.get().finish().unwrap(); @@ -176,8 +174,7 @@ fn test_scope_middleware() { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .resource("/test", |r| r.f(|_| HttpResponse::Ok())) + }).resource("/test", |r| r.f(|_| HttpResponse::Ok())) }) }); @@ -207,13 +204,11 @@ fn test_scope_middleware_multiple() { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .middleware(MiddlewareTest { + }).middleware(MiddlewareTest { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .resource("/test", |r| r.f(|_| HttpResponse::Ok())) + }).resource("/test", |r| r.f(|_| HttpResponse::Ok())) }) }); @@ -242,8 +237,7 @@ fn test_middleware_async_handler() { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .resource("/", |r| { + }).resource("/", |r| { r.route().a(|_| { Delay::new(Instant::now() + Duration::from_millis(10)) .and_then(|_| Ok(HttpResponse::Ok())) @@ -312,8 +306,7 @@ fn test_scope_middleware_async_handler() { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .resource("/test", |r| { + }).resource("/test", |r| { r.route().a(|_| { Delay::new(Instant::now() + Duration::from_millis(10)) .and_then(|_| Ok(HttpResponse::Ok())) @@ -379,8 +372,7 @@ fn test_scope_middleware_async_error() { start: Arc::clone(&act_req), response: Arc::clone(&act_resp), finish: Arc::clone(&act_fin), - }) - .resource("/test", |r| r.f(index_test_middleware_async_error)) + }).resource("/test", |r| r.f(index_test_middleware_async_error)) }) }); @@ -514,13 +506,11 @@ fn test_async_middleware_multiple() { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .middleware(MiddlewareAsyncTest { + 
}).middleware(MiddlewareAsyncTest { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .resource("/test", |r| r.f(|_| HttpResponse::Ok())) + }).resource("/test", |r| r.f(|_| HttpResponse::Ok())) }); let request = srv.get().uri(srv.url("/test")).finish().unwrap(); @@ -550,13 +540,11 @@ fn test_async_sync_middleware_multiple() { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .middleware(MiddlewareTest { + }).middleware(MiddlewareTest { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .resource("/test", |r| r.f(|_| HttpResponse::Ok())) + }).resource("/test", |r| r.f(|_| HttpResponse::Ok())) }); let request = srv.get().uri(srv.url("/test")).finish().unwrap(); @@ -587,8 +575,7 @@ fn test_async_scope_middleware() { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .resource("/test", |r| r.f(|_| HttpResponse::Ok())) + }).resource("/test", |r| r.f(|_| HttpResponse::Ok())) }) }); @@ -620,13 +607,11 @@ fn test_async_scope_middleware_multiple() { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .middleware(MiddlewareAsyncTest { + }).middleware(MiddlewareAsyncTest { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .resource("/test", |r| r.f(|_| HttpResponse::Ok())) + }).resource("/test", |r| r.f(|_| HttpResponse::Ok())) }) }); @@ -658,13 +643,11 @@ fn test_async_async_scope_middleware_multiple() { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .middleware(MiddlewareTest { + }).middleware(MiddlewareTest { start: Arc::clone(&act_num1), response: Arc::clone(&act_num2), finish: Arc::clone(&act_num3), - }) - .resource("/test", |r| r.f(|_| HttpResponse::Ok())) + }).resource("/test", |r| r.f(|_| HttpResponse::Ok())) }) }); @@ -1012,8 +995,7 @@ fn test_session_storage_middleware() { App::new() .middleware(SessionStorage::new( CookieSessionBackend::signed(&[0; 32]).secure(false), - )) - .resource("/index", move |r| { + )).resource("/index", move |r| { r.f(|req| { let res = req.session().set(COMPLEX_NAME, COMPLEX_PAYLOAD); assert!(res.is_ok()); @@ -1033,8 +1015,7 @@ fn test_session_storage_middleware() { HttpResponse::Ok() }) - }) - .resource("/expect_cookie", move |r| { + }).resource("/expect_cookie", move |r| { r.f(|req| { let _cookies = req.cookies().expect("To get cookies"); diff --git a/tests/test_server.rs b/tests/test_server.rs index 36c1b6e6b..c573c4e12 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -59,8 +59,8 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \ #[test] #[cfg(unix)] fn test_start() { - use std::sync::mpsc; use actix::System; + use std::sync::mpsc; let _ = test::TestServer::unused_addr(); let (tx, rx) = mpsc::channel(); @@ -119,9 +119,9 @@ fn test_start() { #[test] #[cfg(unix)] fn test_shutdown() { - use std::sync::mpsc; - use std::net; use actix::System; + use std::net; + use std::sync::mpsc; let _ = test::TestServer::unused_addr(); let (tx, rx) = mpsc::channel(); @@ -162,8 +162,8 @@ fn test_shutdown() { #[test] #[cfg(unix)] fn test_panic() { - use std::sync::mpsc; use actix::System; + use std::sync::mpsc; let _ = test::TestServer::unused_addr(); let (tx, rx) = mpsc::channel(); @@ -176,8 +176,7 @@ fn test_panic() { r.method(http::Method::GET).f(|_| -> &'static str { 
panic!("error"); }); - }) - .resource("/", |r| { + }).resource("/", |r| { r.method(http::Method::GET).f(|_| HttpResponse::Ok()) }) }).workers(1); @@ -628,8 +627,7 @@ fn test_gzip_encoding() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Identity) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -661,8 +659,7 @@ fn test_gzip_encoding_large() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Identity) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -698,8 +695,7 @@ fn test_reading_gzip_encoding_large_random() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Identity) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -731,8 +727,7 @@ fn test_reading_deflate_encoding() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Identity) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -764,8 +759,7 @@ fn test_reading_deflate_encoding_large() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Identity) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -801,8 +795,7 @@ fn test_reading_deflate_encoding_large_random() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Identity) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -835,8 +828,7 @@ fn test_brotli_encoding() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Identity) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -869,8 +861,7 @@ fn test_brotli_encoding_large() { Ok(HttpResponse::Ok() .content_encoding(http::ContentEncoding::Identity) .body(bytes)) - }) - .responder() + }).responder() }) }); @@ -946,14 +937,23 @@ fn test_server_cookies() { use actix_web::http; let mut srv = test::TestServer::with_factory(|| { - App::new().resource("/", |r| r.f(|_| HttpResponse::Ok().cookie(http::CookieBuilder::new("first", "first_value").http_only(true).finish()) - .cookie(http::Cookie::new("second", "first_value")) - .cookie(http::Cookie::new("second", "second_value")) - .finish()) - ) + App::new().resource("/", |r| { + r.f(|_| { + HttpResponse::Ok() + .cookie( + http::CookieBuilder::new("first", "first_value") + .http_only(true) + .finish(), + ).cookie(http::Cookie::new("second", "first_value")) + .cookie(http::Cookie::new("second", "second_value")) + .finish() + }) + }) }); - let first_cookie = http::CookieBuilder::new("first", "first_value").http_only(true).finish(); + let first_cookie = http::CookieBuilder::new("first", "first_value") + .http_only(true) + .finish(); let second_cookie = http::Cookie::new("second", "second_value"); let request = srv.get().finish().unwrap(); @@ -972,10 +972,12 @@ fn test_server_cookies() { let first_cookie = first_cookie.to_string(); let second_cookie = second_cookie.to_string(); //Check that we have exactly two instances of raw cookie headers - let cookies = response.headers().get_all(http::header::SET_COOKIE) - .iter() - .map(|header| header.to_str().expect("To str").to_string()) - .collect::>(); + let cookies = response + .headers() + .get_all(http::header::SET_COOKIE) + .iter() + .map(|header| header.to_str().expect("To str").to_string()) + .collect::>(); assert_eq!(cookies.len(), 2); if cookies[0] == first_cookie { assert_eq!(cookies[1], second_cookie); diff --git a/tests/test_ws.rs b/tests/test_ws.rs index aa57faf66..49118fc7f 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -71,7 +71,7 @@ fn start_ws_resource(req: &HttpRequest) -> Result { #[test] fn test_simple_path() { - const PATH:&str = "/v1/ws/"; + const PATH: &str = 
"/v1/ws/"; // Create a websocket at a specific path. let mut srv = test::TestServer::new(|app| { @@ -103,7 +103,6 @@ fn test_simple_path() { ); } - #[test] fn test_empty_close_code() { let mut srv = test::TestServer::new(|app| app.handler(|req| ws::start(req, Ws))); @@ -214,8 +213,7 @@ impl Ws2 { act.send(ctx); } actix::fut::ok(()) - }) - .wait(ctx); + }).wait(ctx); } } From 810995ade026935cf0de10356138aded3d8db7a6 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 23 Aug 2018 10:10:13 -0700 Subject: [PATCH 070/219] fix tokio-tls dependency #480 --- Cargo.toml | 3 ++- src/client/connector.rs | 30 +++++++++++++++++++++++++----- src/lib.rs | 2 ++ 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6437ec268..bc182b16e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ path = "src/lib.rs" default = ["session", "brotli", "flate2-c"] # tls -tls = ["native-tls"] +tls = ["native-tls", "tokio-tls"] # openssl alpn = ["openssl", "tokio-openssl"] @@ -104,6 +104,7 @@ tokio-reactor = "0.1" # native-tls native-tls = { version="0.2", optional = true } +tokio-tls = { version="0.2", optional = true } # openssl openssl = { version="0.10", optional = true } diff --git a/src/client/connector.rs b/src/client/connector.rs index 61347682a..c0dbf85f3 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -22,9 +22,9 @@ use openssl::ssl::{Error as OpensslError, SslConnector, SslMethod}; use tokio_openssl::SslConnectorExt; #[cfg(all(feature = "tls", not(feature = "alpn")))] -use native_tls::{Error as TlsError, TlsConnector, TlsStream}; +use native_tls::{Error as TlsError, TlsConnector as NativeTlsConnector}; #[cfg(all(feature = "tls", not(feature = "alpn")))] -use tokio_tls::TlsConnectorExt; +use tokio_tls::{TlsConnector, TlsStream}; #[cfg( all( @@ -301,14 +301,14 @@ impl Default for ClientConnector { #[cfg(all(feature = "tls", not(feature = "alpn")))] { let (tx, rx) = mpsc::unbounded(); - let builder = TlsConnector::builder().unwrap(); + let builder = NativeTlsConnector::builder(); ClientConnector { stats: ClientConnectorStats::default(), subscriber: None, acq_tx: tx, acq_rx: Some(rx), resolver: None, - connector: builder.build().unwrap(), + connector: builder.build().unwrap().into(), conn_lifetime: Duration::from_secs(75), conn_keep_alive: Duration::from_secs(15), limit: 100, @@ -822,7 +822,7 @@ impl ClientConnector { if conn.0.ssl { fut::Either::A( act.connector - .connect_async(&conn.0.host, stream) + .connect(&conn.0.host, stream) .into_actor(act) .then(move |res, _, _| { match res { @@ -1342,3 +1342,23 @@ impl AsyncWrite for Connection { self.stream.shutdown() } } + +#[cfg(feature = "tls")] +/// This is temp solution untile actix-net migration +impl IoStream for TlsStream { + #[inline] + fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> { + let _ = self.get_mut().shutdown(); + Ok(()) + } + + #[inline] + fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { + self.get_mut().get_mut().set_nodelay(nodelay) + } + + #[inline] + fn set_linger(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().get_mut().set_linger(dur) + } +} \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index 4eeb5adac..f57ab937e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -148,6 +148,8 @@ extern crate serde_derive; #[cfg(feature = "tls")] extern crate native_tls; +#[cfg(feature = "tls")] +extern crate tokio_tls; #[cfg(feature = "openssl")] extern crate openssl; From 8dfc34e7851a205cb457f067b3232dab03a1abfe Mon Sep 17 00:00:00 2001 From: 
Nikolay Kim Date: Thu, 23 Aug 2018 10:27:32 -0700 Subject: [PATCH 071/219] fix tokio-tls IoStream impl --- src/client/connector.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index c0dbf85f3..1217b5bcf 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -24,7 +24,7 @@ use tokio_openssl::SslConnectorExt; #[cfg(all(feature = "tls", not(feature = "alpn")))] use native_tls::{Error as TlsError, TlsConnector as NativeTlsConnector}; #[cfg(all(feature = "tls", not(feature = "alpn")))] -use tokio_tls::{TlsConnector, TlsStream}; +use tokio_tls::{TlsConnector}; #[cfg( all( @@ -1343,6 +1343,9 @@ impl AsyncWrite for Connection { } } +#[cfg(feature = "tls")] +use tokio_tls::{TlsStream}; + #[cfg(feature = "tls")] /// This is temp solution untile actix-net migration impl IoStream for TlsStream { From 3dafe6c251187d9813e592e3f7b5d4ed4af37620 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 23 Aug 2018 11:30:07 -0700 Subject: [PATCH 072/219] hide token and server flags --- src/server/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/server/mod.rs b/src/server/mod.rs index 2ac933a76..0d10521a0 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -193,6 +193,7 @@ where HttpServer::new(factory) } +#[doc(hidden)] bitflags! { ///Flags that can be used to configure HTTP Server. pub struct ServerFlags: u8 { @@ -256,6 +257,7 @@ impl Message for StopServer { } /// Socket id token +#[doc(hidden)] #[derive(Clone, Copy)] pub struct Token(usize); From 9ef7a9c182cd38d791ce2ba32463827fa78a6f4a Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 23 Aug 2018 11:30:49 -0700 Subject: [PATCH 073/219] hide AcceptorService --- src/server/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server/mod.rs b/src/server/mod.rs index 0d10521a0..36d85a787 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -336,6 +336,7 @@ impl IntoAsyncIo for net::TcpStream { } } +#[doc(hidden)] /// Trait implemented by types that could accept incomming socket connections. pub trait AcceptorService: Clone { /// Established connection type From 48ef18ffa9436260e6c5285d21c9982622d877d8 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 23 Aug 2018 12:54:59 -0700 Subject: [PATCH 074/219] update changes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index fcaf25545..eaf7b42b8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.4] - 2018-08-xx +## [0.7.4] - 2018-08-23 ### Added From 471a3e98064fd511466077984953368f08efd772 Mon Sep 17 00:00:00 2001 From: 0x1793d1 <2362128+0x1793d1@users.noreply.github.com> Date: Fri, 24 Aug 2018 23:21:32 +0200 Subject: [PATCH 075/219] Fix server startup log message --- src/server/server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/server.rs b/src/server/server.rs index 7bab70f03..122571fd1 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -217,7 +217,7 @@ impl Server { // start accept thread for sock in &self.sockets { for s in sock.iter() { - info!("Starting server on http://{:?}", s.1.local_addr().ok()); + info!("Starting server on http://{}", s.1.local_addr().unwrap()); } } let rx = self From c3ae9997fc126988e7aec80453886b824b9d7b36 Mon Sep 17 00:00:00 2001 From: "Robert G. Jakabosky" Date: Sun, 26 Aug 2018 22:21:05 +0800 Subject: [PATCH 076/219] Fix bug with http1 client disconnects. 
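The diff below changes `read_available` to report both whether any bytes were read and whether the stream reached EOF, so a half-closed connection is only treated as a premature disconnect when no data arrived, and in-flight tasks are allowed to finish first. A minimal, self-contained sketch of that decision rule follows; the function and test names are illustrative only and are not part of this patch:

```rust
/// Sketch of the rule this patch applies to the `(read_some, stream_closed)`
/// pair returned by `read_available`: a read counts as a premature
/// disconnect only when the stream closed *and* nothing was read this poll.
fn is_premature_disconnect(read_some: bool, stream_closed: bool) -> bool {
    stream_closed && !read_some
}

#[cfg(test)]
mod disconnect_rule {
    use super::is_premature_disconnect;

    #[test]
    fn data_before_eof_is_parsed_before_disconnect() {
        // EOF with buffered data: parse it first, disconnect later.
        assert!(!is_premature_disconnect(true, true));
        // EOF with nothing read: drop the connection immediately.
        assert!(is_premature_disconnect(false, true));
        // Normal read with the stream still open.
        assert!(!is_premature_disconnect(true, false));
    }
}
```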
--- src/client/parser.rs | 12 ++++----- src/server/channel.rs | 26 +++++++++++++----- src/server/h1.rs | 63 ++++++++++++++++++++++--------------------- src/server/mod.rs | 6 ++--- 4 files changed, 61 insertions(+), 46 deletions(-) diff --git a/src/client/parser.rs b/src/client/parser.rs index f5390cc34..dd4e60bc5 100644 --- a/src/client/parser.rs +++ b/src/client/parser.rs @@ -41,10 +41,10 @@ impl HttpResponseParser { // if buf is empty parse_message will always return NotReady, let's avoid that if buf.is_empty() { match io.read_available(buf) { - Ok(Async::Ready(true)) => { + Ok(Async::Ready((_, true))) => { return Err(HttpResponseParserError::Disconnect) } - Ok(Async::Ready(false)) => (), + Ok(Async::Ready((_, false))) => (), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(err) => return Err(HttpResponseParserError::Error(err.into())), } @@ -63,10 +63,10 @@ impl HttpResponseParser { return Err(HttpResponseParserError::Error(ParseError::TooLarge)); } match io.read_available(buf) { - Ok(Async::Ready(true)) => { + Ok(Async::Ready((_, true))) => { return Err(HttpResponseParserError::Disconnect) } - Ok(Async::Ready(false)) => (), + Ok(Async::Ready((_, false))) => (), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(err) => { return Err(HttpResponseParserError::Error(err.into())) @@ -87,8 +87,8 @@ impl HttpResponseParser { loop { // read payload let (not_ready, stream_finished) = match io.read_available(buf) { - Ok(Async::Ready(true)) => (false, true), - Ok(Async::Ready(false)) => (false, false), + Ok(Async::Ready((_, true))) => (false, true), + Ok(Async::Ready((_, false))) => (false, false), Ok(Async::NotReady) => (true, false), Err(err) => return Err(err.into()), }; diff --git a/src/server/channel.rs b/src/server/channel.rs index 7de561c6b..84f301513 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -94,6 +94,7 @@ where }; } + let mut is_eof = false; let kind = match self.proto { Some(HttpProtocol::H1(ref mut h1)) => { let result = h1.poll(); @@ -120,16 +121,27 @@ where return result; } Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => { + let mut disconnect = false; match io.read_available(buf) { - Ok(Async::Ready(true)) | Err(_) => { - debug!("Ignored premature client disconnection"); - if let Some(n) = self.node.as_mut() { - n.remove() - }; - return Err(()); + Ok(Async::Ready((read_some, stream_closed))) => { + is_eof = stream_closed; + // Only disconnect if no data was read. + if is_eof && !read_some { + disconnect = true; + } + } + Err(_) => { + disconnect = true; } _ => (), } + if disconnect { + debug!("Ignored premature client disconnection"); + if let Some(n) = self.node.as_mut() { + n.remove() + }; + return Err(()); + } if buf.len() >= 14 { if buf[..14] == HTTP2_PREFACE[..] 
{ @@ -149,7 +161,7 @@ where match kind { ProtocolKind::Http1 => { self.proto = - Some(HttpProtocol::H1(h1::Http1::new(settings, io, addr, buf))); + Some(HttpProtocol::H1(h1::Http1::new(settings, io, addr, buf, is_eof))); return self.poll(); } ProtocolKind::Http2 => { diff --git a/src/server/h1.rs b/src/server/h1.rs index 808dc11a1..f9cfb622d 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -90,10 +90,10 @@ where { pub fn new( settings: Rc>, stream: T, addr: Option, - buf: BytesMut, + buf: BytesMut, is_eof: bool, ) -> Self { Http1 { - flags: Flags::KEEPALIVE, + flags: Flags::KEEPALIVE | if is_eof { Flags::DISCONNECTED } else { Flags::empty() }, stream: H1Writer::new(stream, Rc::clone(&settings)), decoder: H1Decoder::new(), payload: None, @@ -132,6 +132,21 @@ where } } + fn client_disconnect(&mut self) { + // notify all tasks + self.notify_disconnect(); + // kill keepalive + self.keepalive_timer.take(); + + // on parse error, stop reading stream but tasks need to be + // completed + self.flags.insert(Flags::ERROR); + + if let Some(mut payload) = self.payload.take() { + payload.set_error(PayloadError::Incomplete); + } + } + #[inline] pub fn poll(&mut self) -> Poll<(), ()> { // keep-alive timer @@ -188,38 +203,21 @@ where && self.can_read() { match self.stream.get_mut().read_available(&mut self.buf) { - Ok(Async::Ready(disconnected)) => { - if disconnected { - // notify all tasks - self.notify_disconnect(); - // kill keepalive - self.keepalive_timer.take(); - - // on parse error, stop reading stream but tasks need to be - // completed - self.flags.insert(Flags::ERROR); - - if let Some(mut payload) = self.payload.take() { - payload.set_error(PayloadError::Incomplete); - } - } else { + Ok(Async::Ready((read_some, disconnected))) => { + if read_some { self.parse(); } + if disconnected { + // delay disconnect until all tasks have finished. 
+ self.flags.insert(Flags::DISCONNECTED); + if self.tasks.is_empty() { + self.client_disconnect(); + } + } } Ok(Async::NotReady) => (), Err(_) => { - // notify all tasks - self.notify_disconnect(); - // kill keepalive - self.keepalive_timer.take(); - - // on parse error, stop reading stream but tasks need to be - // completed - self.flags.insert(Flags::ERROR); - - if let Some(mut payload) = self.payload.take() { - payload.set_error(PayloadError::Incomplete); - } + self.client_disconnect(); } } } @@ -331,8 +329,13 @@ where } } - // deal with keep-alive + // deal with keep-alive and steam eof (client-side write shutdown) if self.tasks.is_empty() { + // handle stream eof + if self.flags.contains(Flags::DISCONNECTED) { + self.client_disconnect(); + return Ok(Async::Ready(false)); + } // no keep-alive if self.flags.contains(Flags::ERROR) || (!self.flags.contains(Flags::KEEPALIVE) diff --git a/src/server/mod.rs b/src/server/mod.rs index 36d85a787..009e06ccd 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -390,7 +390,7 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static { fn set_linger(&mut self, dur: Option) -> io::Result<()>; - fn read_available(&mut self, buf: &mut BytesMut) -> Poll { + fn read_available(&mut self, buf: &mut BytesMut) -> Poll<(bool, bool), io::Error> { let mut read_some = false; loop { if buf.remaining_mut() < LW_BUFFER_SIZE { @@ -400,7 +400,7 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static { match self.read(buf.bytes_mut()) { Ok(n) => { if n == 0 { - return Ok(Async::Ready(!read_some)); + return Ok(Async::Ready((read_some, true))); } else { read_some = true; buf.advance_mut(n); @@ -409,7 +409,7 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static { Err(e) => { return if e.kind() == io::ErrorKind::WouldBlock { if read_some { - Ok(Async::Ready(false)) + Ok(Async::Ready((read_some, false))) } else { Ok(Async::NotReady) } From 8393d09a0fea7a303362bbae676386998d960f52 Mon Sep 17 00:00:00 2001 From: "Robert G. Jakabosky" Date: Mon, 27 Aug 2018 00:31:31 +0800 Subject: [PATCH 077/219] Fix tests. 
--- src/server/h1.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index f9cfb622d..ae5dd4655 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -611,7 +611,7 @@ mod tests { let readbuf = BytesMut::new(); let settings = Rc::new(wrk_settings()); - let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf); + let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true); h1.poll_io(); h1.poll_io(); assert_eq!(h1.tasks.len(), 1); @@ -623,7 +623,7 @@ mod tests { let readbuf = BytesMut::new(); let settings = Rc::new(wrk_settings()); - let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf); + let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true); h1.poll_io(); h1.poll_io(); assert!(h1.flags.contains(Flags::ERROR)); From 4bab50c8611683e9e51c6f49130838e934155fff Mon Sep 17 00:00:00 2001 From: Markus Unterwaditzer Date: Wed, 29 Aug 2018 20:53:31 +0200 Subject: [PATCH 078/219] Add ability to pass a custom TlsConnector (#491) --- src/client/connector.rs | 217 +++++++++++++--------------------------- 1 file changed, 68 insertions(+), 149 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 1217b5bcf..430a0f752 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -17,14 +17,16 @@ use tokio_io::{AsyncRead, AsyncWrite}; use tokio_timer::Delay; #[cfg(feature = "alpn")] -use openssl::ssl::{Error as OpensslError, SslConnector, SslMethod}; -#[cfg(feature = "alpn")] -use tokio_openssl::SslConnectorExt; +use { + openssl::ssl::{Error as SslError, SslConnector, SslMethod}, + tokio_openssl::SslConnectorExt +}; #[cfg(all(feature = "tls", not(feature = "alpn")))] -use native_tls::{Error as TlsError, TlsConnector as NativeTlsConnector}; -#[cfg(all(feature = "tls", not(feature = "alpn")))] -use tokio_tls::{TlsConnector}; +use { + native_tls::{Error as SslError, TlsConnector as NativeTlsConnector}, + tokio_tls::TlsConnector as SslConnector +}; #[cfg( all( @@ -32,42 +34,25 @@ use tokio_tls::{TlsConnector}; not(any(feature = "alpn", feature = "tls")) ) )] -use rustls::ClientConfig; +use { + rustls::ClientConfig, + std::io::Error as SslError, + std::sync::Arc, + tokio_rustls::ClientConfigExt, + webpki::DNSNameRef, + webpki_roots, +}; + #[cfg( all( feature = "rust-tls", not(any(feature = "alpn", feature = "tls")) ) )] -use std::io::Error as TLSError; -#[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) -)] -use std::sync::Arc; -#[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) -)] -use tokio_rustls::ClientConfigExt; -#[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) -)] -use webpki::DNSNameRef; -#[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) -)] -use webpki_roots; +type SslConnector = Arc; + +#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] +type SslConnector = (); use server::IoStream; use {HAS_OPENSSL, HAS_RUSTLS, HAS_TLS}; @@ -173,24 +158,9 @@ pub enum ClientConnectorError { SslIsNotSupported, /// SSL error - #[cfg(feature = "alpn")] + #[cfg(any(feature = "tls", feature = "alpn", feature = "rust-tls"))] #[fail(display = "{}", _0)] - SslError(#[cause] OpensslError), - - /// SSL error - #[cfg(all(feature = "tls", not(feature = "alpn")))] - #[fail(display = "{}", _0)] - SslError(#[cause] TlsError), - - /// SSL error - #[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = 
"tls")) - ) - )] - #[fail(display = "{}", _0)] - SslError(#[cause] TLSError), + SslError(#[cause] SslError), /// Resolver error #[fail(display = "{}", _0)] @@ -242,17 +212,7 @@ impl Paused { /// `ClientConnector` type is responsible for transport layer of a /// client connection. pub struct ClientConnector { - #[cfg(all(feature = "alpn"))] connector: SslConnector, - #[cfg(all(feature = "tls", not(feature = "alpn")))] - connector: TlsConnector, - #[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) - )] - connector: Arc, stats: ClientConnectorStats, subscriber: Option>, @@ -293,71 +253,32 @@ impl SystemService for ClientConnector {} impl Default for ClientConnector { fn default() -> ClientConnector { - #[cfg(all(feature = "alpn"))] - { - let builder = SslConnector::builder(SslMethod::tls()).unwrap(); - ClientConnector::with_connector(builder.build()) - } - #[cfg(all(feature = "tls", not(feature = "alpn")))] - { - let (tx, rx) = mpsc::unbounded(); - let builder = NativeTlsConnector::builder(); - ClientConnector { - stats: ClientConnectorStats::default(), - subscriber: None, - acq_tx: tx, - acq_rx: Some(rx), - resolver: None, - connector: builder.build().unwrap().into(), - conn_lifetime: Duration::from_secs(75), - conn_keep_alive: Duration::from_secs(15), - limit: 100, - limit_per_host: 0, - acquired: 0, - acquired_per_host: HashMap::new(), - available: HashMap::new(), - to_close: Vec::new(), - waiters: Some(HashMap::new()), - wait_timeout: None, - paused: Paused::No, - } - } - #[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) - )] - { - let mut config = ClientConfig::new(); - config - .root_store - .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); - ClientConnector::with_connector(config) - } + let connector = { + #[cfg(all(feature = "alpn"))] + { SslConnector::builder(SslMethod::tls()).unwrap().build() } - #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] - { - let (tx, rx) = mpsc::unbounded(); - ClientConnector { - stats: ClientConnectorStats::default(), - subscriber: None, - acq_tx: tx, - acq_rx: Some(rx), - resolver: None, - conn_lifetime: Duration::from_secs(75), - conn_keep_alive: Duration::from_secs(15), - limit: 100, - limit_per_host: 0, - acquired: 0, - acquired_per_host: HashMap::new(), - available: HashMap::new(), - to_close: Vec::new(), - waiters: Some(HashMap::new()), - wait_timeout: None, - paused: Paused::No, + #[cfg(all(feature = "tls", not(feature = "alpn")))] + { NativeTlsConnector::builder().build().unwrap().into() } + + #[cfg( + all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ) + )] + { + let mut config = ClientConfig::new(); + config + .root_store + .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); + Arc::new(config) } - } + + #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] + { () } + }; + + ClientConnector::with_connector_impl(connector) } } @@ -402,27 +323,8 @@ impl ClientConnector { /// } /// ``` pub fn with_connector(connector: SslConnector) -> ClientConnector { - let (tx, rx) = mpsc::unbounded(); - - ClientConnector { - connector, - stats: ClientConnectorStats::default(), - subscriber: None, - acq_tx: tx, - acq_rx: Some(rx), - resolver: None, - conn_lifetime: Duration::from_secs(75), - conn_keep_alive: Duration::from_secs(15), - limit: 100, - limit_per_host: 0, - acquired: 0, - acquired_per_host: HashMap::new(), - available: HashMap::new(), - to_close: Vec::new(), - waiters: Some(HashMap::new()), 
- wait_timeout: None, - paused: Paused::No, - } + // keep level of indirection for docstrings matching featureflags + Self::with_connector_impl(connector) } #[cfg( @@ -476,10 +378,27 @@ impl ClientConnector { /// } /// ``` pub fn with_connector(connector: ClientConfig) -> ClientConnector { + // keep level of indirection for docstrings matching featureflags + Self::with_connector_impl(Arc::new(connector)) + } + + #[cfg( + all( + feature = "tls", + not(any(feature = "alpn", feature = "rust-tls")) + ) + )] + pub fn with_connector(connector: SslConnector) -> ClientConnector { + // keep level of indirection for docstrings matching featureflags + Self::with_connector_impl(connector) + } + + #[inline] + fn with_connector_impl(connector: SslConnector) -> ClientConnector { let (tx, rx) = mpsc::unbounded(); ClientConnector { - connector: Arc::new(connector), + connector, stats: ClientConnectorStats::default(), subscriber: None, acq_tx: tx, @@ -1364,4 +1283,4 @@ impl IoStream for TlsStream { fn set_linger(&mut self, dur: Option) -> io::Result<()> { self.get_mut().get_mut().set_linger(dur) } -} \ No newline at end of file +} From 797b52ecbf21bfd5cfec2306653af6741279b595 Mon Sep 17 00:00:00 2001 From: Armin Ronacher Date: Wed, 29 Aug 2018 20:58:23 +0200 Subject: [PATCH 079/219] Update CHANGES.md --- CHANGES.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index eaf7b42b8..34b0a9621 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,11 @@ # Changes +## [0.7.5] - 2018-09-xx + +### Added + +* Added the ability to pass a custom `TlsConnector`. + ## [0.7.4] - 2018-08-23 ### Added From 3ccbce6bc833959c61f9fd2eb440b2cc7370d0cd Mon Sep 17 00:00:00 2001 From: "Robert G. Jakabosky" Date: Sat, 1 Sep 2018 00:08:53 +0800 Subject: [PATCH 080/219] Fix issue with 'Connection: close' in ClientRequest --- src/client/parser.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/client/parser.rs b/src/client/parser.rs index dd4e60bc5..5dd163395 100644 --- a/src/client/parser.rs +++ b/src/client/parser.rs @@ -41,7 +41,8 @@ impl HttpResponseParser { // if buf is empty parse_message will always return NotReady, let's avoid that if buf.is_empty() { match io.read_available(buf) { - Ok(Async::Ready((_, true))) => { + Ok(Async::Ready((true, true))) => (), + Ok(Async::Ready((false, true))) => { return Err(HttpResponseParserError::Disconnect) } Ok(Async::Ready((_, false))) => (), @@ -63,7 +64,8 @@ impl HttpResponseParser { return Err(HttpResponseParserError::Error(ParseError::TooLarge)); } match io.read_available(buf) { - Ok(Async::Ready((_, true))) => { + Ok(Async::Ready((true, true))) => (), + Ok(Async::Ready((false, true))) => { return Err(HttpResponseParserError::Disconnect) } Ok(Async::Ready((_, false))) => (), From 487519acec5d419146a3493f03bd1fba44b56b5b Mon Sep 17 00:00:00 2001 From: "Robert G. 
Jakabosky" Date: Sat, 1 Sep 2018 00:34:19 +0800 Subject: [PATCH 081/219] Add client test for 'Connection: close' as reported in issue #495 --- tests/test_client.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/test_client.rs b/tests/test_client.rs index d7341ce1f..d4a2ce1f3 100644 --- a/tests/test_client.rs +++ b/tests/test_client.rs @@ -66,6 +66,16 @@ fn test_simple() { assert_eq!(bytes, Bytes::from_static(STR.as_ref())); } +#[test] +fn test_connection_close() { + let mut srv = + test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR))); + + let request = srv.get().header("Connection", "close").finish().unwrap(); + let response = srv.execute(request.send()).unwrap(); + assert!(response.status().is_success()); +} + #[test] fn test_with_query_parameter() { let mut srv = test::TestServer::new(|app| { From 23416561734c925ba678284d02b4e4c56b11a699 Mon Sep 17 00:00:00 2001 From: "Robert G. Jakabosky" Date: Sat, 1 Sep 2018 01:41:38 +0800 Subject: [PATCH 082/219] Simplify buffer reading logic. Remove duplicate code. --- src/client/parser.rs | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/src/client/parser.rs b/src/client/parser.rs index 5dd163395..7348de32a 100644 --- a/src/client/parser.rs +++ b/src/client/parser.rs @@ -38,20 +38,17 @@ impl HttpResponseParser { where T: IoStream, { - // if buf is empty parse_message will always return NotReady, let's avoid that - if buf.is_empty() { + loop { match io.read_available(buf) { - Ok(Async::Ready((true, true))) => (), Ok(Async::Ready((false, true))) => { return Err(HttpResponseParserError::Disconnect) } - Ok(Async::Ready((_, false))) => (), + Ok(Async::Ready(_)) => (), Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(err) => return Err(HttpResponseParserError::Error(err.into())), + Err(err) => { + return Err(HttpResponseParserError::Error(err.into())) + } } - } - - loop { match HttpResponseParser::parse_message(buf) .map_err(HttpResponseParserError::Error)? { @@ -63,17 +60,6 @@ impl HttpResponseParser { if buf.capacity() >= MAX_BUFFER_SIZE { return Err(HttpResponseParserError::Error(ParseError::TooLarge)); } - match io.read_available(buf) { - Ok(Async::Ready((true, true))) => (), - Ok(Async::Ready((false, true))) => { - return Err(HttpResponseParserError::Disconnect) - } - Ok(Async::Ready((_, false))) => (), - Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(err) => { - return Err(HttpResponseParserError::Error(err.into())) - } - } } } } From a42a8a2321bfb1d32599206f70105b085d08387e Mon Sep 17 00:00:00 2001 From: "Robert G. Jakabosky" Date: Sat, 1 Sep 2018 02:15:36 +0800 Subject: [PATCH 083/219] Add some comments to clarify logic. --- src/client/parser.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/client/parser.rs b/src/client/parser.rs index 7348de32a..b6f4ea3f7 100644 --- a/src/client/parser.rs +++ b/src/client/parser.rs @@ -39,6 +39,7 @@ impl HttpResponseParser { T: IoStream, { loop { + // Read some more data into the buffer for the parser. match io.read_available(buf) { Ok(Async::Ready((false, true))) => { return Err(HttpResponseParserError::Disconnect) @@ -49,6 +50,8 @@ impl HttpResponseParser { return Err(HttpResponseParserError::Error(err.into())) } } + + // Call HTTP response parser. match HttpResponseParser::parse_message(buf) .map_err(HttpResponseParserError::Error)? 
{ @@ -60,6 +63,7 @@ impl HttpResponseParser { if buf.capacity() >= MAX_BUFFER_SIZE { return Err(HttpResponseParserError::Error(ParseError::TooLarge)); } + // Parser needs more data. Loop and read more data. } } } From 66881d7dd196eb7a588b576e6e4654362c326cf4 Mon Sep 17 00:00:00 2001 From: "Robert G. Jakabosky" Date: Sat, 1 Sep 2018 02:25:05 +0800 Subject: [PATCH 084/219] If buffer is empty, read more data before calling parser. --- src/client/parser.rs | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/client/parser.rs b/src/client/parser.rs index b6f4ea3f7..5fd81da25 100644 --- a/src/client/parser.rs +++ b/src/client/parser.rs @@ -39,6 +39,23 @@ impl HttpResponseParser { T: IoStream, { loop { + // Don't call parser until we have data to parse. + if !buf.is_empty() { + match HttpResponseParser::parse_message(buf) + .map_err(HttpResponseParserError::Error)? + { + Async::Ready((msg, decoder)) => { + self.decoder = decoder; + return Ok(Async::Ready(msg)); + } + Async::NotReady => { + if buf.capacity() >= MAX_BUFFER_SIZE { + return Err(HttpResponseParserError::Error(ParseError::TooLarge)); + } + // Parser needs more data. + } + } + } // Read some more data into the buffer for the parser. match io.read_available(buf) { Ok(Async::Ready((false, true))) => { @@ -50,22 +67,6 @@ impl HttpResponseParser { return Err(HttpResponseParserError::Error(err.into())) } } - - // Call HTTP response parser. - match HttpResponseParser::parse_message(buf) - .map_err(HttpResponseParserError::Error)? - { - Async::Ready((msg, decoder)) => { - self.decoder = decoder; - return Ok(Async::Ready(msg)); - } - Async::NotReady => { - if buf.capacity() >= MAX_BUFFER_SIZE { - return Err(HttpResponseParserError::Error(ParseError::TooLarge)); - } - // Parser needs more data. Loop and read more data. - } - } } } From 2d518318993bd1c5393136371acc0a74887c4e97 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 31 Aug 2018 17:24:13 -0700 Subject: [PATCH 085/219] handle socket read disconnect --- CHANGES.md | 5 ++++ src/server/h1.rs | 60 +++++++++++++++++++++++++----------------- src/server/h1writer.rs | 20 +++++++------- 3 files changed, 51 insertions(+), 34 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 34b0a9621..d99aa8ba2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,6 +6,11 @@ * Added the ability to pass a custom `TlsConnector`. +### Fixed + +* Handle socket read disconnect + + ## [0.7.4] - 2018-08-23 ### Added diff --git a/src/server/h1.rs b/src/server/h1.rs index ae5dd4655..1acae26ec 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -22,13 +22,14 @@ use super::{HttpHandler, HttpHandlerTask, IoStream}; const MAX_PIPELINED_MESSAGES: usize = 16; bitflags! 
{ - struct Flags: u8 { - const STARTED = 0b0000_0001; - const ERROR = 0b0000_0010; - const KEEPALIVE = 0b0000_0100; - const SHUTDOWN = 0b0000_1000; - const DISCONNECTED = 0b0001_0000; - const POLLED = 0b0010_0000; + pub struct Flags: u8 { + const STARTED = 0b0000_0001; + const ERROR = 0b0000_0010; + const KEEPALIVE = 0b0000_0100; + const SHUTDOWN = 0b0000_1000; + const READ_DISCONNECTED = 0b0001_0000; + const WRITE_DISCONNECTED = 0b0010_0000; + const POLLED = 0b0100_0000; } } @@ -93,7 +94,7 @@ where buf: BytesMut, is_eof: bool, ) -> Self { Http1 { - flags: Flags::KEEPALIVE | if is_eof { Flags::DISCONNECTED } else { Flags::empty() }, + flags: if is_eof { Flags::READ_DISCONNECTED } else { Flags::KEEPALIVE }, stream: H1Writer::new(stream, Rc::clone(&settings)), decoder: H1Decoder::new(), payload: None, @@ -117,6 +118,10 @@ where #[inline] fn can_read(&self) -> bool { + if self.flags.intersects(Flags::ERROR | Flags::READ_DISCONNECTED) { + return false + } + if let Some(ref info) = self.payload { info.need_read() == PayloadStatus::Read } else { @@ -125,6 +130,8 @@ where } fn notify_disconnect(&mut self) { + self.flags.insert(Flags::WRITE_DISCONNECTED); + // notify all tasks self.stream.disconnected(); for task in &mut self.tasks { @@ -163,11 +170,15 @@ where // shutdown if self.flags.contains(Flags::SHUTDOWN) { + if self.flags.intersects( + Flags::ERROR | Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED) { + return Ok(Async::Ready(())) + } match self.stream.poll_completed(true) { Ok(Async::NotReady) => return Ok(Async::NotReady), Ok(Async::Ready(_)) => return Ok(Async::Ready(())), Err(err) => { - debug!("Error sending data: {}", err); + debug!("Error sendips ng data: {}", err); return Err(()); } } @@ -197,11 +208,9 @@ where self.flags.insert(Flags::POLLED); return; } + // read io from socket - if !self.flags.intersects(Flags::ERROR) - && self.tasks.len() < MAX_PIPELINED_MESSAGES - && self.can_read() - { + if self.can_read() && self.tasks.len() < MAX_PIPELINED_MESSAGES { match self.stream.get_mut().read_available(&mut self.buf) { Ok(Async::Ready((read_some, disconnected))) => { if read_some { @@ -209,7 +218,7 @@ where } if disconnected { // delay disconnect until all tasks have finished. 
- self.flags.insert(Flags::DISCONNECTED); + self.flags.insert(Flags::READ_DISCONNECTED); if self.tasks.is_empty() { self.client_disconnect(); } @@ -231,7 +240,9 @@ where let mut idx = 0; while idx < self.tasks.len() { // only one task can do io operation in http/1 - if !io && !self.tasks[idx].flags.contains(EntryFlags::EOF) { + if !io && !self.tasks[idx].flags.contains(EntryFlags::EOF) + && !self.flags.contains(Flags::WRITE_DISCONNECTED) + { // io is corrupted, send buffer if self.tasks[idx].flags.contains(EntryFlags::ERROR) { if let Ok(Async::NotReady) = self.stream.poll_completed(true) { @@ -295,7 +306,6 @@ where } // cleanup finished tasks - let max = self.tasks.len() >= MAX_PIPELINED_MESSAGES; while !self.tasks.is_empty() { if self.tasks[0] .flags @@ -306,15 +316,13 @@ where break; } } - // read more message - if max && self.tasks.len() >= MAX_PIPELINED_MESSAGES { - return Ok(Async::Ready(true)); - } // check stream state if self.flags.contains(Flags::STARTED) { match self.stream.poll_completed(false) { - Ok(Async::NotReady) => return Ok(Async::NotReady), + Ok(Async::NotReady) => { + return Ok(Async::NotReady) + }, Err(err) => { debug!("Error sending data: {}", err); self.notify_disconnect(); @@ -332,8 +340,7 @@ where // deal with keep-alive and steam eof (client-side write shutdown) if self.tasks.is_empty() { // handle stream eof - if self.flags.contains(Flags::DISCONNECTED) { - self.client_disconnect(); + if self.flags.contains(Flags::READ_DISCONNECTED) { return Ok(Async::Ready(false)); } // no keep-alive @@ -451,7 +458,12 @@ where break; } } - Ok(None) => break, + Ok(None) => { + if self.flags.contains(Flags::READ_DISCONNECTED) && self.tasks.is_empty() { + self.client_disconnect(); + } + break + }, Err(e) => { self.flags.insert(Flags::ERROR); if let Some(mut payload) = self.payload.take() { diff --git a/src/server/h1writer.rs b/src/server/h1writer.rs index 8981f9df9..422f0ebc1 100644 --- a/src/server/h1writer.rs +++ b/src/server/h1writer.rs @@ -63,7 +63,9 @@ impl H1Writer { self.flags = Flags::KEEPALIVE; } - pub fn disconnected(&mut self) {} + pub fn disconnected(&mut self) { + self.flags.insert(Flags::DISCONNECTED); + } pub fn keepalive(&self) -> bool { self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE) @@ -268,10 +270,7 @@ impl Writer for H1Writer { let pl: &[u8] = payload.as_ref(); let n = match Self::write_data(&mut self.stream, pl) { Err(err) => { - if err.kind() == io::ErrorKind::WriteZero { - self.disconnected(); - } - + self.disconnected(); return Err(err); } Ok(val) => val, @@ -315,14 +314,15 @@ impl Writer for H1Writer { #[inline] fn poll_completed(&mut self, shutdown: bool) -> Poll<(), io::Error> { + if self.flags.contains(Flags::DISCONNECTED) { + return Err(io::Error::new(io::ErrorKind::Other, "disconnected")); + } + if !self.buffer.is_empty() { let written = { match Self::write_data(&mut self.stream, self.buffer.as_ref().as_ref()) { Err(err) => { - if err.kind() == io::ErrorKind::WriteZero { - self.disconnected(); - } - + self.disconnected(); return Err(err); } Ok(val) => val, @@ -339,7 +339,7 @@ impl Writer for H1Writer { self.stream.poll_flush()?; self.stream.shutdown() } else { - self.stream.poll_flush() + Ok(self.stream.poll_flush()?) 
} } } From 3fa23f5e10ce89ee7f06bddf0a8b3ac35062cd39 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 31 Aug 2018 17:25:15 -0700 Subject: [PATCH 086/219] update version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index bc182b16e..631b48dc9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.4" +version = "0.7.5" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md" From c313c003a4b8b3526b33f782996116263cba7140 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 31 Aug 2018 17:45:29 -0700 Subject: [PATCH 087/219] Fix typo --- src/server/h1.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index 1acae26ec..dd8497101 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -178,7 +178,7 @@ where Ok(Async::NotReady) => return Ok(Async::NotReady), Ok(Async::Ready(_)) => return Ok(Async::Ready(())), Err(err) => { - debug!("Error sendips ng data: {}", err); + debug!("Error sending data: {}", err); return Err(()); } } From 0b42cae08254768d7b16ab95ffce1d2269ff0b05 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 31 Aug 2018 18:54:19 -0700 Subject: [PATCH 088/219] update tests --- src/server/h1.rs | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index 1acae26ec..652922973 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -291,9 +291,8 @@ where } else if !self.tasks[idx].flags.contains(EntryFlags::FINISHED) { match self.tasks[idx].pipe.poll_completed() { Ok(Async::NotReady) => (), - Ok(Async::Ready(_)) => { - self.tasks[idx].flags.insert(EntryFlags::FINISHED) - } + Ok(Async::Ready(_)) => + self.tasks[idx].flags.insert(EntryFlags::FINISHED), Err(err) => { self.notify_disconnect(); self.tasks[idx].flags.insert(EntryFlags::ERROR); @@ -618,24 +617,35 @@ mod tests { } #[test] - fn test_req_parse() { + fn test_req_parse1() { let buf = Buffer::new("GET /test HTTP/1.1\r\n\r\n"); let readbuf = BytesMut::new(); let settings = Rc::new(wrk_settings()); - let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true); + let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false); h1.poll_io(); h1.poll_io(); assert_eq!(h1.tasks.len(), 1); } + #[test] + fn test_req_parse2() { + let buf = Buffer::new(""); + let readbuf = BytesMut::from(Vec::::from(&b"GET /test HTTP/1.1\r\n\r\n"[..])); + let settings = Rc::new(wrk_settings()); + + let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true); + h1.poll_io(); + assert_eq!(h1.tasks.len(), 1); + } + #[test] fn test_req_parse_err() { let buf = Buffer::new("GET /test HTTP/1\r\n\r\n"); let readbuf = BytesMut::new(); let settings = Rc::new(wrk_settings()); - let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true); + let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false); h1.poll_io(); h1.poll_io(); assert!(h1.flags.contains(Flags::ERROR)); From a2b170fec96d0d101dcd7e1abd31c5595d88e453 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 31 Aug 2018 18:56:21 -0700 Subject: [PATCH 089/219] fmt --- src/client/connector.rs | 86 ++++++++++++++++++----------------------- src/client/parser.rs | 8 ++-- src/param.rs | 1 - src/server/channel.rs | 5 ++- src/server/h1.rs | 41 +++++++++++++------- 5 files changed, 71 insertions(+), 70 deletions(-) diff --git 
a/src/client/connector.rs b/src/client/connector.rs index 430a0f752..694e03bc9 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -19,36 +19,28 @@ use tokio_timer::Delay; #[cfg(feature = "alpn")] use { openssl::ssl::{Error as SslError, SslConnector, SslMethod}, - tokio_openssl::SslConnectorExt + tokio_openssl::SslConnectorExt, }; #[cfg(all(feature = "tls", not(feature = "alpn")))] use { native_tls::{Error as SslError, TlsConnector as NativeTlsConnector}, - tokio_tls::TlsConnector as SslConnector + tokio_tls::TlsConnector as SslConnector, }; -#[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) -)] +#[cfg(all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) +))] use { - rustls::ClientConfig, - std::io::Error as SslError, - std::sync::Arc, - tokio_rustls::ClientConfigExt, - webpki::DNSNameRef, - webpki_roots, + rustls::ClientConfig, std::io::Error as SslError, std::sync::Arc, + tokio_rustls::ClientConfigExt, webpki::DNSNameRef, webpki_roots, }; -#[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) -)] +#[cfg(all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) +))] type SslConnector = Arc; #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] @@ -255,17 +247,19 @@ impl Default for ClientConnector { fn default() -> ClientConnector { let connector = { #[cfg(all(feature = "alpn"))] - { SslConnector::builder(SslMethod::tls()).unwrap().build() } + { + SslConnector::builder(SslMethod::tls()).unwrap().build() + } #[cfg(all(feature = "tls", not(feature = "alpn")))] - { NativeTlsConnector::builder().build().unwrap().into() } + { + NativeTlsConnector::builder().build().unwrap().into() + } - #[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) - )] + #[cfg(all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ))] { let mut config = ClientConfig::new(); config @@ -275,7 +269,9 @@ impl Default for ClientConnector { } #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] - { () } + { + () + } }; ClientConnector::with_connector_impl(connector) @@ -327,12 +323,10 @@ impl ClientConnector { Self::with_connector_impl(connector) } - #[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) - )] + #[cfg(all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ))] /// Create `ClientConnector` actor with custom `SslConnector` instance. /// /// By default `ClientConnector` uses very a simple SSL configuration. 
@@ -382,12 +376,10 @@ impl ClientConnector { Self::with_connector_impl(Arc::new(connector)) } - #[cfg( - all( - feature = "tls", - not(any(feature = "alpn", feature = "rust-tls")) - ) - )] + #[cfg(all( + feature = "tls", + not(any(feature = "alpn", feature = "rust-tls")) + ))] pub fn with_connector(connector: SslConnector) -> ClientConnector { // keep level of indirection for docstrings matching featureflags Self::with_connector_impl(connector) @@ -772,12 +764,10 @@ impl ClientConnector { } } - #[cfg( - all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) - ) - )] + #[cfg(all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "tls")) + ))] match res { Err(err) => { let _ = waiter.tx.send(Err(err.into())); @@ -1263,7 +1253,7 @@ impl AsyncWrite for Connection { } #[cfg(feature = "tls")] -use tokio_tls::{TlsStream}; +use tokio_tls::TlsStream; #[cfg(feature = "tls")] /// This is temp solution untile actix-net migration diff --git a/src/client/parser.rs b/src/client/parser.rs index 5fd81da25..0ee4598de 100644 --- a/src/client/parser.rs +++ b/src/client/parser.rs @@ -50,7 +50,9 @@ impl HttpResponseParser { } Async::NotReady => { if buf.capacity() >= MAX_BUFFER_SIZE { - return Err(HttpResponseParserError::Error(ParseError::TooLarge)); + return Err(HttpResponseParserError::Error( + ParseError::TooLarge, + )); } // Parser needs more data. } @@ -63,9 +65,7 @@ impl HttpResponseParser { } Ok(Async::Ready(_)) => (), Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(err) => { - return Err(HttpResponseParserError::Error(err.into())) - } + Err(err) => return Err(HttpResponseParserError::Error(err.into())), } } } diff --git a/src/param.rs b/src/param.rs index 063159d72..d0664df99 100644 --- a/src/param.rs +++ b/src/param.rs @@ -236,7 +236,6 @@ macro_rules! 
FROM_STR { ($type:ty) => { impl FromParam for $type { type Err = InternalError<<$type as FromStr>::Err>; - fn from_param(val: &str) -> Result { <$type as FromStr>::from_str(val) .map_err(|e| InternalError::new(e, StatusCode::BAD_REQUEST)) diff --git a/src/server/channel.rs b/src/server/channel.rs index 84f301513..bec1c4c87 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -160,8 +160,9 @@ where if let Some(HttpProtocol::Unknown(settings, addr, io, buf)) = self.proto.take() { match kind { ProtocolKind::Http1 => { - self.proto = - Some(HttpProtocol::H1(h1::Http1::new(settings, io, addr, buf, is_eof))); + self.proto = Some(HttpProtocol::H1(h1::Http1::new( + settings, io, addr, buf, is_eof, + ))); return self.poll(); } ProtocolKind::Http2 => { diff --git a/src/server/h1.rs b/src/server/h1.rs index 652922973..f4875519e 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -94,7 +94,11 @@ where buf: BytesMut, is_eof: bool, ) -> Self { Http1 { - flags: if is_eof { Flags::READ_DISCONNECTED } else { Flags::KEEPALIVE }, + flags: if is_eof { + Flags::READ_DISCONNECTED + } else { + Flags::KEEPALIVE + }, stream: H1Writer::new(stream, Rc::clone(&settings)), decoder: H1Decoder::new(), payload: None, @@ -118,8 +122,11 @@ where #[inline] fn can_read(&self) -> bool { - if self.flags.intersects(Flags::ERROR | Flags::READ_DISCONNECTED) { - return false + if self + .flags + .intersects(Flags::ERROR | Flags::READ_DISCONNECTED) + { + return false; } if let Some(ref info) = self.payload { @@ -171,8 +178,9 @@ where // shutdown if self.flags.contains(Flags::SHUTDOWN) { if self.flags.intersects( - Flags::ERROR | Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED) { - return Ok(Async::Ready(())) + Flags::ERROR | Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED, + ) { + return Ok(Async::Ready(())); } match self.stream.poll_completed(true) { Ok(Async::NotReady) => return Ok(Async::NotReady), @@ -240,7 +248,8 @@ where let mut idx = 0; while idx < self.tasks.len() { // only one task can do io operation in http/1 - if !io && !self.tasks[idx].flags.contains(EntryFlags::EOF) + if !io + && !self.tasks[idx].flags.contains(EntryFlags::EOF) && !self.flags.contains(Flags::WRITE_DISCONNECTED) { // io is corrupted, send buffer @@ -291,8 +300,9 @@ where } else if !self.tasks[idx].flags.contains(EntryFlags::FINISHED) { match self.tasks[idx].pipe.poll_completed() { Ok(Async::NotReady) => (), - Ok(Async::Ready(_)) => - self.tasks[idx].flags.insert(EntryFlags::FINISHED), + Ok(Async::Ready(_)) => { + self.tasks[idx].flags.insert(EntryFlags::FINISHED) + } Err(err) => { self.notify_disconnect(); self.tasks[idx].flags.insert(EntryFlags::ERROR); @@ -319,9 +329,7 @@ where // check stream state if self.flags.contains(Flags::STARTED) { match self.stream.poll_completed(false) { - Ok(Async::NotReady) => { - return Ok(Async::NotReady) - }, + Ok(Async::NotReady) => return Ok(Async::NotReady), Err(err) => { debug!("Error sending data: {}", err); self.notify_disconnect(); @@ -458,11 +466,13 @@ where } } Ok(None) => { - if self.flags.contains(Flags::READ_DISCONNECTED) && self.tasks.is_empty() { + if self.flags.contains(Flags::READ_DISCONNECTED) + && self.tasks.is_empty() + { self.client_disconnect(); } - break - }, + break; + } Err(e) => { self.flags.insert(Flags::ERROR); if let Some(mut payload) = self.payload.take() { @@ -631,7 +641,8 @@ mod tests { #[test] fn test_req_parse2() { let buf = Buffer::new(""); - let readbuf = BytesMut::from(Vec::::from(&b"GET /test HTTP/1.1\r\n\r\n"[..])); + let readbuf = + 
BytesMut::from(Vec::::from(&b"GET /test HTTP/1.1\r\n\r\n"[..])); let settings = Rc::new(wrk_settings()); let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true); From 0425e2776f5cbcaca3489a5dd565b12e63bc688c Mon Sep 17 00:00:00 2001 From: Robert Gabriel Jakabosky Date: Sat, 1 Sep 2018 17:00:32 +0800 Subject: [PATCH 090/219] Fix Issue #490 (#498) * Add failing testcase for HTTP 404 response with no reason text. * Include canonical reason test for HTTP error responses. * Don't send a reason for unknown status codes. --- src/server/error.rs | 5 +++++ tests/test_server.rs | 23 +++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/src/server/error.rs b/src/server/error.rs index 5bd0bf83b..d08ccf87f 100644 --- a/src/server/error.rs +++ b/src/server/error.rs @@ -21,7 +21,12 @@ impl HttpHandlerTask for ServerError { bytes.reserve(helpers::STATUS_LINE_BUF_SIZE + 1); helpers::write_status_line(self.0, self.1.as_u16(), bytes); } + // Convert Status Code to Reason. + let reason = self.1.canonical_reason().unwrap_or(""); + io.buffer().extend_from_slice(reason.as_bytes()); + // No response body. io.buffer().extend_from_slice(b"\r\ncontent-length: 0\r\n"); + // date header io.set_date(); Ok(Async::Ready(true)) } diff --git a/tests/test_server.rs b/tests/test_server.rs index c573c4e12..8235be6b6 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -932,6 +932,29 @@ fn test_application() { assert!(response.status().is_success()); } +#[test] +fn test_default_404_handler_response() { + let mut srv = test::TestServer::with_factory(|| { + App::new() + .prefix("/app") + .resource("", |r| r.f(|_| HttpResponse::Ok())) + .resource("/", |r| r.f(|_| HttpResponse::Ok())) + }); + let addr = srv.addr(); + + let mut buf = [0; 24]; + let request = TcpStream::connect(&addr) + .and_then(|sock| { + tokio::io::write_all(sock, "HEAD / HTTP/1.1\r\nHost: localhost\r\n\r\n") + .and_then(|(sock, _)| tokio::io::read_exact(sock, &mut buf)) + .and_then(|(_, buf)| Ok(buf)) + }) + .map_err(|e| panic!("{:?}", e)); + let response = srv.execute(request).unwrap(); + let rep = String::from_utf8_lossy(&response[..]); + assert!(rep.contains("HTTP/1.1 404 Not Found")); +} + #[test] fn test_server_cookies() { use actix_web::http; From 3439f552886e650ce5293575f9808e40f76909b6 Mon Sep 17 00:00:00 2001 From: Markus Unterwaditzer Date: Sat, 1 Sep 2018 17:13:52 +0200 Subject: [PATCH 091/219] doc: Add example for using custom nativetls connector (#497) --- src/client/connector.rs | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 694e03bc9..239a00c5e 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -292,7 +292,6 @@ impl ClientConnector { /// # extern crate futures; /// # use futures::{future, Future}; /// # use std::io::Write; - /// # use std::process; /// # use actix_web::actix::Actor; /// extern crate openssl; /// use actix_web::{actix, client::ClientConnector, client::Connect}; @@ -337,10 +336,8 @@ impl ClientConnector { /// # #![cfg(feature = "rust-tls")] /// # extern crate actix_web; /// # extern crate futures; - /// # extern crate tokio; /// # use futures::{future, Future}; /// # use std::io::Write; - /// # use std::process; /// # use actix_web::actix::Actor; /// extern crate rustls; /// extern crate webpki_roots; @@ -380,6 +377,42 @@ impl ClientConnector { feature = "tls", not(any(feature = "alpn", feature = "rust-tls")) ))] + /// Create `ClientConnector` actor with 
custom `SslConnector` instance. + /// + /// By default `ClientConnector` uses very a simple SSL configuration. + /// With `with_connector` method it is possible to use a custom + /// `SslConnector` object. + /// + /// ```rust + /// # #![cfg(feature = "tls")] + /// # extern crate actix_web; + /// # extern crate futures; + /// # use futures::{future, Future}; + /// # use std::io::Write; + /// # use actix_web::actix::Actor; + /// extern crate native_tls; + /// extern crate webpki_roots; + /// use native_tls::TlsConnector; + /// use actix_web::{actix, client::ClientConnector, client::Connect}; + /// + /// fn main() { + /// actix::run(|| { + /// let connector = TlsConnector::new().unwrap(); + /// let conn = ClientConnector::with_connector(connector.into()).start(); + /// + /// conn.send( + /// Connect::new("https://www.rust-lang.org").unwrap()) // <- connect to host + /// .map_err(|_| ()) + /// .and_then(|res| { + /// if let Ok(mut stream) = res { + /// stream.write_all(b"GET / HTTP/1.0\r\n\r\n").unwrap(); + /// } + /// # actix::System::current().stop(); + /// Ok(()) + /// }) + /// }); + /// } + /// ``` pub fn with_connector(connector: SslConnector) -> ClientConnector { // keep level of indirection for docstrings matching featureflags Self::with_connector_impl(connector) From f2f05e77155ba08348906ed31491a9fa9ae3cc5e Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 2 Sep 2018 07:47:19 -0700 Subject: [PATCH 092/219] allow to register handlers on scope level #465 --- CHANGES.md | 3 +++ src/scope.rs | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 73 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index d99aa8ba2..b8ab0f879 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,6 +6,9 @@ * Added the ability to pass a custom `TlsConnector`. +* Allow to register handlers on scope level #465 + + ### Fixed * Handle socket read disconnect diff --git a/src/scope.rs b/src/scope.rs index 8298f534a..83e43f43a 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -5,7 +5,10 @@ use std::rc::Rc; use futures::{Async, Future, Poll}; use error::Error; -use handler::{AsyncResult, AsyncResultItem, FromRequest, Responder, RouteHandler}; +use handler::{ + AsyncResult, AsyncResultItem, FromRequest, Handler, Responder, RouteHandler, + WrapHandler, +}; use http::Method; use httprequest::HttpRequest; use httpresponse::HttpResponse; @@ -286,6 +289,44 @@ impl Scope { self } + /// Configure handler for specific path prefix. + /// + /// A path prefix consists of valid path segments, i.e for the + /// prefix `/app` any request with the paths `/app`, `/app/` or + /// `/app/test` would match, but the path `/application` would + /// not. 
+ /// + /// ```rust + /// # extern crate actix_web; + /// use actix_web::{http, App, HttpRequest, HttpResponse}; + /// + /// fn main() { + /// let app = App::new().scope("/scope-prefix", |scope| { + /// handler("/app", |req: &HttpRequest| match *req.method() { + /// http::Method::GET => HttpResponse::Ok(), + /// http::Method::POST => HttpResponse::MethodNotAllowed(), + /// _ => HttpResponse::NotFound(), + /// }) + /// }); + /// } + /// ``` + pub fn handler>(mut self, path: &str, handler: H) -> Scope { + { + let mut path = path.trim().trim_right_matches('/').to_owned(); + if !path.is_empty() && !path.starts_with('/') { + path.insert(0, '/') + } + if path.len() > 1 && path.ends_with('/') { + path.pop(); + } + + Rc::get_mut(&mut self.router) + .expect("Multiple copies of scope router") + .register_handler(&path, Box::new(WrapHandler::new(handler)), None); + } + self + } + /// Register a scope middleware /// /// This is similar to `App's` middlewares, but @@ -1120,4 +1161,32 @@ mod tests { let resp = app.run(req); assert_eq!(resp.as_msg().status(), StatusCode::METHOD_NOT_ALLOWED); } + + #[test] + fn test_handler() { + let app = App::new() + .scope("/scope", |scope| { + scope.handler("/test", |_: &_| HttpResponse::Ok()) + }).finish(); + + let req = TestRequest::with_uri("/scope/test").request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::OK); + + let req = TestRequest::with_uri("/scope/test/").request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::OK); + + let req = TestRequest::with_uri("/scope/test/app").request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::OK); + + let req = TestRequest::with_uri("/scope/testapp").request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND); + + let req = TestRequest::with_uri("/scope/blah").request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND); + } } From 968c81e2678ee301b5f685181bac5edec7d312b2 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 2 Sep 2018 08:14:54 -0700 Subject: [PATCH 093/219] Handling scoped paths without leading slashes #460 --- CHANGES.md | 4 ++- src/scope.rs | 84 +++++++++++++++++++++++++++++++++++--------- tests/test_server.rs | 3 +- 3 files changed, 71 insertions(+), 20 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b8ab0f879..dd6cdcd20 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.5] - 2018-09-xx +## [0.7.5] - 2018-09-02 ### Added @@ -13,6 +13,8 @@ * Handle socket read disconnect +* Handling scoped paths without leading slashes #460 + ## [0.7.4] - 2018-08-23 diff --git a/src/scope.rs b/src/scope.rs index 83e43f43a..a1fd907a1 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -183,7 +183,7 @@ impl Scope { where F: FnOnce(Scope) -> Scope, { - let rdef = ResourceDef::prefix(&path); + let rdef = ResourceDef::prefix(&insert_slash(path)); let scope = Scope { rdef: rdef.clone(), filters: Vec::new(), @@ -230,9 +230,11 @@ impl Scope { R: Responder + 'static, T: FromRequest + 'static, { - Rc::get_mut(&mut self.router) - .unwrap() - .register_route(path, method, f); + Rc::get_mut(&mut self.router).unwrap().register_route( + &insert_slash(path), + method, + f, + ); self } @@ -264,7 +266,7 @@ impl Scope { F: FnOnce(&mut Resource) -> R + 'static, { // add resource - let mut resource = Resource::new(ResourceDef::new(path)); + let mut resource = Resource::new(ResourceDef::new(&insert_slash(path))); f(&mut resource); Rc::get_mut(&mut 
self.router) @@ -311,19 +313,17 @@ impl Scope { /// } /// ``` pub fn handler>(mut self, path: &str, handler: H) -> Scope { - { - let mut path = path.trim().trim_right_matches('/').to_owned(); - if !path.is_empty() && !path.starts_with('/') { - path.insert(0, '/') - } - if path.len() > 1 && path.ends_with('/') { - path.pop(); - } - - Rc::get_mut(&mut self.router) - .expect("Multiple copies of scope router") - .register_handler(&path, Box::new(WrapHandler::new(handler)), None); + let mut path = path.trim().trim_right_matches('/').to_owned(); + if !path.is_empty() && !path.starts_with('/') { + path.insert(0, '/') } + if path.len() > 1 && path.ends_with('/') { + path.pop(); + } + + Rc::get_mut(&mut self.router) + .expect("Multiple copies of scope router") + .register_handler(&path, Box::new(WrapHandler::new(handler)), None); self } @@ -342,6 +342,14 @@ impl Scope { } } +fn insert_slash(path: &str) -> String { + let mut path = path.to_owned(); + if !path.is_empty() && !path.starts_with('/') { + path.insert(0, '/'); + }; + path +} + impl RouteHandler for Scope { fn handle(&self, req: &HttpRequest) -> AsyncResult { let tail = req.match_info().tail as usize; @@ -844,6 +852,34 @@ mod tests { assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND); } + #[test] + fn test_scope_route_without_leading_slash() { + let app = App::new() + .scope("app", |scope| { + scope + .route("path1", Method::GET, |_: HttpRequest<_>| HttpResponse::Ok()) + .route("path1", Method::DELETE, |_: HttpRequest<_>| { + HttpResponse::Ok() + }) + }).finish(); + + let req = TestRequest::with_uri("/app/path1").request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::OK); + + let req = TestRequest::with_uri("/app/path1") + .method(Method::DELETE) + .request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::OK); + + let req = TestRequest::with_uri("/app/path1") + .method(Method::POST) + .request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND); + } + #[test] fn test_scope_filter() { let app = App::new() @@ -1013,6 +1049,20 @@ mod tests { assert_eq!(resp.as_msg().status(), StatusCode::CREATED); } + #[test] + fn test_nested_scope_no_slash() { + let app = App::new() + .scope("/app", |scope| { + scope.nested("t1", |scope| { + scope.resource("/path1", |r| r.f(|_| HttpResponse::Created())) + }) + }).finish(); + + let req = TestRequest::with_uri("/app/t1/path1").request(); + let resp = app.run(req); + assert_eq!(resp.as_msg().status(), StatusCode::CREATED); + } + #[test] fn test_nested_scope_root() { let app = App::new() diff --git a/tests/test_server.rs b/tests/test_server.rs index 8235be6b6..97161a30f 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -948,8 +948,7 @@ fn test_default_404_handler_response() { tokio::io::write_all(sock, "HEAD / HTTP/1.1\r\nHost: localhost\r\n\r\n") .and_then(|(sock, _)| tokio::io::read_exact(sock, &mut buf)) .and_then(|(_, buf)| Ok(buf)) - }) - .map_err(|e| panic!("{:?}", e)); + }).map_err(|e| panic!("{:?}", e)); let response = srv.execute(request).unwrap(); let rep = String::from_utf8_lossy(&response[..]); assert!(rep.contains("HTTP/1.1 404 Not Found")); From b7a73e0a4fd62a53da7fa0ee638f7e019ade390e Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 2 Sep 2018 08:51:26 -0700 Subject: [PATCH 094/219] fix Scope::handler doc test --- src/scope.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/scope.rs b/src/scope.rs index a1fd907a1..6e7f28985 100644 --- a/src/scope.rs +++ 
b/src/scope.rs @@ -304,7 +304,7 @@ impl Scope { /// /// fn main() { /// let app = App::new().scope("/scope-prefix", |scope| { - /// handler("/app", |req: &HttpRequest| match *req.method() { + /// scope.handler("/app", |req: &HttpRequest| match *req.method() { /// http::Method::GET => HttpResponse::Ok(), /// http::Method::POST => HttpResponse::MethodNotAllowed(), /// _ => HttpResponse::NotFound(), From 24d12289435db12517741e818a23cb811cc5301b Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 3 Sep 2018 11:28:47 -0700 Subject: [PATCH 095/219] simplify handler path processing --- src/application.rs | 7 ++----- src/scope.rs | 9 +-------- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/src/application.rs b/src/application.rs index 3ef753f5f..407268322 100644 --- a/src/application.rs +++ b/src/application.rs @@ -447,11 +447,8 @@ where { let mut path = path.trim().trim_right_matches('/').to_owned(); if !path.is_empty() && !path.starts_with('/') { - path.insert(0, '/') - } - if path.len() > 1 && path.ends_with('/') { - path.pop(); - } + path.insert(0, '/'); + }; self.parts .as_mut() .expect("Use after finish") diff --git a/src/scope.rs b/src/scope.rs index 6e7f28985..4ce4901af 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -313,14 +313,7 @@ impl Scope { /// } /// ``` pub fn handler>(mut self, path: &str, handler: H) -> Scope { - let mut path = path.trim().trim_right_matches('/').to_owned(); - if !path.is_empty() && !path.starts_with('/') { - path.insert(0, '/') - } - if path.len() > 1 && path.ends_with('/') { - path.pop(); - } - + let path = insert_slash(path.trim().trim_right_matches('/')); Rc::get_mut(&mut self.router) .expect("Multiple copies of scope router") .register_handler(&path, Box::new(WrapHandler::new(handler)), None); From f0f67072aece8f7cacb6be8fcf24d147ecfe1ee7 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 3 Sep 2018 21:35:11 -0700 Subject: [PATCH 096/219] Read client response until eof if connection header set to close #464 --- CHANGES.md | 4 ++++ src/client/parser.rs | 37 ++++++++++++++++++++++++++++++------- tests/test_client.rs | 34 +++++++++++++++++++++++++++++++++- 3 files changed, 67 insertions(+), 8 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index dd6cdcd20..b48c743c8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,6 +15,10 @@ * Handling scoped paths without leading slashes #460 +### Changed + +* Read client response until eof if connection header set to close #464 + ## [0.7.4] - 2018-08-23 diff --git a/src/client/parser.rs b/src/client/parser.rs index 0ee4598de..11252fa52 100644 --- a/src/client/parser.rs +++ b/src/client/parser.rs @@ -20,6 +20,7 @@ const MAX_HEADERS: usize = 96; #[derive(Default)] pub struct HttpResponseParser { decoder: Option, + eof: bool, // indicate that we read payload until stream eof } #[derive(Debug, Fail)] @@ -44,8 +45,14 @@ impl HttpResponseParser { match HttpResponseParser::parse_message(buf) .map_err(HttpResponseParserError::Error)? { - Async::Ready((msg, decoder)) => { - self.decoder = decoder; + Async::Ready((msg, info)) => { + if let Some((decoder, eof)) = info { + self.eof = eof; + self.decoder = Some(decoder); + } else { + self.eof = false; + self.decoder = None; + } return Ok(Async::Ready(msg)); } Async::NotReady => { @@ -97,7 +104,12 @@ impl HttpResponseParser { return Ok(Async::NotReady); } if stream_finished { - return Err(PayloadError::Incomplete); + // read untile eof? 
+ if self.eof { + return Ok(Async::Ready(None)); + } else { + return Err(PayloadError::Incomplete); + } } } Err(err) => return Err(err.into()), @@ -110,7 +122,7 @@ impl HttpResponseParser { fn parse_message( buf: &mut BytesMut, - ) -> Poll<(ClientResponse, Option), ParseError> { + ) -> Poll<(ClientResponse, Option<(EncodingDecoder, bool)>), ParseError> { // Unsafe: we read only this data only after httparse parses headers into. // performance bump for pipeline benchmarks. let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() }; @@ -156,12 +168,12 @@ impl HttpResponseParser { } let decoder = if status == StatusCode::SWITCHING_PROTOCOLS { - Some(EncodingDecoder::eof()) + Some((EncodingDecoder::eof(), true)) } else if let Some(len) = hdrs.get(header::CONTENT_LENGTH) { // Content-Length if let Ok(s) = len.to_str() { if let Ok(len) = s.parse::() { - Some(EncodingDecoder::length(len)) + Some((EncodingDecoder::length(len), false)) } else { debug!("illegal Content-Length: {:?}", len); return Err(ParseError::Header); @@ -172,7 +184,18 @@ impl HttpResponseParser { } } else if chunked(&hdrs)? { // Chunked encoding - Some(EncodingDecoder::chunked()) + Some((EncodingDecoder::chunked(), false)) + } else if let Some(value) = hdrs.get(header::CONNECTION) { + let close = if let Ok(s) = value.to_str() { + s == "close" + } else { + false + }; + if close { + Some((EncodingDecoder::eof(), true)) + } else { + None + } } else { None }; diff --git a/tests/test_client.rs b/tests/test_client.rs index d4a2ce1f3..8707114fa 100644 --- a/tests/test_client.rs +++ b/tests/test_client.rs @@ -8,7 +8,8 @@ extern crate rand; #[cfg(all(unix, feature = "uds"))] extern crate tokio_uds; -use std::io::Read; +use std::io::{Read, Write}; +use std::{net, thread}; use bytes::Bytes; use flate2::read::GzDecoder; @@ -470,3 +471,34 @@ fn test_default_headers() { "\"" ))); } + +#[test] +fn client_read_until_eof() { + let addr = test::TestServer::unused_addr(); + + thread::spawn(move || { + let lst = net::TcpListener::bind(addr).unwrap(); + + for stream in lst.incoming() { + let mut stream = stream.unwrap(); + let mut b = [0; 1000]; + let _ = stream.read(&mut b).unwrap(); + let _ = stream + .write_all(b"HTTP/1.1 200 OK\r\nconnection: close\r\n\r\nwelcome!"); + } + }); + + let mut sys = actix::System::new("test"); + + // client request + let req = client::ClientRequest::get(format!("http://{}/", addr).as_str()) + .finish() + .unwrap(); + println!("TEST: {:?}", req); + let response = sys.block_on(req.send()).unwrap(); + assert!(response.status().is_success()); + + // read response + let bytes = sys.block_on(response.body()).unwrap(); + assert_eq!(bytes, Bytes::from_static(b"welcome!")); +} From 4ca9fd2ad165118d79be478fac0a6bd5750c1cc7 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 3 Sep 2018 22:09:12 -0700 Subject: [PATCH 097/219] remove debug print --- CHANGES.md | 3 ++- tests/test_client.rs | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b48c743c8..954a6c313 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.5] - 2018-09-02 +## [0.7.5] - 2018-09-04 ### Added @@ -15,6 +15,7 @@ * Handling scoped paths without leading slashes #460 + ### Changed * Read client response until eof if connection header set to close #464 diff --git a/tests/test_client.rs b/tests/test_client.rs index 8707114fa..28d60faf0 100644 --- a/tests/test_client.rs +++ b/tests/test_client.rs @@ -494,7 +494,6 @@ fn client_read_until_eof() { let req = 
client::ClientRequest::get(format!("http://{}/", addr).as_str()) .finish() .unwrap(); - println!("TEST: {:?}", req); let response = sys.block_on(req.send()).unwrap(); assert!(response.status().is_success()); From 86fdbb47a59f7b963ed2d03720420d22a2732c50 Mon Sep 17 00:00:00 2001 From: Jan Michael Auer Date: Wed, 5 Sep 2018 10:41:23 +0200 Subject: [PATCH 098/219] Fix system_exit in HttpServer (#501) --- src/server/http.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/http.rs b/src/server/http.rs index 05f0b2442..ed463f75d 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -73,7 +73,7 @@ where backlog: 2048, keep_alive: KeepAlive::Os, shutdown_timeout: 30, - exit: true, + exit: false, no_http2: false, no_signals: false, maxconn: 102_400, From 42f3773becb285ef4caff000540914b2f3282f6a Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 5 Sep 2018 09:03:58 -0700 Subject: [PATCH 099/219] update changes --- CHANGES.md | 7 +++++++ Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 954a6c313..2f236d84d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.7.6] - 2018-09-xx + +### Fixed + +* Fix system_exit in HttpServer #501 + + ## [0.7.5] - 2018-09-04 ### Added diff --git a/Cargo.toml b/Cargo.toml index 631b48dc9..704eac47a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.5" +version = "0.7.6" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md" From 4251b0bc10cd4f650d7470d297c563e0d02e3678 Mon Sep 17 00:00:00 2001 From: Maciej Piechotka Date: Wed, 5 Sep 2018 16:14:54 +0200 Subject: [PATCH 100/219] Refactor resource route parsing to allow repetition in the regexes --- CHANGES.md | 2 +- src/router.rs | 121 ++++++++++++++++++++++++-------------------------- 2 files changed, 58 insertions(+), 65 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2f236d84d..3a5a68de4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ ### Fixed * Fix system_exit in HttpServer #501 - +* Fix parsing of route param containin regexes with repetition #500 ## [0.7.5] - 2018-09-04 diff --git a/src/router.rs b/src/router.rs index 6dc6224ac..4a0f672c5 100644 --- a/src/router.rs +++ b/src/router.rs @@ -815,73 +815,56 @@ impl ResourceDef { Ok(()) } - fn parse( - pattern: &str, for_prefix: bool, - ) -> (String, Vec, bool, usize) { + fn parse_param( + pattern: &str, + ) -> (PatternElement, String, &str) { const DEFAULT_PATTERN: &str = "[^/]+"; - - let mut re1 = String::from("^"); - let mut re2 = String::new(); - let mut el = String::new(); - let mut in_param = false; - let mut in_param_pattern = false; - let mut param_name = String::new(); - let mut param_pattern = String::from(DEFAULT_PATTERN); - let mut is_dynamic = false; - let mut elems = Vec::new(); - let mut len = 0; - - for ch in pattern.chars() { - if in_param { - // In parameter segment: `{....}` - if ch == '}' { - elems.push(PatternElement::Var(param_name.clone())); - re1.push_str(&format!(r"(?P<{}>{})", ¶m_name, ¶m_pattern)); - - param_name.clear(); - param_pattern = String::from(DEFAULT_PATTERN); - - len = 0; - in_param_pattern = false; - in_param = false; - } else if ch == ':' { - // The parameter name has been determined; custom pattern land - in_param_pattern = true; - param_pattern.clear(); - } else if in_param_pattern { - // Ignore leading whitespace for pattern - if !(ch == ' ' && 
param_pattern.is_empty()) { - param_pattern.push(ch); - } - } else { - param_name.push(ch); - } - } else if ch == '{' { - in_param = true; - is_dynamic = true; - elems.push(PatternElement::Str(el.clone())); - el.clear(); - } else { - re1.push_str(escape(&ch.to_string()).as_str()); - re2.push(ch); - el.push(ch); - len += 1; + let mut params_nesting = 0usize; + let close_idx = pattern.find(|c| match c { + '{' => {params_nesting += 1; false}, + '}' => {params_nesting -= 1; params_nesting == 0}, + _ => false + }).expect("malformed param"); + let (mut param, rem) = pattern.split_at(close_idx + 1); + param = ¶m[1..param.len() - 1]; // Remove outer brackets + let (name, pattern) = match param.find(":") { + Some(idx) => { + let (name, pattern) = param.split_at(idx); + (name, &pattern[1..]) } - } - - if !el.is_empty() { - elems.push(PatternElement::Str(el.clone())); - } - - let re = if is_dynamic { - if !for_prefix { - re1.push('$'); - } - re1 - } else { - re2 + None => (param, DEFAULT_PATTERN) }; - (re, elems, is_dynamic, len) + (PatternElement::Var(name.to_string()), format!(r"(?P<{}>{})", &name, &pattern), rem) + } + + fn parse( + mut pattern: &str, for_prefix: bool, + ) -> (String, Vec, bool, usize) { + if pattern.find("{").is_none() { + return (String::from(pattern), vec![PatternElement::Str(String::from(pattern))], false, pattern.chars().count()) + }; + + let mut elems = Vec::new(); + let mut re = String::from("^"); + + while let Some(idx) = pattern.find("{") { + let (prefix, rem) = pattern.split_at(idx); + elems.push(PatternElement::Str(String::from(prefix))); + re.push_str(&escape(prefix)); + let (param_pattern, re_part, rem) = Self::parse_param(rem); + elems.push(param_pattern); + re.push_str(&re_part); + pattern = rem; + } + + elems.push(PatternElement::Str(String::from(pattern))); + re.push_str(&escape(pattern)); + + if !for_prefix { + re.push_str("$"); + } + + (re, elems, true, pattern.chars().count()) } } @@ -1072,6 +1055,16 @@ mod tests { let info = re.match_with_params(&req, 0).unwrap(); assert_eq!(info.get("version").unwrap(), "151"); assert_eq!(info.get("id").unwrap(), "adahg32"); + + let re = ResourceDef::new("/{id:[[:digit:]]{6}}"); + assert!(re.is_match("/012345")); + assert!(!re.is_match("/012")); + assert!(!re.is_match("/01234567")); + assert!(!re.is_match("/XXXXXX")); + + let req = TestRequest::with_uri("/012345").finish(); + let info = re.match_with_params(&req, 0).unwrap(); + assert_eq!(info.get("id").unwrap(), "012345"); } #[test] From 002bb24b26fbdfa6a664a10a109d763ca2f3f989 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 7 Sep 2018 20:46:43 -0700 Subject: [PATCH 101/219] unhide SessionBackend and SessionImpl traits and cleanup warnings --- CHANGES.md | 6 ++++++ Cargo.toml | 1 + src/client/connector.rs | 1 + src/lib.rs | 1 + src/middleware/session.rs | 8 ++++++-- src/router.rs | 36 +++++++++++++++++++++++++----------- src/server/http.rs | 12 ++++++------ 7 files changed, 46 insertions(+), 19 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 3a5a68de4..e5de591f0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,8 +5,14 @@ ### Fixed * Fix system_exit in HttpServer #501 + * Fix parsing of route param containin regexes with repetition #500 +### Changes + +* Unhide `SessionBackend` and `SessionImpl` traits #455 + + ## [0.7.5] - 2018-09-04 ### Added diff --git a/Cargo.toml b/Cargo.toml index 704eac47a..6855c0ead 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -101,6 +101,7 @@ tokio-io = "0.1" tokio-tcp = "0.1" tokio-timer = "0.2" tokio-reactor = "0.1" +tokio-current-thread 
= "0.1" # native-tls native-tls = { version="0.2", optional = true } diff --git a/src/client/connector.rs b/src/client/connector.rs index 239a00c5e..896f98a41 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -204,6 +204,7 @@ impl Paused { /// `ClientConnector` type is responsible for transport layer of a /// client connection. pub struct ClientConnector { + #[allow(dead_code)] connector: SslConnector, stats: ClientConnectorStats, diff --git a/src/lib.rs b/src/lib.rs index f57ab937e..2559f6460 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -118,6 +118,7 @@ extern crate parking_lot; extern crate rand; extern crate slab; extern crate tokio; +extern crate tokio_current_thread; extern crate tokio_io; extern crate tokio_reactor; extern crate tokio_tcp; diff --git a/src/middleware/session.rs b/src/middleware/session.rs index 7bf5c0e95..e8b0e5558 100644 --- a/src/middleware/session.rs +++ b/src/middleware/session.rs @@ -270,14 +270,17 @@ impl> Middleware for SessionStorage { } /// A simple key-value storage interface that is internally used by `Session`. -#[doc(hidden)] pub trait SessionImpl: 'static { + /// Get session value by key fn get(&self, key: &str) -> Option<&str>; + /// Set session value fn set(&mut self, key: &str, value: String); + /// Remove specific key from session fn remove(&mut self, key: &str); + /// Remove all values from session fn clear(&mut self); /// Write session to storage backend. @@ -285,9 +288,10 @@ pub trait SessionImpl: 'static { } /// Session's storage backend trait definition. -#[doc(hidden)] pub trait SessionBackend: Sized + 'static { + /// Session item type Session: SessionImpl; + /// Future that reads session type ReadFuture: Future; /// Parse the session from request and load data from a storage backend. 
diff --git a/src/router.rs b/src/router.rs index 4a0f672c5..ab84838f1 100644 --- a/src/router.rs +++ b/src/router.rs @@ -815,16 +815,21 @@ impl ResourceDef { Ok(()) } - fn parse_param( - pattern: &str, - ) -> (PatternElement, String, &str) { + fn parse_param(pattern: &str) -> (PatternElement, String, &str) { const DEFAULT_PATTERN: &str = "[^/]+"; let mut params_nesting = 0usize; - let close_idx = pattern.find(|c| match c { - '{' => {params_nesting += 1; false}, - '}' => {params_nesting -= 1; params_nesting == 0}, - _ => false - }).expect("malformed param"); + let close_idx = pattern + .find(|c| match c { + '{' => { + params_nesting += 1; + false + } + '}' => { + params_nesting -= 1; + params_nesting == 0 + } + _ => false, + }).expect("malformed param"); let (mut param, rem) = pattern.split_at(close_idx + 1); param = ¶m[1..param.len() - 1]; // Remove outer brackets let (name, pattern) = match param.find(":") { @@ -832,16 +837,25 @@ impl ResourceDef { let (name, pattern) = param.split_at(idx); (name, &pattern[1..]) } - None => (param, DEFAULT_PATTERN) + None => (param, DEFAULT_PATTERN), }; - (PatternElement::Var(name.to_string()), format!(r"(?P<{}>{})", &name, &pattern), rem) + ( + PatternElement::Var(name.to_string()), + format!(r"(?P<{}>{})", &name, &pattern), + rem, + ) } fn parse( mut pattern: &str, for_prefix: bool, ) -> (String, Vec, bool, usize) { if pattern.find("{").is_none() { - return (String::from(pattern), vec![PatternElement::Str(String::from(pattern))], false, pattern.chars().count()) + return ( + String::from(pattern), + vec![PatternElement::Str(String::from(pattern))], + false, + pattern.chars().count(), + ); }; let mut elems = Vec::new(); diff --git a/src/server/http.rs b/src/server/http.rs index ed463f75d..eafd45a3f 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -3,12 +3,12 @@ use std::rc::Rc; use std::sync::Arc; use std::{io, mem, net, time}; -use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System}; +use actix::{Actor, Addr, AsyncContext, Context, Handler, System}; use futures::{Future, Stream}; use net2::{TcpBuilder, TcpStreamExt}; use num_cpus; -use tokio::executor::current_thread; +use tokio_current_thread::spawn; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_tcp::TcpStream; @@ -585,7 +585,7 @@ where type Result = (); fn handle(&mut self, msg: Conn, _: &mut Context) -> Self::Result { - Arbiter::spawn(HttpChannel::new( + spawn(HttpChannel::new( Rc::clone(&self.settings), msg.io, msg.peer, @@ -693,7 +693,7 @@ where }; let _ = io.set_nodelay(true); - current_thread::spawn(HttpChannel::new(h, io, peer)); + spawn(HttpChannel::new(h, io, peer)); } } @@ -753,10 +753,10 @@ where let _ = io.set_nodelay(true); let rate = h.connection_rate(); - current_thread::spawn(self.acceptor.accept(io).then(move |res| { + spawn(self.acceptor.accept(io).then(move |res| { drop(rate); match res { - Ok(io) => current_thread::spawn(HttpChannel::new(h, io, peer)), + Ok(io) => spawn(HttpChannel::new(h, io, peer)), Err(err) => trace!("Can not establish connection: {}", err), } Ok(()) From cdb57b840e138a60b6b733648008ca877a916e2b Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 7 Sep 2018 20:47:54 -0700 Subject: [PATCH 102/219] prepare release --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index e5de591f0..0eb92dad1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.6] - 2018-09-xx +## [0.7.6] - 2018-09-07 ### Fixed From 003b05b095e9b6f63b03341df5ba4dcae7554215 Mon Sep 17 00:00:00 
2001 From: Maciej Piechotka Date: Tue, 11 Sep 2018 13:57:55 +0200 Subject: [PATCH 103/219] Don't ignore errors in std::fmt::Debug implementations (#506) --- src/client/request.rs | 20 ++++++++++---------- src/client/response.rs | 8 ++++---- src/httprequest.rs | 14 +++++++------- src/multipart.rs | 10 +++++----- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/src/client/request.rs b/src/client/request.rs index aff4ab485..76fb1be59 100644 --- a/src/client/request.rs +++ b/src/client/request.rs @@ -254,16 +254,16 @@ impl ClientRequest { impl fmt::Debug for ClientRequest { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let res = writeln!( + writeln!( f, "\nClientRequest {:?} {}:{}", self.version, self.method, self.uri - ); - let _ = writeln!(f, " headers:"); + )?; + writeln!(f, " headers:")?; for (key, val) in self.headers.iter() { - let _ = writeln!(f, " {:?}: {:?}", key, val); + writeln!(f, " {:?}: {:?}", key, val)?; } - res + Ok(()) } } @@ -750,16 +750,16 @@ fn parts<'a>( impl fmt::Debug for ClientRequestBuilder { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Some(ref parts) = self.request { - let res = writeln!( + writeln!( f, "\nClientRequestBuilder {:?} {}:{}", parts.version, parts.method, parts.uri - ); - let _ = writeln!(f, " headers:"); + )?; + writeln!(f, " headers:")?; for (key, val) in parts.headers.iter() { - let _ = writeln!(f, " {:?}: {:?}", key, val); + writeln!(f, " {:?}: {:?}", key, val)?; } - res + Ok(()) } else { write!(f, "ClientRequestBuilder(Consumed)") } diff --git a/src/client/response.rs b/src/client/response.rs index 0c094a2aa..5f1f42649 100644 --- a/src/client/response.rs +++ b/src/client/response.rs @@ -95,12 +95,12 @@ impl ClientResponse { impl fmt::Debug for ClientResponse { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let res = writeln!(f, "\nClientResponse {:?} {}", self.version(), self.status()); - let _ = writeln!(f, " headers:"); + writeln!(f, "\nClientResponse {:?} {}", self.version(), self.status())?; + writeln!(f, " headers:")?; for (key, val) in self.headers().iter() { - let _ = writeln!(f, " {:?}: {:?}", key, val); + writeln!(f, " {:?}: {:?}", key, val)?; } - res + Ok(()) } } diff --git a/src/httprequest.rs b/src/httprequest.rs index f4de81529..d8c49496a 100644 --- a/src/httprequest.rs +++ b/src/httprequest.rs @@ -354,24 +354,24 @@ impl FromRequest for HttpRequest { impl fmt::Debug for HttpRequest { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let res = writeln!( + writeln!( f, "\nHttpRequest {:?} {}:{}", self.version(), self.method(), self.path() - ); + )?; if !self.query_string().is_empty() { - let _ = writeln!(f, " query: ?{:?}", self.query_string()); + writeln!(f, " query: ?{:?}", self.query_string())?; } if !self.match_info().is_empty() { - let _ = writeln!(f, " params: {:?}", self.match_info()); + writeln!(f, " params: {:?}", self.match_info())?; } - let _ = writeln!(f, " headers:"); + writeln!(f, " headers:")?; for (key, val) in self.headers().iter() { - let _ = writeln!(f, " {:?}: {:?}", key, val); + writeln!(f, " {:?}: {:?}", key, val)?; } - res + Ok(()) } } diff --git a/src/multipart.rs b/src/multipart.rs index fe809294f..862f60ecb 100644 --- a/src/multipart.rs +++ b/src/multipart.rs @@ -441,13 +441,13 @@ where impl fmt::Debug for Field { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let res = writeln!(f, "\nMultipartField: {}", self.ct); - let _ = writeln!(f, " boundary: {}", self.inner.borrow().boundary); - let _ = writeln!(f, " headers:"); + writeln!(f, 
"\nMultipartField: {}", self.ct)?; + writeln!(f, " boundary: {}", self.inner.borrow().boundary)?; + writeln!(f, " headers:")?; for (key, val) in self.headers.iter() { - let _ = writeln!(f, " {:?}: {:?}", key, val); + writeln!(f, " {:?}: {:?}", key, val)?; } - res + Ok(()) } } From e0ae6b10cdefe597870496bb81a2d68078d8f92c Mon Sep 17 00:00:00 2001 From: "Robert G. Jakabosky" Date: Mon, 10 Sep 2018 01:49:12 +0800 Subject: [PATCH 104/219] Fix bug with HttpChannel linked list. --- src/server/channel.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/channel.rs b/src/server/channel.rs index bec1c4c87..79f9da40f 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -77,7 +77,7 @@ where type Error = (); fn poll(&mut self) -> Poll { - if self.node.is_some() { + if self.node.is_none() { let el = self as *mut _; self.node = Some(Node::new(el)); let _ = match self.proto { From 70b45659e235c53d81d2eb0814761ed14002e151 Mon Sep 17 00:00:00 2001 From: "Robert G. Jakabosky" Date: Mon, 10 Sep 2018 01:51:03 +0800 Subject: [PATCH 105/219] Make Node's `traverse` method take a closure instead of calling `shutdown` on each HttpChannel. --- src/server/channel.rs | 6 +++--- src/server/http.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/server/channel.rs b/src/server/channel.rs index 79f9da40f..7b63125e5 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -55,7 +55,7 @@ where } } - fn shutdown(&mut self) { + pub(crate) fn shutdown(&mut self) { match self.proto { Some(HttpProtocol::H1(ref mut h1)) => { let io = h1.io(); @@ -232,7 +232,7 @@ impl Node<()> { } } - pub(crate) fn traverse(&self) + pub(crate) fn traverse)>(&self, f: F) where T: IoStream, H: HttpHandler + 'static, @@ -247,7 +247,7 @@ impl Node<()> { if !n.element.is_null() { let ch: &mut HttpChannel = &mut *(&mut *(n.element as *mut _) as *mut () as *mut _); - ch.shutdown(); + f(ch); } } } else { diff --git a/src/server/http.rs b/src/server/http.rs index eafd45a3f..f83b74f37 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -637,7 +637,7 @@ where fn shutdown(&self, force: bool) { if force { - self.settings.head().traverse::(); + self.settings.head().traverse(|ch: &mut HttpChannel| ch.shutdown()); } } } From 04608b2ea6bb925a09b979cce473068e5658a327 Mon Sep 17 00:00:00 2001 From: "Robert G. Jakabosky" Date: Wed, 12 Sep 2018 00:24:10 +0800 Subject: [PATCH 106/219] Update changes. 
--- CHANGES.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 0eb92dad1..ccb2f1328 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,11 @@ # Changes +## [0.7.7] - 2018-09-xx + +### Fixed + +* Fix linked list of HttpChannels #504 + ## [0.7.6] - 2018-09-07 ### Fixed From 70a3f317d35b81089faf7dc0095cb336206e7a98 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 11 Sep 2018 11:24:05 -0700 Subject: [PATCH 107/219] fix failing requests to test server #508 --- CHANGES.md | 5 ++++- Cargo.toml | 2 +- src/server/http.rs | 11 +++++------ 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ccb2f1328..77cac1fe2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,11 +1,14 @@ # Changes -## [0.7.7] - 2018-09-xx +## [0.7.7] - 2018-09-11 ### Fixed * Fix linked list of HttpChannels #504 +* Fix requests to TestServer fail #508 + + ## [0.7.6] - 2018-09-07 ### Fixed diff --git a/Cargo.toml b/Cargo.toml index 6855c0ead..12a1ecf9c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.6" +version = "0.7.7" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md" diff --git a/src/server/http.rs b/src/server/http.rs index f83b74f37..f9b2689ef 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -3,12 +3,11 @@ use std::rc::Rc; use std::sync::Arc; use std::{io, mem, net, time}; -use actix::{Actor, Addr, AsyncContext, Context, Handler, System}; +use actix::{Arbiter, Actor, Addr, AsyncContext, Context, Handler, System}; use futures::{Future, Stream}; use net2::{TcpBuilder, TcpStreamExt}; use num_cpus; -use tokio_current_thread::spawn; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_tcp::TcpStream; @@ -585,7 +584,7 @@ where type Result = (); fn handle(&mut self, msg: Conn, _: &mut Context) -> Self::Result { - spawn(HttpChannel::new( + Arbiter::spawn(HttpChannel::new( Rc::clone(&self.settings), msg.io, msg.peer, @@ -693,7 +692,7 @@ where }; let _ = io.set_nodelay(true); - spawn(HttpChannel::new(h, io, peer)); + Arbiter::spawn(HttpChannel::new(h, io, peer)); } } @@ -753,10 +752,10 @@ where let _ = io.set_nodelay(true); let rate = h.connection_rate(); - spawn(self.acceptor.accept(io).then(move |res| { + Arbiter::spawn(self.acceptor.accept(io).then(move |res| { drop(rate); match res { - Ok(io) => spawn(HttpChannel::new(h, io, peer)), + Ok(io) => Arbiter::spawn(HttpChannel::new(h, io, peer)), Err(err) => trace!("Can not establish connection: {}", err), } Ok(()) From c3f8b5cf22c7d4b6c903a2a930d1cdd7c155c449 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 11 Sep 2018 11:25:32 -0700 Subject: [PATCH 108/219] clippy warnings --- src/router.rs | 6 +++--- src/server/http.rs | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/router.rs b/src/router.rs index ab84838f1..aa15e46d2 100644 --- a/src/router.rs +++ b/src/router.rs @@ -832,7 +832,7 @@ impl ResourceDef { }).expect("malformed param"); let (mut param, rem) = pattern.split_at(close_idx + 1); param = ¶m[1..param.len() - 1]; // Remove outer brackets - let (name, pattern) = match param.find(":") { + let (name, pattern) = match param.find(':') { Some(idx) => { let (name, pattern) = param.split_at(idx); (name, &pattern[1..]) @@ -849,7 +849,7 @@ impl ResourceDef { fn parse( mut pattern: &str, for_prefix: bool, ) -> (String, Vec, bool, usize) { - if pattern.find("{").is_none() { + if pattern.find('{').is_none() { return ( 
String::from(pattern), vec![PatternElement::Str(String::from(pattern))], @@ -861,7 +861,7 @@ impl ResourceDef { let mut elems = Vec::new(); let mut re = String::from("^"); - while let Some(idx) = pattern.find("{") { + while let Some(idx) = pattern.find('{') { let (prefix, rem) = pattern.split_at(idx); elems.push(PatternElement::Str(String::from(prefix))); re.push_str(&escape(prefix)); diff --git a/src/server/http.rs b/src/server/http.rs index f9b2689ef..948889f42 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -3,7 +3,7 @@ use std::rc::Rc; use std::sync::Arc; use std::{io, mem, net, time}; -use actix::{Arbiter, Actor, Addr, AsyncContext, Context, Handler, System}; +use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System}; use futures::{Future, Stream}; use net2::{TcpBuilder, TcpStreamExt}; @@ -636,7 +636,9 @@ where fn shutdown(&self, force: bool) { if force { - self.settings.head().traverse(|ch: &mut HttpChannel| ch.shutdown()); + self.settings + .head() + .traverse(|ch: &mut HttpChannel| ch.shutdown()); } } } From d65c72b44d24cb098031284eaedbd8a8e8c50c0b Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 15 Sep 2018 09:55:38 -0700 Subject: [PATCH 109/219] use server keep-alive timer as slow request timer --- CHANGES.md | 7 +++++++ src/scope.rs | 8 +++----- src/server/accept.rs | 11 +++++++---- src/server/channel.rs | 27 ++++++++++++++++++++++++++- src/server/h1.rs | 12 ++++++------ src/server/h2.rs | 3 ++- src/server/settings.rs | 12 +++++++++++- src/test.rs | 2 ++ tests/test_client.rs | 29 +++++++++++++++++------------ 9 files changed, 81 insertions(+), 30 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 77cac1fe2..c764a5926 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.7.8] - 2018-09-xx + +### Added + +* Use server `Keep-Alive` setting as slow request timeout. 
+ + ## [0.7.7] - 2018-09-11 ### Fixed diff --git a/src/scope.rs b/src/scope.rs index 4ce4901af..bd3daf163 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -821,11 +821,9 @@ mod tests { scope .route("/path1", Method::GET, |_: HttpRequest<_>| { HttpResponse::Ok() - }).route( - "/path1", - Method::DELETE, - |_: HttpRequest<_>| HttpResponse::Ok(), - ) + }).route("/path1", Method::DELETE, |_: HttpRequest<_>| { + HttpResponse::Ok() + }) }).finish(); let req = TestRequest::with_uri("/app/path1").request(); diff --git a/src/server/accept.rs b/src/server/accept.rs index d642c40f6..307a2a2f1 100644 --- a/src/server/accept.rs +++ b/src/server/accept.rs @@ -451,10 +451,13 @@ impl Accept { Delay::new( Instant::now() + Duration::from_millis(510), ).map_err(|_| ()) - .and_then(move |_| { - let _ = r.set_readiness(mio::Ready::readable()); - Ok(()) - }), + .and_then( + move |_| { + let _ = + r.set_readiness(mio::Ready::readable()); + Ok(()) + }, + ), ); Ok(()) }, diff --git a/src/server/channel.rs b/src/server/channel.rs index 7b63125e5..5119eb5f1 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -5,6 +5,7 @@ use std::{io, ptr, time}; use bytes::{Buf, BufMut, BytesMut}; use futures::{Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_timer::Delay; use super::settings::WorkerSettings; use super::{h1, h2, ConnectionTag, HttpHandler, IoStream}; @@ -30,6 +31,7 @@ where { proto: Option>, node: Option>>, + ka_timeout: Option, _tag: ConnectionTag, } @@ -42,9 +44,11 @@ where settings: Rc>, io: T, peer: Option, ) -> HttpChannel { let _tag = settings.connection(); + let ka_timeout = settings.keep_alive_timer(); HttpChannel { _tag, + ka_timeout, node: None, proto: Some(HttpProtocol::Unknown( settings, @@ -77,6 +81,21 @@ where type Error = (); fn poll(&mut self) -> Poll { + // keep-alive timer + if let Some(ref mut timer) = self.ka_timeout { + match timer.poll() { + Ok(Async::Ready(_)) => { + trace!("Slow request timed out, close connection"); + if let Some(n) = self.node.as_mut() { + n.remove() + }; + return Ok(Async::Ready(())); + } + Ok(Async::NotReady) => (), + Err(_) => panic!("Something is really wrong"), + } + } + if self.node.is_none() { let el = self as *mut _; self.node = Some(Node::new(el)); @@ -161,7 +180,12 @@ where match kind { ProtocolKind::Http1 => { self.proto = Some(HttpProtocol::H1(h1::Http1::new( - settings, io, addr, buf, is_eof, + settings, + io, + addr, + buf, + is_eof, + self.ka_timeout.take(), ))); return self.poll(); } @@ -171,6 +195,7 @@ where io, addr, buf.freeze(), + self.ka_timeout.take(), ))); return self.poll(); } diff --git a/src/server/h1.rs b/src/server/h1.rs index dc88cac9f..d6e13e227 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -91,7 +91,7 @@ where { pub fn new( settings: Rc>, stream: T, addr: Option, - buf: BytesMut, is_eof: bool, + buf: BytesMut, is_eof: bool, keepalive_timer: Option, ) -> Self { Http1 { flags: if is_eof { @@ -103,10 +103,10 @@ where decoder: H1Decoder::new(), payload: None, tasks: VecDeque::new(), - keepalive_timer: None, addr, buf, settings, + keepalive_timer, } } @@ -364,7 +364,7 @@ where if self.keepalive_timer.is_none() && keep_alive > 0 { trace!("Start keep-alive timer"); let mut timer = - Delay::new(Instant::now() + Duration::new(keep_alive, 0)); + Delay::new(Instant::now() + Duration::from_secs(keep_alive)); // register timer let _ = timer.poll(); self.keepalive_timer = Some(timer); @@ -632,7 +632,7 @@ mod tests { let readbuf = BytesMut::new(); let settings = Rc::new(wrk_settings()); - let mut h1 = 
Http1::new(Rc::clone(&settings), buf, None, readbuf, false); + let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false, None); h1.poll_io(); h1.poll_io(); assert_eq!(h1.tasks.len(), 1); @@ -645,7 +645,7 @@ mod tests { BytesMut::from(Vec::::from(&b"GET /test HTTP/1.1\r\n\r\n"[..])); let settings = Rc::new(wrk_settings()); - let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true); + let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true, None); h1.poll_io(); assert_eq!(h1.tasks.len(), 1); } @@ -656,7 +656,7 @@ mod tests { let readbuf = BytesMut::new(); let settings = Rc::new(wrk_settings()); - let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false); + let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false, None); h1.poll_io(); h1.poll_io(); assert!(h1.flags.contains(Flags::ERROR)); diff --git a/src/server/h2.rs b/src/server/h2.rs index 986888ff8..913e2cd70 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -59,6 +59,7 @@ where { pub fn new( settings: Rc>, io: T, addr: Option, buf: Bytes, + keepalive_timer: Option, ) -> Self { let extensions = io.extensions(); Http2 { @@ -68,10 +69,10 @@ where unread: if buf.is_empty() { None } else { Some(buf) }, inner: io, })), - keepalive_timer: None, addr, settings, extensions, + keepalive_timer, } } diff --git a/src/server/settings.rs b/src/server/settings.rs index e9ca0f851..fc0d931f0 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -13,7 +13,7 @@ use http::StatusCode; use lazycell::LazyCell; use parking_lot::Mutex; use time; -use tokio_timer::Interval; +use tokio_timer::{Delay, Interval}; use super::channel::Node; use super::message::{Request, RequestPool}; @@ -197,6 +197,16 @@ impl WorkerSettings { &self.h } + pub fn keep_alive_timer(&self) -> Option { + if self.keep_alive != 0 { + Some(Delay::new( + Instant::now() + Duration::from_secs(self.keep_alive), + )) + } else { + None + } + } + pub fn keep_alive(&self) -> u64 { self.keep_alive } diff --git a/src/test.rs b/src/test.rs index 64aef6638..c068086d5 100644 --- a/src/test.rs +++ b/src/test.rs @@ -120,6 +120,7 @@ impl TestServer { HttpServer::new(factory) .disable_signals() .listen(tcp) + .keep_alive(5) .start(); tx.send((System::current(), local_addr, TestServer::get_conn())) @@ -328,6 +329,7 @@ impl TestServerBuilder { config(&mut app); vec![app] }).workers(1) + .keep_alive(5) .disable_signals(); tx.send((System::current(), addr, TestServer::get_conn())) diff --git a/tests/test_client.rs b/tests/test_client.rs index 28d60faf0..8c5d5819d 100644 --- a/tests/test_client.rs +++ b/tests/test_client.rs @@ -407,24 +407,29 @@ fn test_client_cookie_handling() { let cookie2 = cookie2b.clone(); app.handler(move |req: &HttpRequest| { // Check cookies were sent correctly - req.cookie("cookie1").ok_or_else(err) - .and_then(|c1| if c1.value() == "value1" { + req.cookie("cookie1") + .ok_or_else(err) + .and_then(|c1| { + if c1.value() == "value1" { Ok(()) } else { Err(err()) - }) - .and_then(|()| req.cookie("cookie2").ok_or_else(err)) - .and_then(|c2| if c2.value() == "value2" { + } + }).and_then(|()| req.cookie("cookie2").ok_or_else(err)) + .and_then(|c2| { + if c2.value() == "value2" { Ok(()) } else { Err(err()) - }) - // Send some cookies back - .map(|_| HttpResponse::Ok() - .cookie(cookie1.clone()) - .cookie(cookie2.clone()) - .finish() - ) + } + }) + // Send some cookies back + .map(|_| { + HttpResponse::Ok() + .cookie(cookie1.clone()) + .cookie(cookie2.clone()) + .finish() + }) }) }); From 
9d1eefc38ff3bd1fa79ad33518c701b8e320f88b Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 15 Sep 2018 09:57:54 -0700 Subject: [PATCH 110/219] use 5 seconds keep-alive timer by default --- CHANGES.md | 4 ++++ src/server/http.rs | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index c764a5926..91e34ae34 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,6 +6,10 @@ * Use server `Keep-Alive` setting as slow request timeout. +### Changed + +* Use 5 seconds keep-alive timer by default. + ## [0.7.7] - 2018-09-11 diff --git a/src/server/http.rs b/src/server/http.rs index 948889f42..b6f577b02 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -70,7 +70,7 @@ where factory: Arc::new(f), host: None, backlog: 2048, - keep_alive: KeepAlive::Os, + keep_alive: KeepAlive::Timeout(5), shutdown_timeout: 30, exit: false, no_http2: false, @@ -131,7 +131,7 @@ where /// Set server keep-alive setting. /// - /// By default keep alive is set to a `Os`. + /// By default keep alive is set to a 5 seconds. pub fn keep_alive>(mut self, val: T) -> Self { self.keep_alive = val.into(); self From bbe69e5b8d914f6cc638db8fdeab6c1edbba3cfe Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 15 Sep 2018 10:00:54 -0700 Subject: [PATCH 111/219] update version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 12a1ecf9c..4a985016f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.7" +version = "0.7.8" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md" From 7449884ce3b7bb6a741b481bdb903622e90fb0aa Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 15 Sep 2018 17:09:07 -0700 Subject: [PATCH 112/219] fix wrong error message for path deserialize for i32 #510 --- CHANGES.md | 4 ++++ src/de.rs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 91e34ae34..6fdecc243 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,6 +10,10 @@ * Use 5 seconds keep-alive timer by default. +### Fixed + +* Fixed wrong error message for i16 type #510 + ## [0.7.7] - 2018-09-11 diff --git a/src/de.rs b/src/de.rs index ecb2fa9ae..59ab79ba9 100644 --- a/src/de.rs +++ b/src/de.rs @@ -175,7 +175,7 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> { parse_single_value!(deserialize_bool, visit_bool, "bool"); parse_single_value!(deserialize_i8, visit_i8, "i8"); parse_single_value!(deserialize_i16, visit_i16, "i16"); - parse_single_value!(deserialize_i32, visit_i32, "i16"); + parse_single_value!(deserialize_i32, visit_i32, "i32"); parse_single_value!(deserialize_i64, visit_i64, "i64"); parse_single_value!(deserialize_u8, visit_u8, "u8"); parse_single_value!(deserialize_u16, visit_u16, "u16"); From 03e318f44649aed6c00de19bdb93cc6b0377b1fd Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 15 Sep 2018 17:10:53 -0700 Subject: [PATCH 113/219] update changes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 6fdecc243..03fe9fbcf 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ ### Added -* Use server `Keep-Alive` setting as slow request timeout. 
+* Use server `Keep-Alive` setting as slow request timeout #439 ### Changed From 599e6b3385e5d433779937d21229935c7d90220e Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 17 Sep 2018 05:29:07 -0700 Subject: [PATCH 114/219] refactor channel node remove operation --- src/server/channel.rs | 40 ++++++++++++++-------------------------- 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/src/server/channel.rs b/src/server/channel.rs index 5119eb5f1..193c8e6e4 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -72,6 +72,18 @@ where } } +impl Drop for HttpChannel +where + T: IoStream, + H: HttpHandler + 'static, +{ + fn drop(&mut self) { + if let Some(mut node) = self.node.take() { + node.remove() + } + } +} + impl Future for HttpChannel where T: IoStream, @@ -86,9 +98,6 @@ where match timer.poll() { Ok(Async::Ready(_)) => { trace!("Slow request timed out, close connection"); - if let Some(n) = self.node.as_mut() { - n.remove() - }; return Ok(Async::Ready(())); } Ok(Async::NotReady) => (), @@ -116,28 +125,10 @@ where let mut is_eof = false; let kind = match self.proto { Some(HttpProtocol::H1(ref mut h1)) => { - let result = h1.poll(); - match result { - Ok(Async::Ready(())) | Err(_) => { - if let Some(n) = self.node.as_mut() { - n.remove() - }; - } - _ => (), - } - return result; + return h1.poll(); } Some(HttpProtocol::H2(ref mut h2)) => { - let result = h2.poll(); - match result { - Ok(Async::Ready(())) | Err(_) => { - if let Some(n) = self.node.as_mut() { - n.remove() - }; - } - _ => (), - } - return result; + return h2.poll(); } Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => { let mut disconnect = false; @@ -156,9 +147,6 @@ where } if disconnect { debug!("Ignored premature client disconnection"); - if let Some(n) = self.node.as_mut() { - n.remove() - }; return Err(()); } From bfb2f2e9e1ad254098c712eb7951273b9c997dce Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 17 Sep 2018 10:25:45 -0700 Subject: [PATCH 115/219] fix node.remove(), update next node pointer --- src/server/channel.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/server/channel.rs b/src/server/channel.rs index 193c8e6e4..89fd55b46 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -226,12 +226,15 @@ impl Node { fn remove(&mut self) { unsafe { self.element = ptr::null_mut(); - let next = self.next.take(); + let mut next = self.next.take(); let mut prev = self.prev.take(); if let Some(ref mut prev) = prev { prev.as_mut().unwrap().next = next; } + if let Some(ref mut next) = next { + next.as_mut().unwrap().prev = prev; + } } } } From 764103566d7e8eeb23304702179304ef1a9a1d89 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 17 Sep 2018 10:48:37 -0700 Subject: [PATCH 116/219] update changes --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 03fe9fbcf..3a28a82ea 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.8] - 2018-09-xx +## [0.7.8] - 2018-09-17 ### Added From f40153fca4374d30ee285b857f332664c96a765c Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 17 Sep 2018 11:39:03 -0700 Subject: [PATCH 117/219] fix node::insert() method, missing next element --- src/server/channel.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/server/channel.rs b/src/server/channel.rs index 89fd55b46..1795f8c27 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -208,13 +208,14 @@ impl Node { } } - fn insert(&mut 
self, next: &mut Node) { + fn insert(&mut self, next_el: &mut Node) { unsafe { - let next: *mut Node = next as *const _ as *mut _; + let next: *mut Node = next_el as *const _ as *mut _; - if let Some(ref mut next2) = self.next { + if let Some(next2) = self.next { let n = next2.as_mut().unwrap(); n.prev = Some(next); + next_el.next = Some(next2 as *mut _); } self.next = Some(next); @@ -226,13 +227,13 @@ impl Node { fn remove(&mut self) { unsafe { self.element = ptr::null_mut(); - let mut next = self.next.take(); - let mut prev = self.prev.take(); + let next = self.next.take(); + let prev = self.prev.take(); - if let Some(ref mut prev) = prev { + if let Some(prev) = prev { prev.as_mut().unwrap().next = next; } - if let Some(ref mut next) = next { + if let Some(next) = next { next.as_mut().unwrap().prev = prev; } } From 0dc96658f24f3e61842e1b5461a8492ef1c90649 Mon Sep 17 00:00:00 2001 From: Douman Date: Fri, 21 Sep 2018 07:24:10 +0300 Subject: [PATCH 118/219] Send response to inform client of error (#515) --- CHANGES.md | 6 ++++++ src/payload.rs | 4 +++- src/server/h1.rs | 24 +++++++++++++++++------- tests/test_server.rs | 2 +- 4 files changed, 27 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 3a28a82ea..36b6dc765 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,11 @@ # Changes +## [0.7.9] - 2018-09-x + +### Fixed + +* HTTP1 decoding errors are reported to the client. #512 + ## [0.7.8] - 2018-09-17 ### Added diff --git a/src/payload.rs b/src/payload.rs index 1d9281f51..382c0b0f5 100644 --- a/src/payload.rs +++ b/src/payload.rs @@ -1,6 +1,8 @@ //! Payload stream use bytes::{Bytes, BytesMut}; -use futures::task::{current as current_task, Task}; +use futures::task::Task; +#[cfg(not(test))] +use futures::task::current as current_task; use futures::{Async, Poll, Stream}; use std::cell::RefCell; use std::cmp; diff --git a/src/server/h1.rs b/src/server/h1.rs index d6e13e227..b715dfb6a 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -373,6 +373,16 @@ where Ok(Async::NotReady) } + fn push_response_entry(&mut self, status: StatusCode) { + self.tasks.push_back(Entry { + pipe: EntryPipe::Error(ServerError::err( + Version::HTTP_11, + status, + )), + flags: EntryFlags::empty(), + }); + } + pub fn parse(&mut self) { 'outer: loop { match self.decoder.decode(&mut self.buf, &self.settings) { @@ -439,13 +449,7 @@ where } // handler is not found - self.tasks.push_back(Entry { - pipe: EntryPipe::Error(ServerError::err( - Version::HTTP_11, - StatusCode::NOT_FOUND, - )), - flags: EntryFlags::empty(), - }); + self.push_response_entry(StatusCode::NOT_FOUND); } Ok(Some(Message::Chunk(chunk))) => { if let Some(ref mut payload) = self.payload { @@ -453,6 +457,7 @@ where } else { error!("Internal server error: unexpected payload chunk"); self.flags.insert(Flags::ERROR); + self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR); break; } } @@ -462,6 +467,7 @@ where } else { error!("Internal server error: unexpected eof"); self.flags.insert(Flags::ERROR); + self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR); break; } } @@ -482,6 +488,9 @@ where }; payload.set_error(e); } + + //Malformed requests should be responded with 400 + self.push_response_entry(StatusCode::BAD_REQUEST); break; } } @@ -660,6 +669,7 @@ mod tests { h1.poll_io(); h1.poll_io(); assert!(h1.flags.contains(Flags::ERROR)); + assert_eq!(h1.tasks.len(), 1); } #[test] diff --git a/tests/test_server.rs b/tests/test_server.rs index 97161a30f..52c47dd27 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ 
-11,6 +11,7 @@ extern crate rand; extern crate tokio; extern crate tokio_reactor; extern crate tokio_tcp; +extern crate tokio_current_thread as current_thread; use std::io::{Read, Write}; use std::sync::Arc; @@ -28,7 +29,6 @@ use h2::client as h2client; use modhttp::Request; use rand::distributions::Alphanumeric; use rand::Rng; -use tokio::executor::current_thread; use tokio::runtime::current_thread::Runtime; use tokio_tcp::TcpStream; From 1b298142e3b954003b419db015796bbcc702adcd Mon Sep 17 00:00:00 2001 From: Douman Date: Fri, 21 Sep 2018 08:45:22 +0300 Subject: [PATCH 119/219] Correct composing of multiple origins in cors (#518) --- CHANGES.md | 1 + src/middleware/cors.rs | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 36b6dc765..ecdb65ef1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,6 +5,7 @@ ### Fixed * HTTP1 decoding errors are reported to the client. #512 +* Correctly compose multiple allowed origins in CORS. #517 ## [0.7.8] - 2018-09-17 diff --git a/src/middleware/cors.rs b/src/middleware/cors.rs index e75dc73ee..f1adf0c4b 100644 --- a/src/middleware/cors.rs +++ b/src/middleware/cors.rs @@ -826,8 +826,8 @@ impl CorsBuilder { if let AllOrSome::Some(ref origins) = cors.origins { let s = origins .iter() - .fold(String::new(), |s, v| s + &v.to_string()); - cors.origins_str = Some(HeaderValue::try_from(s.as_str()).unwrap()); + .fold(String::new(), |s, v| format!("{}, {}", s, v)); + cors.origins_str = Some(HeaderValue::try_from(&s[2..]).unwrap()); } if !self.expose_hdrs.is_empty() { @@ -1122,16 +1122,18 @@ mod tests { let cors = Cors::build() .disable_vary_header() .allowed_origin("https://www.example.com") + .allowed_origin("https://www.google.com") .finish(); let resp: HttpResponse = HttpResponse::Ok().into(); let resp = cors.response(&req, resp).unwrap().response(); - assert_eq!( - &b"https://www.example.com"[..], - resp.headers() - .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) - .unwrap() - .as_bytes() - ); + + let origins_str = resp.headers().get(header::ACCESS_CONTROL_ALLOW_ORIGIN).unwrap().to_str().unwrap(); + + if origins_str.starts_with("https://www.example.com") { + assert_eq!("https://www.example.com, https://www.google.com", origins_str); + } else { + assert_eq!("https://www.google.com, https://www.example.com", origins_str); + } } #[test] From 782eeb5ded9bda6f41ad957315ce1e413873f83c Mon Sep 17 00:00:00 2001 From: Ashley Date: Wed, 26 Sep 2018 20:56:34 +1200 Subject: [PATCH 120/219] Reduced unsafe converage (#520) --- src/server/channel.rs | 26 +++++++------ src/server/h1writer.rs | 70 ++++++++++++++++++---------------- src/server/helpers.rs | 86 ++++++++++++++++++++++++------------------ src/server/mod.rs | 36 +++++++++--------- src/uri.rs | 2 +- 5 files changed, 121 insertions(+), 99 deletions(-) diff --git a/src/server/channel.rs b/src/server/channel.rs index 1795f8c27..3d753f655 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -209,31 +209,35 @@ impl Node { } fn insert(&mut self, next_el: &mut Node) { - unsafe { - let next: *mut Node = next_el as *const _ as *mut _; + let next: *mut Node = next_el as *const _ as *mut _; - if let Some(next2) = self.next { + if let Some(next2) = self.next { + unsafe { let n = next2.as_mut().unwrap(); n.prev = Some(next); - next_el.next = Some(next2 as *mut _); } - self.next = Some(next); + next_el.next = Some(next2 as *mut _); + } + self.next = Some(next); + unsafe { let next: &mut Node = &mut *next; next.prev = Some(self as *mut _); } } fn remove(&mut 
self) { - unsafe { - self.element = ptr::null_mut(); - let next = self.next.take(); - let prev = self.prev.take(); + self.element = ptr::null_mut(); + let next = self.next.take(); + let prev = self.prev.take(); - if let Some(prev) = prev { + if let Some(prev) = prev { + unsafe { prev.as_mut().unwrap().next = next; } - if let Some(next) = next { + } + if let Some(next) = next { + unsafe { next.as_mut().unwrap().prev = prev; } } diff --git a/src/server/h1writer.rs b/src/server/h1writer.rs index 422f0ebc1..72a68aeb0 100644 --- a/src/server/h1writer.rs +++ b/src/server/h1writer.rs @@ -196,45 +196,49 @@ impl Writer for H1Writer { let mut pos = 0; let mut has_date = false; let mut remaining = buffer.remaining_mut(); - unsafe { - let mut buf = &mut *(buffer.bytes_mut() as *mut [u8]); - for (key, value) in msg.headers() { - match *key { - TRANSFER_ENCODING => continue, - CONTENT_ENCODING => if encoding != ContentEncoding::Identity { - continue; - }, - CONTENT_LENGTH => match info.length { - ResponseLength::None => (), - _ => continue, - }, - DATE => { - has_date = true; - } - _ => (), + let mut buf = unsafe { &mut *(buffer.bytes_mut() as *mut [u8]) }; + for (key, value) in msg.headers() { + match *key { + TRANSFER_ENCODING => continue, + CONTENT_ENCODING => if encoding != ContentEncoding::Identity { + continue; + }, + CONTENT_LENGTH => match info.length { + ResponseLength::None => (), + _ => continue, + }, + DATE => { + has_date = true; } + _ => (), + } - let v = value.as_ref(); - let k = key.as_str().as_bytes(); - let len = k.len() + v.len() + 4; - if len > remaining { + let v = value.as_ref(); + let k = key.as_str().as_bytes(); + let len = k.len() + v.len() + 4; + if len > remaining { + unsafe { buffer.advance_mut(pos); - pos = 0; - buffer.reserve(len); - remaining = buffer.remaining_mut(); + } + pos = 0; + buffer.reserve(len); + remaining = buffer.remaining_mut(); + unsafe { buf = &mut *(buffer.bytes_mut() as *mut _); } - - buf[pos..pos + k.len()].copy_from_slice(k); - pos += k.len(); - buf[pos..pos + 2].copy_from_slice(b": "); - pos += 2; - buf[pos..pos + v.len()].copy_from_slice(v); - pos += v.len(); - buf[pos..pos + 2].copy_from_slice(b"\r\n"); - pos += 2; - remaining -= len; } + + buf[pos..pos + k.len()].copy_from_slice(k); + pos += k.len(); + buf[pos..pos + 2].copy_from_slice(b": "); + pos += 2; + buf[pos..pos + v.len()].copy_from_slice(v); + pos += v.len(); + buf[pos..pos + 2].copy_from_slice(b"\r\n"); + pos += 2; + remaining -= len; + } + unsafe { buffer.advance_mut(pos); } diff --git a/src/server/helpers.rs b/src/server/helpers.rs index f7e030f2d..9c0b7f40c 100644 --- a/src/server/helpers.rs +++ b/src/server/helpers.rs @@ -29,20 +29,24 @@ pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesM let lut_ptr = DEC_DIGITS_LUT.as_ptr(); let four = n > 999; + // decode 2 more chars, if > 2 chars + let d1 = (n % 100) << 1; + n /= 100; + curr -= 2; unsafe { - // decode 2 more chars, if > 2 chars - let d1 = (n % 100) << 1; - n /= 100; - curr -= 2; ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2); + } - // decode last 1 or 2 chars - if n < 10 { - curr -= 1; + // decode last 1 or 2 chars + if n < 10 { + curr -= 1; + unsafe { *buf_ptr.offset(curr) = (n as u8) + b'0'; - } else { - let d1 = n << 1; - curr -= 2; + } + } else { + let d1 = n << 1; + curr -= 2; + unsafe { ptr::copy_nonoverlapping( lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), @@ -107,47 +111,55 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) { } pub(crate) fn 
convert_usize(mut n: usize, bytes: &mut BytesMut) { - unsafe { - let mut curr: isize = 39; - let mut buf: [u8; 41] = mem::uninitialized(); - buf[39] = b'\r'; - buf[40] = b'\n'; - let buf_ptr = buf.as_mut_ptr(); - let lut_ptr = DEC_DIGITS_LUT.as_ptr(); + let mut curr: isize = 39; + let mut buf: [u8; 41] = unsafe { mem::uninitialized() }; + buf[39] = b'\r'; + buf[40] = b'\n'; + let buf_ptr = buf.as_mut_ptr(); + let lut_ptr = DEC_DIGITS_LUT.as_ptr(); - // eagerly decode 4 characters at a time - while n >= 10_000 { - let rem = (n % 10_000) as isize; - n /= 10_000; + // eagerly decode 4 characters at a time + while n >= 10_000 { + let rem = (n % 10_000) as isize; + n /= 10_000; - let d1 = (rem / 100) << 1; - let d2 = (rem % 100) << 1; - curr -= 4; + let d1 = (rem / 100) << 1; + let d2 = (rem % 100) << 1; + curr -= 4; + unsafe { ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2); } + } - // if we reach here numbers are <= 9999, so at most 4 chars long - let mut n = n as isize; // possibly reduce 64bit math + // if we reach here numbers are <= 9999, so at most 4 chars long + let mut n = n as isize; // possibly reduce 64bit math - // decode 2 more chars, if > 2 chars - if n >= 100 { - let d1 = (n % 100) << 1; - n /= 100; - curr -= 2; + // decode 2 more chars, if > 2 chars + if n >= 100 { + let d1 = (n % 100) << 1; + n /= 100; + curr -= 2; + unsafe { ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); } + } - // decode last 1 or 2 chars - if n < 10 { - curr -= 1; + // decode last 1 or 2 chars + if n < 10 { + curr -= 1; + unsafe { *buf_ptr.offset(curr) = (n as u8) + b'0'; - } else { - let d1 = n << 1; - curr -= 2; + } + } else { + let d1 = n << 1; + curr -= 2; + unsafe { ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); } + } + unsafe { bytes.extend_from_slice(slice::from_raw_parts( buf_ptr.offset(curr), 41 - curr as usize, diff --git a/src/server/mod.rs b/src/server/mod.rs index 009e06ccd..96ec570a1 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -396,27 +396,29 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static { if buf.remaining_mut() < LW_BUFFER_SIZE { buf.reserve(HW_BUFFER_SIZE); } - unsafe { - match self.read(buf.bytes_mut()) { - Ok(n) => { - if n == 0 { - return Ok(Async::Ready((read_some, true))); - } else { - read_some = true; + + let read = unsafe { self.read(buf.bytes_mut()) }; + match read { + Ok(n) => { + if n == 0 { + return Ok(Async::Ready((read_some, true))); + } else { + read_some = true; + unsafe { buf.advance_mut(n); } } - Err(e) => { - return if e.kind() == io::ErrorKind::WouldBlock { - if read_some { - Ok(Async::Ready((read_some, false))) - } else { - Ok(Async::NotReady) - } + } + Err(e) => { + return if e.kind() == io::ErrorKind::WouldBlock { + if read_some { + Ok(Async::Ready((read_some, false))) } else { - Err(e) - }; - } + Ok(Async::NotReady) + } + } else { + Err(e) + }; } } } diff --git a/src/uri.rs b/src/uri.rs index 752ddad86..881cf20a8 100644 --- a/src/uri.rs +++ b/src/uri.rs @@ -148,7 +148,7 @@ impl Quoter { if let Some(data) = cloned { // Unsafe: we get data from http::Uri, which does utf-8 checks already // this code only decodes valid pct encoded values - Some(unsafe { Rc::new(String::from_utf8_unchecked(data)) }) + Some(Rc::new(unsafe { String::from_utf8_unchecked(data) })) } else { None } From 59deb4b40d770d53690e354cde8de071d94d86a8 Mon Sep 17 00:00:00 2001 From: sapir Date: Fri, 28 Sep 2018 04:15:02 +0300 Subject: 
[PATCH 121/219] Try to separate HTTP/1 read & write disconnect handling, to fix #511. (#514) --- src/server/h1.rs | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index b715dfb6a..afe143b4a 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -136,7 +136,7 @@ where } } - fn notify_disconnect(&mut self) { + fn write_disconnected(&mut self) { self.flags.insert(Flags::WRITE_DISCONNECTED); // notify all tasks @@ -144,17 +144,18 @@ where for task in &mut self.tasks { task.pipe.disconnected(); } - } - fn client_disconnect(&mut self) { - // notify all tasks - self.notify_disconnect(); // kill keepalive self.keepalive_timer.take(); + } - // on parse error, stop reading stream but tasks need to be - // completed - self.flags.insert(Flags::ERROR); + fn read_disconnected(&mut self) { + self.flags.insert( + Flags::READ_DISCONNECTED + // on parse error, stop reading stream but tasks need to be + // completed + | Flags::ERROR, + ); if let Some(mut payload) = self.payload.take() { payload.set_error(PayloadError::Incomplete); @@ -225,16 +226,17 @@ where self.parse(); } if disconnected { + self.read_disconnected(); // delay disconnect until all tasks have finished. - self.flags.insert(Flags::READ_DISCONNECTED); if self.tasks.is_empty() { - self.client_disconnect(); + self.write_disconnected(); } } } Ok(Async::NotReady) => (), Err(_) => { - self.client_disconnect(); + self.read_disconnected(); + self.write_disconnected(); } } } @@ -291,7 +293,8 @@ where Err(err) => { // it is not possible to recover from error // during pipe handling, so just drop connection - self.notify_disconnect(); + self.read_disconnected(); + self.write_disconnected(); self.tasks[idx].flags.insert(EntryFlags::ERROR); error!("Unhandled error1: {}", err); continue; @@ -304,7 +307,8 @@ where self.tasks[idx].flags.insert(EntryFlags::FINISHED) } Err(err) => { - self.notify_disconnect(); + self.read_disconnected(); + self.write_disconnected(); self.tasks[idx].flags.insert(EntryFlags::ERROR); error!("Unhandled error: {}", err); continue; @@ -332,7 +336,8 @@ where Ok(Async::NotReady) => return Ok(Async::NotReady), Err(err) => { debug!("Error sending data: {}", err); - self.notify_disconnect(); + self.read_disconnected(); + self.write_disconnected(); return Err(()); } Ok(Async::Ready(_)) => { @@ -472,10 +477,11 @@ where } } Ok(None) => { - if self.flags.contains(Flags::READ_DISCONNECTED) - && self.tasks.is_empty() - { - self.client_disconnect(); + if self.flags.contains(Flags::READ_DISCONNECTED) { + self.read_disconnected(); + if self.tasks.is_empty() { + self.write_disconnected(); + } } break; } From 52195bbf167618039ef5b39c9e83c06643052e0b Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 18:17:58 -0700 Subject: [PATCH 122/219] update version --- CHANGES.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ecdb65ef1..517f8cbe5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,8 +5,12 @@ ### Fixed * HTTP1 decoding errors are reported to the client. #512 + * Correctly compose multiple allowed origins in CORS. 
#517 +* Websocket server finished() isn't called if client disconnects #511 + + ## [0.7.8] - 2018-09-17 ### Added diff --git a/Cargo.toml b/Cargo.toml index 4a985016f..59a48a0e9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.8" +version = "0.7.9" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md" From 1907102685a7a1b09a4689b304038f69b8f4b7ef Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 7 Sep 2018 23:34:27 -0700 Subject: [PATCH 123/219] switch to actix-net server --- Cargo.toml | 3 +- src/lib.rs | 2 + src/server/accept.rs | 475 -------------------------- src/server/channel.rs | 5 +- src/server/http.rs | 743 ++++++++++++++++++----------------------- src/server/mod.rs | 47 +-- src/server/server.rs | 528 ----------------------------- src/server/settings.rs | 18 +- src/server/worker.rs | 139 -------- 9 files changed, 341 insertions(+), 1619 deletions(-) delete mode 100644 src/server/accept.rs delete mode 100644 src/server/server.rs delete mode 100644 src/server/worker.rs diff --git a/Cargo.toml b/Cargo.toml index 59a48a0e9..d4ea4fc1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,7 @@ default = ["session", "brotli", "flate2-c"] tls = ["native-tls", "tokio-tls"] # openssl -alpn = ["openssl", "tokio-openssl"] +alpn = ["openssl", "tokio-openssl", "actix-net/ssl"] # rustls rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"] @@ -57,6 +57,7 @@ flate2-rust = ["flate2/rust_backend"] [dependencies] actix = "0.7.0" +actix-net = { git="https://github.com/actix/actix-net.git" } base64 = "0.9" bitflags = "1.0" diff --git a/src/lib.rs b/src/lib.rs index 2559f6460..1dfe143ef 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -140,6 +140,8 @@ extern crate serde_urlencoded; extern crate percent_encoding; extern crate serde_json; extern crate smallvec; + +extern crate actix_net; #[macro_use] extern crate actix as actix_inner; diff --git a/src/server/accept.rs b/src/server/accept.rs deleted file mode 100644 index 307a2a2f1..000000000 --- a/src/server/accept.rs +++ /dev/null @@ -1,475 +0,0 @@ -use std::sync::mpsc as sync_mpsc; -use std::time::{Duration, Instant}; -use std::{io, net, thread}; - -use futures::{sync::mpsc, Future}; -use mio; -use slab::Slab; -use tokio_timer::Delay; - -use actix::{msgs::Execute, Arbiter, System}; - -use super::server::ServerCommand; -use super::worker::{Conn, WorkerClient}; -use super::Token; - -pub(crate) enum Command { - Pause, - Resume, - Stop, - Worker(WorkerClient), -} - -struct ServerSocketInfo { - addr: net::SocketAddr, - token: Token, - handler: Token, - sock: mio::net::TcpListener, - timeout: Option, -} - -#[derive(Clone)] -pub(crate) struct AcceptNotify(mio::SetReadiness); - -impl AcceptNotify { - pub(crate) fn new(ready: mio::SetReadiness) -> Self { - AcceptNotify(ready) - } - - pub(crate) fn notify(&self) { - let _ = self.0.set_readiness(mio::Ready::readable()); - } -} - -impl Default for AcceptNotify { - fn default() -> Self { - AcceptNotify::new(mio::Registration::new2().1) - } -} - -pub(crate) struct AcceptLoop { - cmd_reg: Option, - cmd_ready: mio::SetReadiness, - notify_reg: Option, - notify_ready: mio::SetReadiness, - tx: sync_mpsc::Sender, - rx: Option>, - srv: Option<( - mpsc::UnboundedSender, - mpsc::UnboundedReceiver, - )>, -} - -impl AcceptLoop { - pub fn new() -> AcceptLoop { - let (tx, rx) = sync_mpsc::channel(); - let (cmd_reg, cmd_ready) = mio::Registration::new2(); - let (notify_reg, 
notify_ready) = mio::Registration::new2(); - - AcceptLoop { - tx, - cmd_ready, - cmd_reg: Some(cmd_reg), - notify_ready, - notify_reg: Some(notify_reg), - rx: Some(rx), - srv: Some(mpsc::unbounded()), - } - } - - pub fn send(&self, msg: Command) { - let _ = self.tx.send(msg); - let _ = self.cmd_ready.set_readiness(mio::Ready::readable()); - } - - pub fn get_notify(&self) -> AcceptNotify { - AcceptNotify::new(self.notify_ready.clone()) - } - - pub(crate) fn start( - &mut self, socks: Vec>, - workers: Vec, - ) -> mpsc::UnboundedReceiver { - let (tx, rx) = self.srv.take().expect("Can not re-use AcceptInfo"); - - Accept::start( - self.rx.take().expect("Can not re-use AcceptInfo"), - self.cmd_reg.take().expect("Can not re-use AcceptInfo"), - self.notify_reg.take().expect("Can not re-use AcceptInfo"), - socks, - tx, - workers, - ); - rx - } -} - -struct Accept { - poll: mio::Poll, - rx: sync_mpsc::Receiver, - sockets: Slab, - workers: Vec, - srv: mpsc::UnboundedSender, - timer: (mio::Registration, mio::SetReadiness), - next: usize, - backpressure: bool, -} - -const DELTA: usize = 100; -const CMD: mio::Token = mio::Token(0); -const TIMER: mio::Token = mio::Token(1); -const NOTIFY: mio::Token = mio::Token(2); - -/// This function defines errors that are per-connection. Which basically -/// means that if we get this error from `accept()` system call it means -/// next connection might be ready to be accepted. -/// -/// All other errors will incur a timeout before next `accept()` is performed. -/// The timeout is useful to handle resource exhaustion errors like ENFILE -/// and EMFILE. Otherwise, could enter into tight loop. -fn connection_error(e: &io::Error) -> bool { - e.kind() == io::ErrorKind::ConnectionRefused - || e.kind() == io::ErrorKind::ConnectionAborted - || e.kind() == io::ErrorKind::ConnectionReset -} - -impl Accept { - #![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))] - pub(crate) fn start( - rx: sync_mpsc::Receiver, cmd_reg: mio::Registration, - notify_reg: mio::Registration, socks: Vec>, - srv: mpsc::UnboundedSender, workers: Vec, - ) { - let sys = System::current(); - - // start accept thread - let _ = thread::Builder::new() - .name("actix-web accept loop".to_owned()) - .spawn(move || { - System::set_current(sys); - let mut accept = Accept::new(rx, socks, workers, srv); - - // Start listening for incoming commands - if let Err(err) = accept.poll.register( - &cmd_reg, - CMD, - mio::Ready::readable(), - mio::PollOpt::edge(), - ) { - panic!("Can not register Registration: {}", err); - } - - // Start listening for notify updates - if let Err(err) = accept.poll.register( - ¬ify_reg, - NOTIFY, - mio::Ready::readable(), - mio::PollOpt::edge(), - ) { - panic!("Can not register Registration: {}", err); - } - - accept.poll(); - }); - } - - fn new( - rx: sync_mpsc::Receiver, socks: Vec>, - workers: Vec, srv: mpsc::UnboundedSender, - ) -> Accept { - // Create a poll instance - let poll = match mio::Poll::new() { - Ok(poll) => poll, - Err(err) => panic!("Can not create mio::Poll: {}", err), - }; - - // Start accept - let mut sockets = Slab::new(); - for (idx, srv_socks) in socks.into_iter().enumerate() { - for (hnd_token, lst) in srv_socks { - let addr = lst.local_addr().unwrap(); - let server = mio::net::TcpListener::from_std(lst) - .expect("Can not create mio::net::TcpListener"); - - let entry = sockets.vacant_entry(); - let token = entry.key(); - - // Start listening for incoming connections - if let Err(err) = poll.register( - &server, - mio::Token(token + DELTA), - 
mio::Ready::readable(), - mio::PollOpt::edge(), - ) { - panic!("Can not register io: {}", err); - } - - entry.insert(ServerSocketInfo { - addr, - token: hnd_token, - handler: Token(idx), - sock: server, - timeout: None, - }); - } - } - - // Timer - let (tm, tmr) = mio::Registration::new2(); - if let Err(err) = - poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge()) - { - panic!("Can not register Registration: {}", err); - } - - Accept { - poll, - rx, - sockets, - workers, - srv, - next: 0, - timer: (tm, tmr), - backpressure: false, - } - } - - fn poll(&mut self) { - // Create storage for events - let mut events = mio::Events::with_capacity(128); - - loop { - if let Err(err) = self.poll.poll(&mut events, None) { - panic!("Poll error: {}", err); - } - - for event in events.iter() { - let token = event.token(); - match token { - CMD => if !self.process_cmd() { - return; - }, - TIMER => self.process_timer(), - NOTIFY => self.backpressure(false), - _ => { - let token = usize::from(token); - if token < DELTA { - continue; - } - self.accept(token - DELTA); - } - } - } - } - } - - fn process_timer(&mut self) { - let now = Instant::now(); - for (token, info) in self.sockets.iter_mut() { - if let Some(inst) = info.timeout.take() { - if now > inst { - if let Err(err) = self.poll.register( - &info.sock, - mio::Token(token + DELTA), - mio::Ready::readable(), - mio::PollOpt::edge(), - ) { - error!("Can not register server socket {}", err); - } else { - info!("Resume accepting connections on {}", info.addr); - } - } else { - info.timeout = Some(inst); - } - } - } - } - - fn process_cmd(&mut self) -> bool { - loop { - match self.rx.try_recv() { - Ok(cmd) => match cmd { - Command::Pause => { - for (_, info) in self.sockets.iter_mut() { - if let Err(err) = self.poll.deregister(&info.sock) { - error!("Can not deregister server socket {}", err); - } else { - info!("Paused accepting connections on {}", info.addr); - } - } - } - Command::Resume => { - for (token, info) in self.sockets.iter() { - if let Err(err) = self.poll.register( - &info.sock, - mio::Token(token + DELTA), - mio::Ready::readable(), - mio::PollOpt::edge(), - ) { - error!("Can not resume socket accept process: {}", err); - } else { - info!( - "Accepting connections on {} has been resumed", - info.addr - ); - } - } - } - Command::Stop => { - for (_, info) in self.sockets.iter() { - let _ = self.poll.deregister(&info.sock); - } - return false; - } - Command::Worker(worker) => { - self.backpressure(false); - self.workers.push(worker); - } - }, - Err(err) => match err { - sync_mpsc::TryRecvError::Empty => break, - sync_mpsc::TryRecvError::Disconnected => { - for (_, info) in self.sockets.iter() { - let _ = self.poll.deregister(&info.sock); - } - return false; - } - }, - } - } - true - } - - fn backpressure(&mut self, on: bool) { - if self.backpressure { - if !on { - self.backpressure = false; - for (token, info) in self.sockets.iter() { - if let Err(err) = self.poll.register( - &info.sock, - mio::Token(token + DELTA), - mio::Ready::readable(), - mio::PollOpt::edge(), - ) { - error!("Can not resume socket accept process: {}", err); - } else { - info!("Accepting connections on {} has been resumed", info.addr); - } - } - } - } else if on { - self.backpressure = true; - for (_, info) in self.sockets.iter() { - let _ = self.poll.deregister(&info.sock); - } - } - } - - fn accept_one(&mut self, mut msg: Conn) { - if self.backpressure { - while !self.workers.is_empty() { - match self.workers[self.next].send(msg) { - Ok(_) => (), - 
Err(err) => { - let _ = self.srv.unbounded_send(ServerCommand::WorkerDied( - self.workers[self.next].idx, - )); - msg = err.into_inner(); - self.workers.swap_remove(self.next); - if self.workers.is_empty() { - error!("No workers"); - return; - } else if self.workers.len() <= self.next { - self.next = 0; - } - continue; - } - } - self.next = (self.next + 1) % self.workers.len(); - break; - } - } else { - let mut idx = 0; - while idx < self.workers.len() { - idx += 1; - if self.workers[self.next].available() { - match self.workers[self.next].send(msg) { - Ok(_) => { - self.next = (self.next + 1) % self.workers.len(); - return; - } - Err(err) => { - let _ = self.srv.unbounded_send(ServerCommand::WorkerDied( - self.workers[self.next].idx, - )); - msg = err.into_inner(); - self.workers.swap_remove(self.next); - if self.workers.is_empty() { - error!("No workers"); - self.backpressure(true); - return; - } else if self.workers.len() <= self.next { - self.next = 0; - } - continue; - } - } - } - self.next = (self.next + 1) % self.workers.len(); - } - // enable backpressure - self.backpressure(true); - self.accept_one(msg); - } - } - - fn accept(&mut self, token: usize) { - loop { - let msg = if let Some(info) = self.sockets.get_mut(token) { - match info.sock.accept_std() { - Ok((io, addr)) => Conn { - io, - token: info.token, - handler: info.handler, - peer: Some(addr), - }, - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return, - Err(ref e) if connection_error(e) => continue, - Err(e) => { - error!("Error accepting connection: {}", e); - if let Err(err) = self.poll.deregister(&info.sock) { - error!("Can not deregister server socket {}", err); - } - - // sleep after error - info.timeout = Some(Instant::now() + Duration::from_millis(500)); - - let r = self.timer.1.clone(); - System::current().arbiter().do_send(Execute::new( - move || -> Result<(), ()> { - Arbiter::spawn( - Delay::new( - Instant::now() + Duration::from_millis(510), - ).map_err(|_| ()) - .and_then( - move |_| { - let _ = - r.set_readiness(mio::Ready::readable()); - Ok(()) - }, - ), - ); - Ok(()) - }, - )); - return; - } - } - } else { - return; - }; - - self.accept_one(msg); - } - } -} diff --git a/src/server/channel.rs b/src/server/channel.rs index 3d753f655..d83e9a38e 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -8,7 +8,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; use tokio_timer::Delay; use super::settings::WorkerSettings; -use super::{h1, h2, ConnectionTag, HttpHandler, IoStream}; +use super::{h1, h2, HttpHandler, IoStream}; const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0"; @@ -32,7 +32,6 @@ where proto: Option>, node: Option>>, ka_timeout: Option, - _tag: ConnectionTag, } impl HttpChannel @@ -43,11 +42,9 @@ where pub(crate) fn new( settings: Rc>, io: T, peer: Option, ) -> HttpChannel { - let _tag = settings.connection(); let ka_timeout = settings.keep_alive_timer(); HttpChannel { - _tag, ka_timeout, node: None, proto: Some(HttpProtocol::Unknown( diff --git a/src/server/http.rs b/src/server/http.rs index b6f577b02..5059b1326 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -5,29 +5,31 @@ use std::{io, mem, net, time}; use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System}; -use futures::{Future, Stream}; -use net2::{TcpBuilder, TcpStreamExt}; +use futures::future::{ok, FutureResult}; +use futures::{Async, Poll, Stream}; +use net2::TcpBuilder; use num_cpus; -use tokio_io::{AsyncRead, AsyncWrite}; -use tokio_tcp::TcpStream; -#[cfg(feature = "tls")] -use 
native_tls::TlsAcceptor; +use actix_net::{ssl, NewService, Service, Server}; + +//#[cfg(feature = "tls")] +//use native_tls::TlsAcceptor; #[cfg(feature = "alpn")] use openssl::ssl::SslAcceptorBuilder; -#[cfg(feature = "rust-tls")] -use rustls::ServerConfig; +//#[cfg(feature = "rust-tls")] +//use rustls::ServerConfig; -use super::channel::{HttpChannel, WrapperStream}; -use super::server::{Connections, Server, Service, ServiceHandler}; +use super::channel::HttpChannel; use super::settings::{ServerSettings, WorkerSettings}; -use super::worker::{Conn, Socket}; -use super::{ - AcceptorService, HttpHandler, IntoAsyncIo, IntoHttpHandler, IoStream, KeepAlive, - Token, -}; +use super::{HttpHandler, IntoHttpHandler, IoStream, KeepAlive}; + +struct Socket { + lst: net::TcpListener, + addr: net::SocketAddr, + handler: Box>, +} /// An HTTP Server /// @@ -49,8 +51,7 @@ where no_signals: bool, maxconn: usize, maxconnrate: usize, - sockets: Vec, - handlers: Vec>>, + sockets: Vec>, } impl HttpServer @@ -75,11 +76,9 @@ where exit: false, no_http2: false, no_signals: false, - maxconn: 102_400, + maxconn: 25_600, maxconnrate: 256, - // settings: None, sockets: Vec::new(), - handlers: Vec::new(), } } @@ -112,7 +111,7 @@ where /// All socket listeners will stop accepting connections when this limit is reached /// for each worker. /// - /// By default max connections is set to a 100k. + /// By default max connections is set to a 25k. pub fn maxconn(mut self, num: usize) -> Self { self.maxconn = num; self @@ -196,9 +195,9 @@ where /// and the user should be presented with an enumeration of which /// socket requires which protocol. pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> { - self.handlers + self.sockets .iter() - .map(|s| (s.addr(), s.scheme())) + .map(|s| (s.addr, s.handler.scheme())) .collect() } @@ -207,78 +206,82 @@ where /// HttpServer does not change any configuration for TcpListener, /// it needs to be configured before passing it to listen() method. 
pub fn listen(mut self, lst: net::TcpListener) -> Self { - let token = Token(self.handlers.len()); let addr = lst.local_addr().unwrap(); - self.handlers - .push(Box::new(SimpleHandler::new(lst.local_addr().unwrap()))); - self.sockets.push(Socket { lst, addr, token }); + self.sockets.push(Socket { + lst, + addr, + handler: Box::new(SimpleHandler { + addr, + factory: self.factory.clone(), + }), + }); self } - #[doc(hidden)] - /// Use listener for accepting incoming connection requests - pub fn listen_with(mut self, lst: net::TcpListener, acceptor: A) -> Self - where - A: AcceptorService + Send + 'static, - { - let token = Token(self.handlers.len()); - let addr = lst.local_addr().unwrap(); - self.handlers.push(Box::new(StreamHandler::new( - lst.local_addr().unwrap(), - acceptor, - ))); - self.sockets.push(Socket { lst, addr, token }); + // #[doc(hidden)] + // /// Use listener for accepting incoming connection requests + // pub fn listen_with(mut self, lst: net::TcpListener, acceptor: A) -> Self + // where + // A: AcceptorService + Send + 'static, + // { + // let token = Token(self.handlers.len()); + // let addr = lst.local_addr().unwrap(); + // self.handlers.push(Box::new(StreamHandler::new( + // lst.local_addr().unwrap(), + // acceptor, + // ))); + // self.sockets.push(Socket { lst, addr, token }); - self - } + // self + // } - #[cfg(feature = "tls")] - /// Use listener for accepting incoming tls connection requests - /// - /// HttpServer does not change any configuration for TcpListener, - /// it needs to be configured before passing it to listen() method. - pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { - use super::NativeTlsAcceptor; + // #[cfg(feature = "tls")] + // /// Use listener for accepting incoming tls connection requests + // /// + // /// HttpServer does not change any configuration for TcpListener, + // /// it needs to be configured before passing it to listen() method. 
+ // pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { + // use super::NativeTlsAcceptor; + // + // self.listen_with(lst, NativeTlsAcceptor::new(acceptor)) + // } - self.listen_with(lst, NativeTlsAcceptor::new(acceptor)) - } + // #[cfg(feature = "alpn")] + // /// Use listener for accepting incoming tls connection requests + // /// + // /// This method sets alpn protocols to "h2" and "http/1.1" + // pub fn listen_ssl( + // self, lst: net::TcpListener, builder: SslAcceptorBuilder, + // ) -> io::Result { + // use super::{OpensslAcceptor, ServerFlags}; - #[cfg(feature = "alpn")] - /// Use listener for accepting incoming tls connection requests - /// - /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn listen_ssl( - self, lst: net::TcpListener, builder: SslAcceptorBuilder, - ) -> io::Result { - use super::{OpensslAcceptor, ServerFlags}; + // alpn support + // let flags = if self.no_http2 { + // ServerFlags::HTTP1 + // } else { + // ServerFlags::HTTP1 | ServerFlags::HTTP2 + // }; - // alpn support - let flags = if self.no_http2 { - ServerFlags::HTTP1 - } else { - ServerFlags::HTTP1 | ServerFlags::HTTP2 - }; + // Ok(self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?)) + // } - Ok(self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?)) - } + // #[cfg(feature = "rust-tls")] + // /// Use listener for accepting incoming tls connection requests + // /// + // /// This method sets alpn protocols to "h2" and "http/1.1" + // pub fn listen_rustls(self, lst: net::TcpListener, builder: ServerConfig) -> Self { + // use super::{RustlsAcceptor, ServerFlags}; - #[cfg(feature = "rust-tls")] - /// Use listener for accepting incoming tls connection requests - /// - /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn listen_rustls(self, lst: net::TcpListener, builder: ServerConfig) -> Self { - use super::{RustlsAcceptor, ServerFlags}; - - // alpn support - let flags = if self.no_http2 { - ServerFlags::HTTP1 - } else { - ServerFlags::HTTP1 | ServerFlags::HTTP2 - }; - - self.listen_with(lst, RustlsAcceptor::with_flags(builder, flags)) - } + // // alpn support + // let flags = if self.no_http2 { + // ServerFlags::HTTP1 + // } else { + // ServerFlags::HTTP1 | ServerFlags::HTTP2 + // }; + // + // self.listen_with(lst, RustlsAcceptor::with_flags(builder, flags)) + // } /// The socket address to bind /// @@ -287,38 +290,34 @@ where let sockets = self.bind2(addr)?; for lst in sockets { - let token = Token(self.handlers.len()); - let addr = lst.local_addr().unwrap(); - self.handlers - .push(Box::new(SimpleHandler::new(lst.local_addr().unwrap()))); - self.sockets.push(Socket { lst, addr, token }) + self = self.listen(lst); } Ok(self) } - /// Start listening for incoming connections with supplied acceptor. - #[doc(hidden)] - #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] - pub fn bind_with(mut self, addr: S, acceptor: A) -> io::Result - where - S: net::ToSocketAddrs, - A: AcceptorService + Send + 'static, - { - let sockets = self.bind2(addr)?; + // /// Start listening for incoming connections with supplied acceptor. 
+ // #[doc(hidden)] + // #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] + // pub fn bind_with(mut self, addr: S, acceptor: A) -> io::Result + // where + // S: net::ToSocketAddrs, + // A: AcceptorService + Send + 'static, + // { + // let sockets = self.bind2(addr)?; - for lst in sockets { - let token = Token(self.handlers.len()); - let addr = lst.local_addr().unwrap(); - self.handlers.push(Box::new(StreamHandler::new( - lst.local_addr().unwrap(), - acceptor.clone(), - ))); - self.sockets.push(Socket { lst, addr, token }) - } + // for lst in sockets { + // let token = Token(self.handlers.len()); + // let addr = lst.local_addr().unwrap(); + // self.handlers.push(Box::new(StreamHandler::new( + // lst.local_addr().unwrap(), + // acceptor.clone(), + // ))); + // self.sockets.push(Socket { lst, addr, token }) + // } - Ok(self) - } + // Ok(self) + // } fn bind2( &self, addr: S, @@ -350,112 +349,109 @@ where } } - #[cfg(feature = "tls")] - /// The ssl socket address to bind - /// - /// To bind multiple addresses this method can be called multiple times. - pub fn bind_tls( - self, addr: S, acceptor: TlsAcceptor, - ) -> io::Result { - use super::NativeTlsAcceptor; + // #[cfg(feature = "tls")] + // /// The ssl socket address to bind + // /// + // /// To bind multiple addresses this method can be called multiple times. + // pub fn bind_tls( + // self, addr: S, acceptor: TlsAcceptor, + // ) -> io::Result { + // use super::NativeTlsAcceptor; - self.bind_with(addr, NativeTlsAcceptor::new(acceptor)) - } + // self.bind_with(addr, NativeTlsAcceptor::new(acceptor)) + // } - #[cfg(feature = "alpn")] - /// Start listening for incoming tls connections. - /// - /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn bind_ssl(self, addr: S, builder: SslAcceptorBuilder) -> io::Result - where - S: net::ToSocketAddrs, - { - use super::{OpensslAcceptor, ServerFlags}; + // #[cfg(feature = "alpn")] + // /// Start listening for incoming tls connections. + // /// + // /// This method sets alpn protocols to "h2" and "http/1.1" + // pub fn bind_ssl(self, addr: S, builder: SslAcceptorBuilder) -> io::Result + // where + // S: net::ToSocketAddrs, + // { + // use super::{OpensslAcceptor, ServerFlags}; - // alpn support - let flags = if !self.no_http2 { - ServerFlags::HTTP1 - } else { - ServerFlags::HTTP1 | ServerFlags::HTTP2 - }; + // // alpn support + // let flags = if !self.no_http2 { + // ServerFlags::HTTP1 + // } else { + // ServerFlags::HTTP1 | ServerFlags::HTTP2 + // }; - self.bind_with(addr, OpensslAcceptor::with_flags(builder, flags)?) - } + // self.bind_with(addr, OpensslAcceptor::with_flags(builder, flags)?) + // } - #[cfg(feature = "rust-tls")] - /// Start listening for incoming tls connections. - /// - /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn bind_rustls( - self, addr: S, builder: ServerConfig, - ) -> io::Result { - use super::{RustlsAcceptor, ServerFlags}; + // #[cfg(feature = "rust-tls")] + // /// Start listening for incoming tls connections. 
+ // /// + // /// This method sets alpn protocols to "h2" and "http/1.1" + // pub fn bind_rustls( + // self, addr: S, builder: ServerConfig, + // ) -> io::Result { + // use super::{RustlsAcceptor, ServerFlags}; - // alpn support - let flags = if !self.no_http2 { - ServerFlags::HTTP1 - } else { - ServerFlags::HTTP1 | ServerFlags::HTTP2 - }; + // // alpn support + // let flags = if !self.no_http2 { + // ServerFlags::HTTP1 + // } else { + // ServerFlags::HTTP1 | ServerFlags::HTTP2 + // }; - self.bind_with(addr, RustlsAcceptor::with_flags(builder, flags)) - } + // self.bind_with(addr, RustlsAcceptor::with_flags(builder, flags)) + // } } -impl Into<(Box, Vec<(Token, net::TcpListener)>)> - for HttpServer +struct HttpService +where + H: HttpHandler, + F: IntoHttpHandler, + Io: IoStream, { - fn into(mut self) -> (Box, Vec<(Token, net::TcpListener)>) { - let sockets: Vec<_> = mem::replace(&mut self.sockets, Vec::new()) - .into_iter() - .map(|item| (item.token, item.lst)) - .collect(); - - ( - Box::new(HttpService { - factory: self.factory, - host: self.host, - keep_alive: self.keep_alive, - handlers: self.handlers, - }), - sockets, - ) - } -} - -struct HttpService { - factory: Arc Vec + Send + Sync>, + factory: Arc Vec + Send + Sync>, + addr: net::SocketAddr, host: Option, keep_alive: KeepAlive, - handlers: Vec>>, + _t: PhantomData<(H, Io)>, } -impl Service for HttpService { - fn clone(&self) -> Box { - Box::new(HttpService { - factory: self.factory.clone(), - host: self.host.clone(), - keep_alive: self.keep_alive, - handlers: self.handlers.iter().map(|v| v.clone()).collect(), - }) - } +impl NewService for HttpService +where + H: HttpHandler, + F: IntoHttpHandler, + Io: IoStream, +{ + type Request = Io; + type Response = (); + type Error = (); + type InitError = (); + type Service = HttpServiceHandler; + type Future = FutureResult; - fn create(&self, conns: Connections) -> Box { - let addr = self.handlers[0].addr(); - let s = ServerSettings::new(Some(addr), &self.host, false); + fn new_service(&self) -> Self::Future { + let s = ServerSettings::new(Some(self.addr), &self.host, false); let apps: Vec<_> = (*self.factory)() .into_iter() .map(|h| h.into_handler()) .collect(); - let handlers = self.handlers.iter().map(|h| h.clone()).collect(); - Box::new(HttpServiceHandler::new( - apps, - handlers, - self.keep_alive, - s, - conns, - )) + ok(HttpServiceHandler::new(apps, self.keep_alive, s)) + } +} + +impl Clone for HttpService +where + H: HttpHandler, + F: IntoHttpHandler, + Io: IoStream, +{ + fn clone(&self) -> HttpService { + HttpService { + addr: self.addr, + factory: self.factory.clone(), + host: self.host.clone(), + keep_alive: self.keep_alive, + _t: PhantomData, + } } } @@ -485,11 +481,12 @@ impl HttpServer { /// sys.run(); // <- Run actix system, this method starts all async processes /// } /// ``` - pub fn start(self) -> Addr { + pub fn start(mut self) -> Addr { + ssl::max_concurrent_ssl_connect(self.maxconnrate); + let mut srv = Server::new() .workers(self.threads) .maxconn(self.maxconn) - .maxconnrate(self.maxconnrate) .shutdown_timeout(self.shutdown_timeout); srv = if self.exit { srv.system_exit() } else { srv }; @@ -499,7 +496,17 @@ impl HttpServer { srv }; - srv.service(self).start() + let sockets = mem::replace(&mut self.sockets, Vec::new()); + + for socket in sockets { + let Socket { + lst, + addr: _, + handler, + } = socket; + srv = handler.register(srv, lst, self.host.clone(), self.keep_alive); + } + srv.start() } /// Spawn new thread and start listening for incoming connections. 
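(The pattern above, in isolation: every bound listener is paired with a service factory; the server clones that factory into each worker, and each worker calls it to obtain its own service instance for the connections it accepts. The sketch below is a self-contained illustration of that factory-per-listener split; the trait and type names are simplified stand-ins invented for this example, not the real actix-net `NewService`/`Service` API.)

```rust
use std::net::TcpStream;

// Illustrative stand-ins only; not the actual actix-net trait definitions.
trait ServiceFactory: Clone {
    type Service: ConnService;
    fn new_service(&self) -> Self::Service;
}

trait ConnService {
    fn call(&mut self, conn: TcpStream);
}

#[derive(Clone)]
struct HttpFactory {
    host: Option<String>, // captured per listener, like `HttpService` above
}

struct HttpWorkerService {
    host: Option<String>,
}

impl ServiceFactory for HttpFactory {
    type Service = HttpWorkerService;

    // Called once per worker, so every worker owns independent state.
    fn new_service(&self) -> HttpWorkerService {
        HttpWorkerService {
            host: self.host.clone(),
        }
    }
}

impl ConnService for HttpWorkerService {
    fn call(&mut self, conn: TcpStream) {
        // A real service would drive the HTTP protocol on `conn` here.
        let _ = (&self.host, conn);
    }
}
```

This split is also why `bind()` can now simply delegate to `listen()` for every socket it creates: each `listen()` call stores one listener together with one factory, and `start()` only has to walk that list and register the pairs with the actix-net server.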
@@ -529,277 +536,187 @@ impl HttpServer { } } -impl HttpServer { - /// Start listening for incoming connections from a stream. - /// - /// This method uses only one thread for handling incoming connections. - pub fn start_incoming(self, stream: S, secure: bool) - where - S: Stream + Send + 'static, - T: AsyncRead + AsyncWrite + Send + 'static, - { - // set server settings - let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); - let srv_settings = ServerSettings::new(Some(addr), &self.host, secure); - let apps: Vec<_> = (*self.factory)() - .into_iter() - .map(|h| h.into_handler()) - .collect(); - let settings = WorkerSettings::create( - apps, - self.keep_alive, - srv_settings, - Connections::default(), - ); +// impl HttpServer { +// /// Start listening for incoming connections from a stream. +// /// +// /// This method uses only one thread for handling incoming connections. +// pub fn start_incoming(self, stream: S, secure: bool) +// where +// S: Stream + Send + 'static, +// T: AsyncRead + AsyncWrite + Send + 'static, +// { +// // set server settings +// let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); +// let srv_settings = ServerSettings::new(Some(addr), &self.host, secure); +// let apps: Vec<_> = (*self.factory)() +// .into_iter() +// .map(|h| h.into_handler()) +// .collect(); +// let settings = WorkerSettings::create( +// apps, +// self.keep_alive, +// srv_settings, +// ); - // start server - HttpIncoming::create(move |ctx| { - ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn { - io: WrapperStream::new(t), - handler: Token::new(0), - token: Token::new(0), - peer: None, - })); - HttpIncoming { settings } - }); - } -} +// // start server +// HttpIncoming::create(move |ctx| { +// ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn { +// io: WrapperStream::new(t), +// handler: Token::new(0), +// token: Token::new(0), +// peer: None, +// })); +// HttpIncoming { settings } +// }); +// } +// } -struct HttpIncoming { - settings: Rc>, -} +// struct HttpIncoming { +// settings: Rc>, +// } -impl Actor for HttpIncoming +// impl Actor for HttpIncoming +// where +// H: HttpHandler, +// { +// type Context = Context; +// } + +// impl Handler> for HttpIncoming +// where +// T: IoStream, +// H: HttpHandler, +// { +// type Result = (); + +// fn handle(&mut self, msg: Conn, _: &mut Context) -> Self::Result { +// spawn(HttpChannel::new( +// Rc::clone(&self.settings), +// msg.io, +// msg.peer, +// )); +// } +// } + +struct HttpServiceHandler where H: HttpHandler, -{ - type Context = Context; -} - -impl Handler> for HttpIncoming -where - T: IoStream, - H: HttpHandler, -{ - type Result = (); - - fn handle(&mut self, msg: Conn, _: &mut Context) -> Self::Result { - Arbiter::spawn(HttpChannel::new( - Rc::clone(&self.settings), - msg.io, - msg.peer, - )); - } -} - -struct HttpServiceHandler -where - H: HttpHandler + 'static, + Io: IoStream, { settings: Rc>, - handlers: Vec>>, tcp_ka: Option, + _t: PhantomData, } -impl HttpServiceHandler { +impl HttpServiceHandler +where + H: HttpHandler, + Io: IoStream, +{ fn new( - apps: Vec, handlers: Vec>>, - keep_alive: KeepAlive, settings: ServerSettings, conns: Connections, - ) -> HttpServiceHandler { + apps: Vec, keep_alive: KeepAlive, settings: ServerSettings, + ) -> HttpServiceHandler { let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { Some(time::Duration::new(val as u64, 0)) } else { None }; - let settings = WorkerSettings::create(apps, keep_alive, settings, conns); + let settings = WorkerSettings::create(apps, 
keep_alive, settings); HttpServiceHandler { - handlers, tcp_ka, settings, + _t: PhantomData, } } } -impl ServiceHandler for HttpServiceHandler +impl Service for HttpServiceHandler where - H: HttpHandler + 'static, + H: HttpHandler, + Io: IoStream, { - fn handle( - &mut self, token: Token, io: net::TcpStream, peer: Option, - ) { - if self.tcp_ka.is_some() && io.set_keepalive(self.tcp_ka).is_err() { - error!("Can not set socket keep-alive option"); - } - self.handlers[token.0].handle(Rc::clone(&self.settings), io, peer); + type Request = Io; + type Response = (); + type Error = (); + type Future = HttpChannel; + + fn poll_ready(&mut self) -> Poll<(), Self::Error> { + Ok(Async::Ready(())) } - fn shutdown(&self, force: bool) { - if force { - self.settings - .head() - .traverse(|ch: &mut HttpChannel| ch.shutdown()); - } + fn call(&mut self, mut req: Self::Request) -> Self::Future { + let _ = req.set_nodelay(true); + HttpChannel::new(Rc::clone(&self.settings), req, None) } + + // fn shutdown(&self, force: bool) { + // if force { + // self.settings.head().traverse::(); + // } + // } } -struct SimpleHandler { - addr: net::SocketAddr, - io: PhantomData, +trait IoStreamHandler: Send +where + H: IntoHttpHandler, +{ + fn addr(&self) -> net::SocketAddr; + + fn scheme(&self) -> &'static str; + + fn register( + &self, server: Server, lst: net::TcpListener, host: Option, + keep_alive: KeepAlive, + ) -> Server; } -impl Clone for SimpleHandler { +struct SimpleHandler +where + H: IntoHttpHandler, +{ + pub addr: net::SocketAddr, + pub factory: Arc Vec + Send + Sync>, +} + +impl Clone for SimpleHandler { fn clone(&self) -> Self { SimpleHandler { addr: self.addr, - io: PhantomData, + factory: self.factory.clone(), } } } -impl SimpleHandler { - fn new(addr: net::SocketAddr) -> Self { - SimpleHandler { - addr, - io: PhantomData, - } - } -} - -impl IoStreamHandler for SimpleHandler +impl IoStreamHandler for SimpleHandler where - H: HttpHandler, - Io: IntoAsyncIo + Send + 'static, - Io::Io: IoStream, + H: IntoHttpHandler + 'static, { fn addr(&self) -> net::SocketAddr { self.addr } - fn clone(&self) -> Box> { - Box::new(Clone::clone(self)) - } - fn scheme(&self) -> &'static str { "http" } - fn handle(&self, h: Rc>, io: Io, peer: Option) { - let mut io = match io.into_async_io() { - Ok(io) => io, - Err(err) => { - trace!("Failed to create async io: {}", err); - return; - } - }; - let _ = io.set_nodelay(true); + fn register( + &self, server: Server, lst: net::TcpListener, host: Option, + keep_alive: KeepAlive, + ) -> Server { + let addr = self.addr; + let factory = self.factory.clone(); - Arbiter::spawn(HttpChannel::new(h, io, peer)); - } -} - -struct StreamHandler { - acceptor: A, - addr: net::SocketAddr, - io: PhantomData, -} - -impl> StreamHandler { - fn new(addr: net::SocketAddr, acceptor: A) -> Self { - StreamHandler { + server.listen(lst, move || HttpService { + keep_alive, addr, - acceptor, - io: PhantomData, - } + host: host.clone(), + factory: factory.clone(), + _t: PhantomData, + }) } } -impl> Clone for StreamHandler { - fn clone(&self) -> Self { - StreamHandler { - addr: self.addr, - acceptor: self.acceptor.clone(), - io: PhantomData, - } - } -} - -impl IoStreamHandler for StreamHandler -where - H: HttpHandler, - Io: IntoAsyncIo + Send + 'static, - Io::Io: IoStream, - A: AcceptorService + Send + 'static, -{ - fn addr(&self) -> net::SocketAddr { - self.addr - } - - fn clone(&self) -> Box> { - Box::new(Clone::clone(self)) - } - - fn scheme(&self) -> &'static str { - self.acceptor.scheme() - } - - fn 
handle(&self, h: Rc>, io: Io, peer: Option) { - let mut io = match io.into_async_io() { - Ok(io) => io, - Err(err) => { - trace!("Failed to create async io: {}", err); - return; - } - }; - let _ = io.set_nodelay(true); - - let rate = h.connection_rate(); - Arbiter::spawn(self.acceptor.accept(io).then(move |res| { - drop(rate); - match res { - Ok(io) => Arbiter::spawn(HttpChannel::new(h, io, peer)), - Err(err) => trace!("Can not establish connection: {}", err), - } - Ok(()) - })) - } -} - -impl IoStreamHandler for Box> -where - H: HttpHandler, - Io: IntoAsyncIo, -{ - fn addr(&self) -> net::SocketAddr { - self.as_ref().addr() - } - - fn clone(&self) -> Box> { - self.as_ref().clone() - } - - fn scheme(&self) -> &'static str { - self.as_ref().scheme() - } - - fn handle(&self, h: Rc>, io: Io, peer: Option) { - self.as_ref().handle(h, io, peer) - } -} - -trait IoStreamHandler: Send -where - H: HttpHandler, -{ - fn clone(&self) -> Box>; - - fn addr(&self) -> net::SocketAddr; - - fn scheme(&self) -> &'static str; - - fn handle(&self, h: Rc>, io: Io, peer: Option); -} - fn create_tcp_listener( addr: net::SocketAddr, backlog: i32, ) -> io::Result { diff --git a/src/server/mod.rs b/src/server/mod.rs index 96ec570a1..25eca3a71 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -108,15 +108,13 @@ //! ``` use std::net::Shutdown; use std::rc::Rc; -use std::{io, net, time}; +use std::{io, time}; use bytes::{BufMut, BytesMut}; -use futures::{Async, Future, Poll}; +use futures::{Async, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; -use tokio_reactor::Handle; use tokio_tcp::TcpStream; -pub(crate) mod accept; mod channel; mod error; pub(crate) mod h1; @@ -129,25 +127,15 @@ mod http; pub(crate) mod input; pub(crate) mod message; pub(crate) mod output; -mod server; pub(crate) mod settings; mod ssl; -mod worker; use actix::Message; -pub use self::message::Request; - pub use self::http::HttpServer; -#[doc(hidden)] -pub use self::server::{ - ConnectionRateTag, ConnectionTag, Connections, Server, Service, ServiceHandler, -}; +pub use self::message::Request; pub use self::settings::ServerSettings; -#[doc(hidden)] -pub use self::ssl::*; - #[doc(hidden)] pub use self::helpers::write_content_length; @@ -322,35 +310,6 @@ impl IntoHttpHandler for T { } } -pub(crate) trait IntoAsyncIo { - type Io: AsyncRead + AsyncWrite; - - fn into_async_io(self) -> Result; -} - -impl IntoAsyncIo for net::TcpStream { - type Io = TcpStream; - - fn into_async_io(self) -> Result { - TcpStream::from_std(self, &Handle::default()) - } -} - -#[doc(hidden)] -/// Trait implemented by types that could accept incomming socket connections. -pub trait AcceptorService: Clone { - /// Established connection type - type Accepted: IoStream; - /// Future describes async accept process. 
- type Future: Future + 'static; - - /// Establish new connection - fn accept(&self, io: Io) -> Self::Future; - - /// Scheme - fn scheme(&self) -> &'static str; -} - #[doc(hidden)] #[derive(Debug)] pub enum WriterState { diff --git a/src/server/server.rs b/src/server/server.rs deleted file mode 100644 index 122571fd1..000000000 --- a/src/server/server.rs +++ /dev/null @@ -1,528 +0,0 @@ -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; -use std::time::Duration; -use std::{mem, net}; - -use futures::sync::{mpsc, mpsc::unbounded}; -use futures::{Future, Sink, Stream}; -use num_cpus; - -use actix::{ - fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, Context, Handler, - Response, StreamHandler, System, WrapFuture, -}; - -use super::accept::{AcceptLoop, AcceptNotify, Command}; -use super::worker::{Conn, StopWorker, Worker, WorkerClient}; -use super::{PauseServer, ResumeServer, StopServer, Token}; - -#[doc(hidden)] -/// Describes service that could be used -/// with [Server](struct.Server.html) -pub trait Service: Send + 'static { - /// Clone service - fn clone(&self) -> Box; - - /// Create service handler for this service - fn create(&self, conn: Connections) -> Box; -} - -impl Service for Box { - fn clone(&self) -> Box { - self.as_ref().clone() - } - - fn create(&self, conn: Connections) -> Box { - self.as_ref().create(conn) - } -} - -#[doc(hidden)] -/// Describes the way serivce handles incoming -/// TCP connections. -pub trait ServiceHandler { - /// Handle incoming stream - fn handle( - &mut self, token: Token, io: net::TcpStream, peer: Option, - ); - - /// Shutdown open handlers - fn shutdown(&self, _: bool) {} -} - -pub(crate) enum ServerCommand { - WorkerDied(usize), -} - -/// Generic server -#[doc(hidden)] -pub struct Server { - threads: usize, - workers: Vec<(usize, Addr)>, - services: Vec>, - sockets: Vec>, - accept: AcceptLoop, - exit: bool, - shutdown_timeout: u16, - signals: Option>, - no_signals: bool, - maxconn: usize, - maxconnrate: usize, -} - -impl Default for Server { - fn default() -> Self { - Self::new() - } -} - -impl Server { - /// Create new Server instance - pub fn new() -> Server { - Server { - threads: num_cpus::get(), - workers: Vec::new(), - services: Vec::new(), - sockets: Vec::new(), - accept: AcceptLoop::new(), - exit: false, - shutdown_timeout: 30, - signals: None, - no_signals: false, - maxconn: 102_400, - maxconnrate: 256, - } - } - - /// Set number of workers to start. - /// - /// By default http server uses number of available logical cpu as threads - /// count. - pub fn workers(mut self, num: usize) -> Self { - self.threads = num; - self - } - - /// Sets the maximum per-worker number of concurrent connections. - /// - /// All socket listeners will stop accepting connections when this limit is reached - /// for each worker. - /// - /// By default max connections is set to a 100k. - pub fn maxconn(mut self, num: usize) -> Self { - self.maxconn = num; - self - } - - /// Sets the maximum per-worker concurrent connection establish process. - /// - /// All listeners will stop accepting connections when this limit is reached. It - /// can be used to limit the global SSL CPU usage. - /// - /// By default max connections is set to a 256. - pub fn maxconnrate(mut self, num: usize) -> Self { - self.maxconnrate = num; - self - } - - /// Stop actix system. - /// - /// `SystemExit` message stops currently running system. 
- pub fn system_exit(mut self) -> Self { - self.exit = true; - self - } - - #[doc(hidden)] - /// Set alternative address for `ProcessSignals` actor. - pub fn signals(mut self, addr: Addr) -> Self { - self.signals = Some(addr); - self - } - - /// Disable signal handling - pub fn disable_signals(mut self) -> Self { - self.no_signals = true; - self - } - - /// Timeout for graceful workers shutdown. - /// - /// After receiving a stop signal, workers have this much time to finish - /// serving requests. Workers still alive after the timeout are force - /// dropped. - /// - /// By default shutdown timeout sets to 30 seconds. - pub fn shutdown_timeout(mut self, sec: u16) -> Self { - self.shutdown_timeout = sec; - self - } - - /// Add new service to server - pub fn service(mut self, srv: T) -> Self - where - T: Into<(Box, Vec<(Token, net::TcpListener)>)>, - { - let (srv, sockets) = srv.into(); - self.services.push(srv); - self.sockets.push(sockets); - self - } - - /// Spawn new thread and start listening for incoming connections. - /// - /// This method spawns new thread and starts new actix system. Other than - /// that it is similar to `start()` method. This method blocks. - /// - /// This methods panics if no socket addresses get bound. - /// - /// ```rust,ignore - /// # extern crate futures; - /// # extern crate actix_web; - /// # use futures::Future; - /// use actix_web::*; - /// - /// fn main() { - /// Server::new(). - /// .service( - /// HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok()))) - /// .bind("127.0.0.1:0") - /// .expect("Can not bind to 127.0.0.1:0")) - /// .run(); - /// } - /// ``` - pub fn run(self) { - let sys = System::new("http-server"); - self.start(); - sys.run(); - } - - /// Starts Server Actor and returns its address - pub fn start(mut self) -> Addr { - if self.sockets.is_empty() { - panic!("Service should have at least one bound socket"); - } else { - info!("Starting {} http workers", self.threads); - - // start workers - let mut workers = Vec::new(); - for idx in 0..self.threads { - let (addr, worker) = self.start_worker(idx, self.accept.get_notify()); - workers.push(worker); - self.workers.push((idx, addr)); - } - - // start accept thread - for sock in &self.sockets { - for s in sock.iter() { - info!("Starting server on http://{}", s.1.local_addr().unwrap()); - } - } - let rx = self - .accept - .start(mem::replace(&mut self.sockets, Vec::new()), workers); - - // start http server actor - let signals = self.subscribe_to_signals(); - let addr = Actor::create(move |ctx| { - ctx.add_stream(rx); - self - }); - if let Some(signals) = signals { - signals.do_send(signal::Subscribe(addr.clone().recipient())) - } - addr - } - } - - // subscribe to os signals - fn subscribe_to_signals(&self) -> Option> { - if !self.no_signals { - if let Some(ref signals) = self.signals { - Some(signals.clone()) - } else { - Some(System::current().registry().get::()) - } - } else { - None - } - } - - fn start_worker( - &self, idx: usize, notify: AcceptNotify, - ) -> (Addr, WorkerClient) { - let (tx, rx) = unbounded::>(); - let conns = Connections::new(notify, self.maxconn, self.maxconnrate); - let worker = WorkerClient::new(idx, tx, conns.clone()); - let services: Vec<_> = self.services.iter().map(|v| v.clone()).collect(); - - let addr = Arbiter::start(move |ctx: &mut Context<_>| { - ctx.add_message_stream(rx); - let handlers: Vec<_> = services - .into_iter() - .map(|s| s.create(conns.clone())) - .collect(); - Worker::new(conns, handlers) - }); - - (addr, worker) - } -} - 
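(Aside on the code being deleted here: the old design is an accept thread that owns the listeners and hands every accepted socket to a worker over a per-worker channel, picking workers round-robin and replacing any worker whose channel has closed. A minimal std-only sketch of that dispatch shape, not the mio/actix implementation removed in this patch, looks like this.)

```rust
use std::net::{TcpListener, TcpStream};
use std::sync::mpsc;
use std::thread;

// Spawn `n` workers, each draining its own channel of accepted sockets.
fn spawn_workers(n: usize) -> Vec<mpsc::Sender<TcpStream>> {
    (0..n)
        .map(|_| {
            let (tx, rx) = mpsc::channel::<TcpStream>();
            thread::spawn(move || {
                for conn in rx {
                    // Handle the connection on this worker thread.
                    let _ = conn;
                }
            });
            tx
        })
        .collect()
}

// Round-robin hand-off from the accept loop to the workers.
fn accept_loop(lst: TcpListener, workers: Vec<mpsc::Sender<TcpStream>>) {
    let mut next = 0;
    for conn in lst.incoming().flatten() {
        if workers[next].send(conn).is_err() {
            // The real accept loop reports WorkerDied and gets a fresh worker;
            // this sketch just stops.
            break;
        }
        next = (next + 1) % workers.len();
    }
}
```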
-impl Actor for Server { - type Context = Context; -} - -/// Signals support -/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system -/// message to `System` actor. -impl Handler for Server { - type Result = (); - - fn handle(&mut self, msg: signal::Signal, ctx: &mut Context) { - match msg.0 { - signal::SignalType::Int => { - info!("SIGINT received, exiting"); - self.exit = true; - Handler::::handle(self, StopServer { graceful: false }, ctx); - } - signal::SignalType::Term => { - info!("SIGTERM received, stopping"); - self.exit = true; - Handler::::handle(self, StopServer { graceful: true }, ctx); - } - signal::SignalType::Quit => { - info!("SIGQUIT received, exiting"); - self.exit = true; - Handler::::handle(self, StopServer { graceful: false }, ctx); - } - _ => (), - } - } -} - -impl Handler for Server { - type Result = (); - - fn handle(&mut self, _: PauseServer, _: &mut Context) { - self.accept.send(Command::Pause); - } -} - -impl Handler for Server { - type Result = (); - - fn handle(&mut self, _: ResumeServer, _: &mut Context) { - self.accept.send(Command::Resume); - } -} - -impl Handler for Server { - type Result = Response<(), ()>; - - fn handle(&mut self, msg: StopServer, ctx: &mut Context) -> Self::Result { - // stop accept thread - self.accept.send(Command::Stop); - - // stop workers - let (tx, rx) = mpsc::channel(1); - - let dur = if msg.graceful { - Some(Duration::new(u64::from(self.shutdown_timeout), 0)) - } else { - None - }; - for worker in &self.workers { - let tx2 = tx.clone(); - ctx.spawn( - worker - .1 - .send(StopWorker { graceful: dur }) - .into_actor(self) - .then(move |_, slf, ctx| { - slf.workers.pop(); - if slf.workers.is_empty() { - let _ = tx2.send(()); - - // we need to stop system if server was spawned - if slf.exit { - ctx.run_later(Duration::from_millis(300), |_, _| { - System::current().stop(); - }); - } - } - - fut::ok(()) - }), - ); - } - - if !self.workers.is_empty() { - Response::async(rx.into_future().map(|_| ()).map_err(|_| ())) - } else { - // we need to stop system if server was spawned - if self.exit { - ctx.run_later(Duration::from_millis(300), |_, _| { - System::current().stop(); - }); - } - Response::reply(Ok(())) - } - } -} - -/// Commands from accept threads -impl StreamHandler for Server { - fn finished(&mut self, _: &mut Context) {} - - fn handle(&mut self, msg: ServerCommand, _: &mut Context) { - match msg { - ServerCommand::WorkerDied(idx) => { - let mut found = false; - for i in 0..self.workers.len() { - if self.workers[i].0 == idx { - self.workers.swap_remove(i); - found = true; - break; - } - } - - if found { - error!("Worker has died {:?}, restarting", idx); - - let mut new_idx = self.workers.len(); - 'found: loop { - for i in 0..self.workers.len() { - if self.workers[i].0 == new_idx { - new_idx += 1; - continue 'found; - } - } - break; - } - - let (addr, worker) = - self.start_worker(new_idx, self.accept.get_notify()); - self.workers.push((new_idx, addr)); - self.accept.send(Command::Worker(worker)); - } - } - } - } -} - -#[derive(Clone, Default)] -///Contains information about connection. 
-pub struct Connections(Arc); - -impl Connections { - fn new(notify: AcceptNotify, maxconn: usize, maxconnrate: usize) -> Self { - let maxconn_low = if maxconn > 10 { maxconn - 10 } else { 0 }; - let maxconnrate_low = if maxconnrate > 10 { - maxconnrate - 10 - } else { - 0 - }; - - Connections(Arc::new(ConnectionsInner { - notify, - maxconn, - maxconnrate, - maxconn_low, - maxconnrate_low, - conn: AtomicUsize::new(0), - connrate: AtomicUsize::new(0), - })) - } - - pub(crate) fn available(&self) -> bool { - self.0.available() - } - - pub(crate) fn num_connections(&self) -> usize { - self.0.conn.load(Ordering::Relaxed) - } - - /// Report opened connection - pub fn connection(&self) -> ConnectionTag { - ConnectionTag::new(self.0.clone()) - } - - /// Report rate connection, rate is usually ssl handshake - pub fn connection_rate(&self) -> ConnectionRateTag { - ConnectionRateTag::new(self.0.clone()) - } -} - -#[derive(Default)] -struct ConnectionsInner { - notify: AcceptNotify, - conn: AtomicUsize, - connrate: AtomicUsize, - maxconn: usize, - maxconnrate: usize, - maxconn_low: usize, - maxconnrate_low: usize, -} - -impl ConnectionsInner { - fn available(&self) -> bool { - if self.maxconnrate <= self.connrate.load(Ordering::Relaxed) { - false - } else { - self.maxconn > self.conn.load(Ordering::Relaxed) - } - } - - fn notify_maxconn(&self, maxconn: usize) { - if maxconn > self.maxconn_low && maxconn <= self.maxconn { - self.notify.notify(); - } - } - - fn notify_maxconnrate(&self, connrate: usize) { - if connrate > self.maxconnrate_low && connrate <= self.maxconnrate { - self.notify.notify(); - } - } -} - -/// Type responsible for max connection stat. -/// -/// Max connections stat get updated on drop. -pub struct ConnectionTag(Arc); - -impl ConnectionTag { - fn new(inner: Arc) -> Self { - inner.conn.fetch_add(1, Ordering::Relaxed); - ConnectionTag(inner) - } -} - -impl Drop for ConnectionTag { - fn drop(&mut self) { - let conn = self.0.conn.fetch_sub(1, Ordering::Relaxed); - self.0.notify_maxconn(conn); - } -} - -/// Type responsible for max connection rate stat. -/// -/// Max connections rate stat get updated on drop. 
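(The connection accounting above is a plain RAII counter: the tag bumps a shared atomic when it is created and decrements it again in `Drop`, so the count stays correct on every exit path. A minimal version of that guard pattern, shown on its own and independent of the deleted `Connections` machinery:)

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

/// Guard that keeps a shared connection count accurate via Drop.
struct ConnGuard(Arc<AtomicUsize>);

impl ConnGuard {
    fn new(counter: Arc<AtomicUsize>) -> Self {
        counter.fetch_add(1, Ordering::Relaxed);
        ConnGuard(counter)
    }
}

impl Drop for ConnGuard {
    fn drop(&mut self) {
        self.0.fetch_sub(1, Ordering::Relaxed);
    }
}

fn main() {
    let live = Arc::new(AtomicUsize::new(0));
    let guard = ConnGuard::new(live.clone());
    assert_eq!(live.load(Ordering::Relaxed), 1);
    drop(guard);
    assert_eq!(live.load(Ordering::Relaxed), 0);
}
```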
-pub struct ConnectionRateTag(Arc); - -impl ConnectionRateTag { - fn new(inner: Arc) -> Self { - inner.connrate.fetch_add(1, Ordering::Relaxed); - ConnectionRateTag(inner) - } -} - -impl Drop for ConnectionRateTag { - fn drop(&mut self) { - let connrate = self.0.connrate.fetch_sub(1, Ordering::Relaxed); - self.0.notify_maxconnrate(connrate); - } -} diff --git a/src/server/settings.rs b/src/server/settings.rs index fc0d931f0..2ca0b9b95 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -17,7 +17,7 @@ use tokio_timer::{Delay, Interval}; use super::channel::Node; use super::message::{Request, RequestPool}; -use super::server::{ConnectionRateTag, ConnectionTag, Connections}; +// use super::server::{ConnectionRateTag, ConnectionTag, Connections}; use super::KeepAlive; use body::Body; use httpresponse::{HttpResponse, HttpResponseBuilder, HttpResponsePool}; @@ -140,7 +140,6 @@ pub(crate) struct WorkerSettings { ka_enabled: bool, bytes: Rc, messages: &'static RequestPool, - conns: Connections, node: RefCell>, date: UnsafeCell, } @@ -148,9 +147,8 @@ pub(crate) struct WorkerSettings { impl WorkerSettings { pub(crate) fn create( apps: Vec, keep_alive: KeepAlive, settings: ServerSettings, - conns: Connections, ) -> Rc> { - let settings = Rc::new(Self::new(apps, keep_alive, settings, conns)); + let settings = Rc::new(Self::new(apps, keep_alive, settings)); // periodic date update let s = settings.clone(); @@ -169,7 +167,7 @@ impl WorkerSettings { impl WorkerSettings { pub(crate) fn new( - h: Vec, keep_alive: KeepAlive, settings: ServerSettings, conns: Connections, + h: Vec, keep_alive: KeepAlive, settings: ServerSettings, ) -> WorkerSettings { let (keep_alive, ka_enabled) = match keep_alive { KeepAlive::Timeout(val) => (val as u64, true), @@ -185,7 +183,6 @@ impl WorkerSettings { date: UnsafeCell::new(Date::new()), keep_alive, ka_enabled, - conns, } } @@ -227,10 +224,6 @@ impl WorkerSettings { RequestPool::get(self.messages) } - pub fn connection(&self) -> ConnectionTag { - self.conns.connection() - } - fn update_date(&self) { // Unsafe: WorkerSetting is !Sync and !Send unsafe { &mut *self.date.get() }.update(); @@ -249,11 +242,6 @@ impl WorkerSettings { dst.extend_from_slice(date_bytes); } } - - #[allow(dead_code)] - pub(crate) fn connection_rate(&self) -> ConnectionRateTag { - self.conns.connection_rate() - } } struct Date { diff --git a/src/server/worker.rs b/src/server/worker.rs deleted file mode 100644 index 77128adc0..000000000 --- a/src/server/worker.rs +++ /dev/null @@ -1,139 +0,0 @@ -use std::{net, time}; - -use futures::sync::mpsc::{SendError, UnboundedSender}; -use futures::sync::oneshot; -use futures::Future; - -use actix::msgs::StopArbiter; -use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message, Response}; - -use super::server::{Connections, ServiceHandler}; -use super::Token; - -#[derive(Message)] -pub(crate) struct Conn { - pub io: T, - pub handler: Token, - pub token: Token, - pub peer: Option, -} - -pub(crate) struct Socket { - pub lst: net::TcpListener, - pub addr: net::SocketAddr, - pub token: Token, -} - -#[derive(Clone)] -pub(crate) struct WorkerClient { - pub idx: usize, - tx: UnboundedSender>, - conns: Connections, -} - -impl WorkerClient { - pub fn new( - idx: usize, tx: UnboundedSender>, conns: Connections, - ) -> Self { - WorkerClient { idx, tx, conns } - } - - pub fn send( - &self, msg: Conn, - ) -> Result<(), SendError>> { - self.tx.unbounded_send(msg) - } - - pub fn available(&self) -> bool { - self.conns.available() - } -} - -/// Stop worker 
message. Returns `true` on successful shutdown -/// and `false` if some connections still alive. -pub(crate) struct StopWorker { - pub graceful: Option, -} - -impl Message for StopWorker { - type Result = Result; -} - -/// Http worker -/// -/// Worker accepts Socket objects via unbounded channel and start requests -/// processing. -pub(crate) struct Worker { - conns: Connections, - handlers: Vec>, -} - -impl Actor for Worker { - type Context = Context; -} - -impl Worker { - pub(crate) fn new(conns: Connections, handlers: Vec>) -> Self { - Worker { conns, handlers } - } - - fn shutdown(&self, force: bool) { - self.handlers.iter().for_each(|h| h.shutdown(force)); - } - - fn shutdown_timeout( - &self, ctx: &mut Context, tx: oneshot::Sender, dur: time::Duration, - ) { - // sleep for 1 second and then check again - ctx.run_later(time::Duration::new(1, 0), move |slf, ctx| { - let num = slf.conns.num_connections(); - if num == 0 { - let _ = tx.send(true); - Arbiter::current().do_send(StopArbiter(0)); - } else if let Some(d) = dur.checked_sub(time::Duration::new(1, 0)) { - slf.shutdown_timeout(ctx, tx, d); - } else { - info!("Force shutdown http worker, {} connections", num); - slf.shutdown(true); - let _ = tx.send(false); - Arbiter::current().do_send(StopArbiter(0)); - } - }); - } -} - -impl Handler> for Worker { - type Result = (); - - fn handle(&mut self, msg: Conn, _: &mut Context) { - self.handlers[msg.handler.0].handle(msg.token, msg.io, msg.peer) - } -} - -/// `StopWorker` message handler -impl Handler for Worker { - type Result = Response; - - fn handle(&mut self, msg: StopWorker, ctx: &mut Context) -> Self::Result { - let num = self.conns.num_connections(); - if num == 0 { - info!("Shutting down http worker, 0 connections"); - Response::reply(Ok(true)) - } else if let Some(dur) = msg.graceful { - self.shutdown(false); - let (tx, rx) = oneshot::channel(); - let num = self.conns.num_connections(); - if num != 0 { - info!("Graceful http worker shutdown, {} connections", num); - self.shutdown_timeout(ctx, tx, dur); - Response::reply(Ok(true)) - } else { - Response::async(rx.map_err(|_| ())) - } - } else { - info!("Force shutdown http worker, {} connections", num); - self.shutdown(true); - Response::reply(Ok(false)) - } - } -} From c9a52e3197d3d34e41732f54cb99983b8d1bd8e7 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 8 Sep 2018 09:20:18 -0700 Subject: [PATCH 124/219] refactor date generatioin --- src/server/channel.rs | 5 ++- src/server/h1.rs | 9 +++-- src/server/h1writer.rs | 5 ++- src/server/h2.rs | 12 +++---- src/server/h2writer.rs | 11 +++--- src/server/http.rs | 9 +++-- src/server/settings.rs | 80 ++++++++++++++++++++++-------------------- 7 files changed, 64 insertions(+), 67 deletions(-) diff --git a/src/server/channel.rs b/src/server/channel.rs index d83e9a38e..6d0992bc9 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -1,5 +1,4 @@ use std::net::{Shutdown, SocketAddr}; -use std::rc::Rc; use std::{io, ptr, time}; use bytes::{Buf, BufMut, BytesMut}; @@ -15,7 +14,7 @@ const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0"; enum HttpProtocol { H1(h1::Http1), H2(h2::Http2), - Unknown(Rc>, Option, T, BytesMut), + Unknown(WorkerSettings, Option, T, BytesMut), } enum ProtocolKind { @@ -40,7 +39,7 @@ where H: HttpHandler + 'static, { pub(crate) fn new( - settings: Rc>, io: T, peer: Option, + settings: WorkerSettings, io: T, peer: Option, ) -> HttpChannel { let ka_timeout = settings.keep_alive_timer(); diff --git a/src/server/h1.rs b/src/server/h1.rs index 
afe143b4a..82ab914a5 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -1,6 +1,5 @@ use std::collections::VecDeque; use std::net::SocketAddr; -use std::rc::Rc; use std::time::{Duration, Instant}; use bytes::BytesMut; @@ -43,7 +42,7 @@ bitflags! { pub(crate) struct Http1 { flags: Flags, - settings: Rc>, + settings: WorkerSettings, addr: Option, stream: H1Writer, decoder: H1Decoder, @@ -90,7 +89,7 @@ where H: HttpHandler + 'static, { pub fn new( - settings: Rc>, stream: T, addr: Option, + settings: WorkerSettings, stream: T, addr: Option, buf: BytesMut, is_eof: bool, keepalive_timer: Option, ) -> Self { Http1 { @@ -99,7 +98,7 @@ where } else { Flags::KEEPALIVE }, - stream: H1Writer::new(stream, Rc::clone(&settings)), + stream: H1Writer::new(stream, settings.clone()), decoder: H1Decoder::new(), payload: None, tasks: VecDeque::new(), @@ -112,7 +111,7 @@ where #[inline] pub fn settings(&self) -> &WorkerSettings { - self.settings.as_ref() + &self.settings } #[inline] diff --git a/src/server/h1writer.rs b/src/server/h1writer.rs index 72a68aeb0..15451659d 100644 --- a/src/server/h1writer.rs +++ b/src/server/h1writer.rs @@ -1,7 +1,6 @@ // #![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))] use std::io::{self, Write}; -use std::rc::Rc; use bytes::{BufMut, BytesMut}; use futures::{Async, Poll}; @@ -38,11 +37,11 @@ pub(crate) struct H1Writer { headers_size: u32, buffer: Output, buffer_capacity: usize, - settings: Rc>, + settings: WorkerSettings, } impl H1Writer { - pub fn new(stream: T, settings: Rc>) -> H1Writer { + pub fn new(stream: T, settings: WorkerSettings) -> H1Writer { H1Writer { flags: Flags::KEEPALIVE, written: 0, diff --git a/src/server/h2.rs b/src/server/h2.rs index 913e2cd70..ba52a8843 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -38,7 +38,7 @@ where H: HttpHandler + 'static, { flags: Flags, - settings: Rc>, + settings: WorkerSettings, addr: Option, state: State>, tasks: VecDeque>, @@ -58,7 +58,7 @@ where H: HttpHandler + 'static, { pub fn new( - settings: Rc>, io: T, addr: Option, buf: Bytes, + settings: WorkerSettings, io: T, addr: Option, buf: Bytes, keepalive_timer: Option, ) -> Self { let extensions = io.extensions(); @@ -83,7 +83,7 @@ where } pub fn settings(&self) -> &WorkerSettings { - self.settings.as_ref() + &self.settings } pub fn poll(&mut self) -> Poll<(), ()> { @@ -224,7 +224,7 @@ where body, resp, self.addr, - &self.settings, + self.settings.clone(), self.extensions.clone(), )); } @@ -343,7 +343,7 @@ struct Entry { impl Entry { fn new( parts: Parts, recv: RecvStream, resp: SendResponse, - addr: Option, settings: &Rc>, + addr: Option, settings: WorkerSettings, extensions: Option>, ) -> Entry where @@ -387,7 +387,7 @@ impl Entry { )) }), payload: psender, - stream: H2Writer::new(resp, Rc::clone(settings)), + stream: H2Writer::new(resp, settings), flags: EntryFlags::empty(), recv, } diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index 398e9817a..4bfc1b7c1 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -1,14 +1,12 @@ #![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))] +use std::{cmp, io}; + use bytes::{Bytes, BytesMut}; use futures::{Async, Poll}; use http2::server::SendResponse; use http2::{Reason, SendStream}; use modhttp::Response; -use std::rc::Rc; -use std::{cmp, io}; - -use http::{HttpTryFrom, Method, Version}; use super::helpers; use super::message::Request; @@ -20,6 +18,7 @@ use header::ContentEncoding; use http::header::{ HeaderValue, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, 
DATE, TRANSFER_ENCODING, }; +use http::{HttpTryFrom, Method, Version}; use httpresponse::HttpResponse; const CHUNK_SIZE: usize = 16_384; @@ -40,12 +39,12 @@ pub(crate) struct H2Writer { written: u64, buffer: Output, buffer_capacity: usize, - settings: Rc>, + settings: WorkerSettings, } impl H2Writer { pub fn new( - respond: SendResponse, settings: Rc>, + respond: SendResponse, settings: WorkerSettings, ) -> H2Writer { H2Writer { stream: None, diff --git a/src/server/http.rs b/src/server/http.rs index 5059b1326..b55842fa3 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -1,5 +1,4 @@ use std::marker::PhantomData; -use std::rc::Rc; use std::sync::Arc; use std::{io, mem, net, time}; @@ -10,7 +9,7 @@ use futures::{Async, Poll, Stream}; use net2::TcpBuilder; use num_cpus; -use actix_net::{ssl, NewService, Service, Server}; +use actix_net::{ssl, NewService, Server, Service}; //#[cfg(feature = "tls")] //use native_tls::TlsAcceptor; @@ -603,7 +602,7 @@ where H: HttpHandler, Io: IoStream, { - settings: Rc>, + settings: WorkerSettings, tcp_ka: Option, _t: PhantomData, } @@ -621,7 +620,7 @@ where } else { None }; - let settings = WorkerSettings::create(apps, keep_alive, settings); + let settings = WorkerSettings::new(apps, keep_alive, settings); HttpServiceHandler { tcp_ka, @@ -647,7 +646,7 @@ where fn call(&mut self, mut req: Self::Request) -> Self::Future { let _ = req.set_nodelay(true); - HttpChannel::new(Rc::clone(&self.settings), req, None) + HttpChannel::new(self.settings.clone(), req, None) } // fn shutdown(&self, force: bool) { diff --git a/src/server/settings.rs b/src/server/settings.rs index 2ca0b9b95..439d0e755 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -2,22 +2,21 @@ use std::cell::{RefCell, RefMut, UnsafeCell}; use std::collections::VecDeque; use std::fmt::Write; use std::rc::Rc; -use std::time::{Duration, Instant}; +use std::time::Duration; use std::{env, fmt, net}; -use actix::Arbiter; use bytes::BytesMut; -use futures::Stream; +use futures::{future, Future}; use futures_cpupool::CpuPool; use http::StatusCode; use lazycell::LazyCell; use parking_lot::Mutex; use time; -use tokio_timer::{Delay, Interval}; +use tokio_timer::{sleep, Delay, Interval}; +use tokio_current_thread::spawn; use super::channel::Node; use super::message::{Request, RequestPool}; -// use super::server::{ConnectionRateTag, ConnectionTag, Connections}; use super::KeepAlive; use body::Body; use httpresponse::{HttpResponse, HttpResponseBuilder, HttpResponsePool}; @@ -134,34 +133,21 @@ impl ServerSettings { // "Sun, 06 Nov 1994 08:49:37 GMT".len() const DATE_VALUE_LENGTH: usize = 29; -pub(crate) struct WorkerSettings { +pub(crate) struct WorkerSettings(Rc>); + +struct Inner { h: Vec, keep_alive: u64, ka_enabled: bool, bytes: Rc, messages: &'static RequestPool, node: RefCell>, - date: UnsafeCell, + date: UnsafeCell<(bool, Date)>, } -impl WorkerSettings { - pub(crate) fn create( - apps: Vec, keep_alive: KeepAlive, settings: ServerSettings, - ) -> Rc> { - let settings = Rc::new(Self::new(apps, keep_alive, settings)); - - // periodic date update - let s = settings.clone(); - Arbiter::spawn( - Interval::new(Instant::now(), Duration::from_secs(1)) - .map_err(|_| ()) - .and_then(move |_| { - s.update_date(); - Ok(()) - }).fold((), |(), _| Ok(())), - ); - - settings +impl Clone for WorkerSettings { + fn clone(&self) -> Self { + WorkerSettings(self.0.clone()) } } @@ -175,23 +161,23 @@ impl WorkerSettings { KeepAlive::Disabled => (0, false), }; - WorkerSettings { + WorkerSettings(Rc::new(Inner { 
h, + keep_alive, + ka_enabled, bytes: Rc::new(SharedBytesPool::new()), messages: RequestPool::pool(settings), node: RefCell::new(Node::head()), - date: UnsafeCell::new(Date::new()), - keep_alive, - ka_enabled, - } + date: UnsafeCell::new((false, Date::new())), + })) } pub fn head(&self) -> RefMut> { - self.node.borrow_mut() + self.0.node.borrow_mut() } pub fn handlers(&self) -> &Vec { - &self.h + &self.0.h } pub fn keep_alive_timer(&self) -> Option { @@ -205,33 +191,49 @@ impl WorkerSettings { } pub fn keep_alive(&self) -> u64 { - self.keep_alive + self.0.keep_alive } pub fn keep_alive_enabled(&self) -> bool { - self.ka_enabled + self.0.ka_enabled } pub fn get_bytes(&self) -> BytesMut { - self.bytes.get_bytes() + self.0.bytes.get_bytes() } pub fn release_bytes(&self, bytes: BytesMut) { - self.bytes.release_bytes(bytes) + self.0.bytes.release_bytes(bytes) } pub fn get_request(&self) -> Request { - RequestPool::get(self.messages) + RequestPool::get(self.0.messages) } fn update_date(&self) { // Unsafe: WorkerSetting is !Sync and !Send - unsafe { &mut *self.date.get() }.update(); + unsafe { (&mut *self.0.date.get()).0 = false }; } +} +impl WorkerSettings { pub fn set_date(&self, dst: &mut BytesMut, full: bool) { // Unsafe: WorkerSetting is !Sync and !Send - let date_bytes = unsafe { &(*self.date.get()).bytes }; + let date_bytes = unsafe { + let date = &mut (*self.0.date.get()); + if !date.0 { + date.1.update(); + date.0 = true; + + // periodic date update + let s = self.clone(); + spawn(sleep(Duration::from_secs(1)).then(move |_| { + s.update_date(); + future::ok(()) + })); + } + &date.1.bytes + }; if full { let mut buf: [u8; 39] = [0; 39]; buf[..6].copy_from_slice(b"date: "); From 7cf9af9b555e9360eab4c5dee4be5965d9e4e6c2 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 8 Sep 2018 09:21:24 -0700 Subject: [PATCH 125/219] disable ssl for travis --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index f03c95238..494a6a300 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,12 +32,12 @@ script: - | if [[ "$TRAVIS_RUST_VERSION" != "stable" ]]; then cargo clean - cargo test --features="alpn,tls,rust-tls" -- --nocapture + cargo test --features="" -- --nocapture fi - | if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin - cargo tarpaulin --features="alpn,tls,rust-tls" --out Xml --no-count + cargo tarpaulin --features="" --out Xml --no-count bash <(curl -s https://codecov.io/bash) echo "Uploaded code coverage" fi @@ -46,7 +46,7 @@ script: after_success: - | if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then - cargo doc --features "alpn, tls, rust-tls, session" --no-deps && + cargo doc --features "session" --no-deps && echo "" > target/doc/index.html && git clone https://github.com/davisp/ghp-import.git && ./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc && From 6a61138bf80205342feb4140dfcb574a1e1cdf04 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 8 Sep 2018 14:55:39 -0700 Subject: [PATCH 126/219] enable ssl feature --- .travis.yml | 6 +- Cargo.toml | 3 + src/lib.rs | 4 +- src/server/h1.rs | 21 ++-- src/server/http.rs | 259 +++++++++++++++++++++----------------- src/server/mod.rs | 40 +----- src/server/settings.rs | 28 +++-- src/server/ssl/mod.rs | 22 ++-- src/server/ssl/openssl.rs 
| 91 ++++---------- tests/test_server.rs | 3 +- 10 files changed, 224 insertions(+), 253 deletions(-) diff --git a/.travis.yml b/.travis.yml index 494a6a300..e2d70678e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,12 +32,12 @@ script: - | if [[ "$TRAVIS_RUST_VERSION" != "stable" ]]; then cargo clean - cargo test --features="" -- --nocapture + cargo test --features="ssl" -- --nocapture fi - | if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin - cargo tarpaulin --features="" --out Xml --no-count + cargo tarpaulin --features="ssl" --out Xml --no-count bash <(curl -s https://codecov.io/bash) echo "Uploaded code coverage" fi @@ -46,7 +46,7 @@ script: after_success: - | if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then - cargo doc --features "session" --no-deps && + cargo doc --features "ssl,session" --no-deps && echo "" > target/doc/index.html && git clone https://github.com/davisp/ghp-import.git && ./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc && diff --git a/Cargo.toml b/Cargo.toml index d4ea4fc1e..536806316 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,6 +35,9 @@ default = ["session", "brotli", "flate2-c"] tls = ["native-tls", "tokio-tls"] # openssl +ssl = ["openssl", "tokio-openssl", "actix-net/ssl"] + +# deprecated, use "ssl" alpn = ["openssl", "tokio-openssl", "actix-net/ssl"] # rustls diff --git a/src/lib.rs b/src/lib.rs index 1dfe143ef..099b0b16c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -64,8 +64,8 @@ //! ## Package feature //! //! * `tls` - enables ssl support via `native-tls` crate -//! * `alpn` - enables ssl support via `openssl` crate, require for `http/2` -//! support +//! * `ssl` - enables ssl support via `openssl` crate, supports `http/2` +//! * `rust-tls` - enables ssl support via `rustls` crate, supports `http/2` //! * `uds` - enables support for making client requests via Unix Domain Sockets. //! Unix only. Not necessary for *serving* requests. //! 
* `session` - enables session support, includes `ring` crate as diff --git a/src/server/h1.rs b/src/server/h1.rs index 82ab914a5..1d2ddbe2d 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -517,15 +517,14 @@ mod tests { use httpmessage::HttpMessage; use server::h1decoder::Message; use server::settings::{ServerSettings, WorkerSettings}; - use server::{Connections, KeepAlive, Request}; + use server::{KeepAlive, Request}; - fn wrk_settings() -> Rc> { - Rc::new(WorkerSettings::::new( + fn wrk_settings() -> WorkerSettings { + WorkerSettings::::new( Vec::new(), KeepAlive::Os, ServerSettings::default(), - Connections::default(), - )) + ) } impl Message { @@ -644,9 +643,9 @@ mod tests { fn test_req_parse1() { let buf = Buffer::new("GET /test HTTP/1.1\r\n\r\n"); let readbuf = BytesMut::new(); - let settings = Rc::new(wrk_settings()); + let settings = wrk_settings(); - let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false, None); + let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, false, None); h1.poll_io(); h1.poll_io(); assert_eq!(h1.tasks.len(), 1); @@ -657,9 +656,9 @@ mod tests { let buf = Buffer::new(""); let readbuf = BytesMut::from(Vec::::from(&b"GET /test HTTP/1.1\r\n\r\n"[..])); - let settings = Rc::new(wrk_settings()); + let settings = wrk_settings(); - let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true, None); + let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, true, None); h1.poll_io(); assert_eq!(h1.tasks.len(), 1); } @@ -668,9 +667,9 @@ mod tests { fn test_req_parse_err() { let buf = Buffer::new("GET /test HTTP/1\r\n\r\n"); let readbuf = BytesMut::new(); - let settings = Rc::new(wrk_settings()); + let settings = wrk_settings(); - let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false, None); + let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, false, None); h1.poll_io(); h1.poll_io(); assert!(h1.flags.contains(Flags::ERROR)); diff --git a/src/server/http.rs b/src/server/http.rs index b55842fa3..725cfbac0 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -2,19 +2,19 @@ use std::marker::PhantomData; use std::sync::Arc; use std::{io, mem, net, time}; -use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System}; +use actix::{Actor, Addr, AsyncContext, Context, Handler, System}; +use actix_net::{ssl, NewService, NewServiceExt, Server, Service}; use futures::future::{ok, FutureResult}; use futures::{Async, Poll, Stream}; use net2::TcpBuilder; use num_cpus; - -use actix_net::{ssl, NewService, Server, Service}; +use tokio_tcp::TcpStream; //#[cfg(feature = "tls")] //use native_tls::TlsAcceptor; -#[cfg(feature = "alpn")] +#[cfg(any(feature = "alpn", feature = "ssl"))] use openssl::ssl::SslAcceptorBuilder; //#[cfg(feature = "rust-tls")] @@ -25,9 +25,10 @@ use super::settings::{ServerSettings, WorkerSettings}; use super::{HttpHandler, IntoHttpHandler, IoStream, KeepAlive}; struct Socket { + scheme: &'static str, lst: net::TcpListener, addr: net::SocketAddr, - handler: Box>, + handler: Box>, } /// An HTTP Server @@ -194,10 +195,7 @@ where /// and the user should be presented with an enumeration of which /// socket requires which protocol. 
pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> { - self.sockets - .iter() - .map(|s| (s.addr, s.handler.scheme())) - .collect() + self.sockets.iter().map(|s| (s.addr, s.scheme)).collect() } /// Use listener for accepting incoming connection requests @@ -209,7 +207,8 @@ where self.sockets.push(Socket { lst, addr, - handler: Box::new(SimpleHandler { + scheme: "http", + handler: Box::new(SimpleFactory { addr, factory: self.factory.clone(), }), @@ -218,22 +217,28 @@ where self } - // #[doc(hidden)] - // /// Use listener for accepting incoming connection requests - // pub fn listen_with(mut self, lst: net::TcpListener, acceptor: A) -> Self - // where - // A: AcceptorService + Send + 'static, - // { - // let token = Token(self.handlers.len()); - // let addr = lst.local_addr().unwrap(); - // self.handlers.push(Box::new(StreamHandler::new( - // lst.local_addr().unwrap(), - // acceptor, - // ))); - // self.sockets.push(Socket { lst, addr, token }); + #[doc(hidden)] + /// Use listener for accepting incoming connection requests + pub(crate) fn listen_with(mut self, lst: net::TcpListener, acceptor: F) -> Self + where + F: Fn() -> T + Send + Clone + 'static, + T: NewService + Clone + 'static, + T::Response: IoStream, + { + let addr = lst.local_addr().unwrap(); + self.sockets.push(Socket { + lst, + addr, + scheme: "https", + handler: Box::new(AcceptorFactory { + addr, + acceptor, + factory: self.factory.clone(), + }), + }); - // self - // } + self + } // #[cfg(feature = "tls")] // /// Use listener for accepting incoming tls connection requests @@ -246,24 +251,27 @@ where // self.listen_with(lst, NativeTlsAcceptor::new(acceptor)) // } - // #[cfg(feature = "alpn")] - // /// Use listener for accepting incoming tls connection requests - // /// - // /// This method sets alpn protocols to "h2" and "http/1.1" - // pub fn listen_ssl( - // self, lst: net::TcpListener, builder: SslAcceptorBuilder, - // ) -> io::Result { - // use super::{OpensslAcceptor, ServerFlags}; + #[cfg(any(feature = "alpn", feature = "ssl"))] + /// Use listener for accepting incoming tls connection requests + /// + /// This method sets alpn protocols to "h2" and "http/1.1" + pub fn listen_ssl( + self, lst: net::TcpListener, builder: SslAcceptorBuilder, + ) -> io::Result { + use super::{openssl_acceptor_with_flags, ServerFlags}; - // alpn support - // let flags = if self.no_http2 { - // ServerFlags::HTTP1 - // } else { - // ServerFlags::HTTP1 | ServerFlags::HTTP2 - // }; + let flags = if self.no_http2 { + ServerFlags::HTTP1 + } else { + ServerFlags::HTTP1 | ServerFlags::HTTP2 + }; - // Ok(self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?)) - // } + let acceptor = openssl_acceptor_with_flags(builder, flags)?; + + Ok(self.listen_with(lst, move || { + ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ()) + })) + } // #[cfg(feature = "rust-tls")] // /// Use listener for accepting incoming tls connection requests @@ -400,60 +408,6 @@ where // } } -struct HttpService -where - H: HttpHandler, - F: IntoHttpHandler, - Io: IoStream, -{ - factory: Arc Vec + Send + Sync>, - addr: net::SocketAddr, - host: Option, - keep_alive: KeepAlive, - _t: PhantomData<(H, Io)>, -} - -impl NewService for HttpService -where - H: HttpHandler, - F: IntoHttpHandler, - Io: IoStream, -{ - type Request = Io; - type Response = (); - type Error = (); - type InitError = (); - type Service = HttpServiceHandler; - type Future = FutureResult; - - fn new_service(&self) -> Self::Future { - let s = ServerSettings::new(Some(self.addr), 
&self.host, false); - let apps: Vec<_> = (*self.factory)() - .into_iter() - .map(|h| h.into_handler()) - .collect(); - - ok(HttpServiceHandler::new(apps, self.keep_alive, s)) - } -} - -impl Clone for HttpService -where - H: HttpHandler, - F: IntoHttpHandler, - Io: IoStream, -{ - fn clone(&self) -> HttpService { - HttpService { - addr: self.addr, - factory: self.factory.clone(), - host: self.host.clone(), - keep_alive: self.keep_alive, - _t: PhantomData, - } - } -} - impl HttpServer { /// Start listening for incoming connections. /// @@ -500,8 +454,9 @@ impl HttpServer { for socket in sockets { let Socket { lst, - addr: _, handler, + addr: _, + scheme: _, } = socket; srv = handler.register(srv, lst, self.host.clone(), self.keep_alive); } @@ -597,6 +552,43 @@ impl HttpServer { // } // } +struct HttpService +where + H: HttpHandler, + F: IntoHttpHandler, + Io: IoStream, +{ + factory: Arc Vec + Send + Sync>, + addr: net::SocketAddr, + host: Option, + keep_alive: KeepAlive, + _t: PhantomData<(H, Io)>, +} + +impl NewService for HttpService +where + H: HttpHandler, + F: IntoHttpHandler, + Io: IoStream, +{ + type Request = Io; + type Response = (); + type Error = (); + type InitError = (); + type Service = HttpServiceHandler; + type Future = FutureResult; + + fn new_service(&self) -> Self::Future { + let s = ServerSettings::new(Some(self.addr), &self.host, false); + let apps: Vec<_> = (*self.factory)() + .into_iter() + .map(|h| h.into_handler()) + .collect(); + + ok(HttpServiceHandler::new(apps, self.keep_alive, s)) + } +} + struct HttpServiceHandler where H: HttpHandler, @@ -656,21 +648,17 @@ where // } } -trait IoStreamHandler: Send +trait ServiceFactory where H: IntoHttpHandler, { - fn addr(&self) -> net::SocketAddr; - - fn scheme(&self) -> &'static str; - fn register( &self, server: Server, lst: net::TcpListener, host: Option, keep_alive: KeepAlive, ) -> Server; } -struct SimpleHandler +struct SimpleFactory where H: IntoHttpHandler, { @@ -678,27 +666,19 @@ where pub factory: Arc Vec + Send + Sync>, } -impl Clone for SimpleHandler { +impl Clone for SimpleFactory { fn clone(&self) -> Self { - SimpleHandler { + SimpleFactory { addr: self.addr, factory: self.factory.clone(), } } } -impl IoStreamHandler for SimpleHandler +impl ServiceFactory for SimpleFactory where H: IntoHttpHandler + 'static, { - fn addr(&self) -> net::SocketAddr { - self.addr - } - - fn scheme(&self) -> &'static str { - "http" - } - fn register( &self, server: Server, lst: net::TcpListener, host: Option, keep_alive: KeepAlive, @@ -716,6 +696,59 @@ where } } +struct AcceptorFactory +where + F: Fn() -> T + Send + Clone + 'static, + T: NewService, + H: IntoHttpHandler, +{ + pub addr: net::SocketAddr, + pub acceptor: F, + pub factory: Arc Vec + Send + Sync>, +} + +impl Clone for AcceptorFactory +where + F: Fn() -> T + Send + Clone + 'static, + T: NewService, + H: IntoHttpHandler, +{ + fn clone(&self) -> Self { + AcceptorFactory { + addr: self.addr, + acceptor: self.acceptor.clone(), + factory: self.factory.clone(), + } + } +} + +impl ServiceFactory for AcceptorFactory +where + F: Fn() -> T + Send + Clone + 'static, + H: IntoHttpHandler + 'static, + T: NewService + Clone + 'static, + T::Response: IoStream, +{ + fn register( + &self, server: Server, lst: net::TcpListener, host: Option, + keep_alive: KeepAlive, + ) -> Server { + let addr = self.addr; + let factory = self.factory.clone(); + let acceptor = self.acceptor.clone(); + + server.listen(lst, move || { + (acceptor)().and_then(HttpService { + keep_alive, + addr, + host: 
host.clone(), + factory: factory.clone(), + _t: PhantomData, + }) + }) + } +} + fn create_tcp_listener( addr: net::SocketAddr, backlog: i32, ) -> io::Result { diff --git a/src/server/mod.rs b/src/server/mod.rs index 25eca3a71..111cc87a4 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -115,6 +115,8 @@ use futures::{Async, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_tcp::TcpStream; +pub use actix_net::{PauseServer, ResumeServer, StopServer}; + mod channel; mod error; pub(crate) mod h1; @@ -128,9 +130,9 @@ pub(crate) mod input; pub(crate) mod message; pub(crate) mod output; pub(crate) mod settings; -mod ssl; -use actix::Message; +mod ssl; +pub use self::ssl::*; pub use self::http::HttpServer; pub use self::message::Request; @@ -221,40 +223,6 @@ impl From> for KeepAlive { } } -/// Pause accepting incoming connections -/// -/// If socket contains some pending connection, they might be dropped. -/// All opened connection remains active. -#[derive(Message)] -pub struct PauseServer; - -/// Resume accepting incoming connections -#[derive(Message)] -pub struct ResumeServer; - -/// Stop incoming connection processing, stop all workers and exit. -/// -/// If server starts with `spawn()` method, then spawned thread get terminated. -pub struct StopServer { - /// Whether to try and shut down gracefully - pub graceful: bool, -} - -impl Message for StopServer { - type Result = Result<(), ()>; -} - -/// Socket id token -#[doc(hidden)] -#[derive(Clone, Copy)] -pub struct Token(usize); - -impl Token { - pub(crate) fn new(val: usize) -> Token { - Token(val) - } -} - /// Low level http request handler #[allow(unused_variables)] pub trait HttpHandler: 'static { diff --git a/src/server/settings.rs b/src/server/settings.rs index 439d0e755..47da515a0 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -303,6 +303,8 @@ impl SharedBytesPool { #[cfg(test)] mod tests { use super::*; + use futures::future; + use tokio::runtime::current_thread; #[test] fn test_date_len() { @@ -311,16 +313,20 @@ mod tests { #[test] fn test_date() { - let settings = WorkerSettings::<()>::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - Connections::default(), - ); - let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); - settings.set_date(&mut buf1, true); - let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); - settings.set_date(&mut buf2, true); - assert_eq!(buf1, buf2); + let mut rt = current_thread::Runtime::new().unwrap(); + + let _ = rt.block_on(future::lazy(|| { + let settings = WorkerSettings::<()>::new( + Vec::new(), + KeepAlive::Os, + ServerSettings::default(), + ); + let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); + settings.set_date(&mut buf1, true); + let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); + settings.set_date(&mut buf2, true); + assert_eq!(buf1, buf2); + future::ok::<_, ()>(()) + })); } } diff --git a/src/server/ssl/mod.rs b/src/server/ssl/mod.rs index bd931fb82..7101de78a 100644 --- a/src/server/ssl/mod.rs +++ b/src/server/ssl/mod.rs @@ -1,14 +1,14 @@ -#[cfg(feature = "alpn")] +#[cfg(any(feature = "alpn", feature = "ssl"))] mod openssl; -#[cfg(feature = "alpn")] -pub use self::openssl::OpensslAcceptor; +#[cfg(any(feature = "alpn", feature = "ssl"))] +pub use self::openssl::*; -#[cfg(feature = "tls")] -mod nativetls; -#[cfg(feature = "tls")] -pub use self::nativetls::{NativeTlsAcceptor, TlsStream}; +//#[cfg(feature = "tls")] +//mod nativetls; +//#[cfg(feature = "tls")] +//pub use 
self::nativetls::{NativeTlsAcceptor, TlsStream}; -#[cfg(feature = "rust-tls")] -mod rustls; -#[cfg(feature = "rust-tls")] -pub use self::rustls::RustlsAcceptor; +//#[cfg(feature = "rust-tls")] +//mod rustls; +//#[cfg(feature = "rust-tls")] +//pub use self::rustls::RustlsAcceptor; diff --git a/src/server/ssl/openssl.rs b/src/server/ssl/openssl.rs index 996c510dc..343155233 100644 --- a/src/server/ssl/openssl.rs +++ b/src/server/ssl/openssl.rs @@ -1,80 +1,41 @@ use std::net::Shutdown; use std::{io, time}; -use futures::{Future, Poll}; use openssl::ssl::{AlpnError, SslAcceptor, SslAcceptorBuilder}; -use tokio_openssl::{AcceptAsync, SslAcceptorExt, SslStream}; +use tokio_openssl::SslStream; -use server::{AcceptorService, IoStream, ServerFlags}; +use server::{IoStream, ServerFlags}; -#[derive(Clone)] -/// Support `SSL` connections via openssl package -/// -/// `alpn` feature enables `OpensslAcceptor` type -pub struct OpensslAcceptor { - acceptor: SslAcceptor, +/// Configure `SslAcceptorBuilder` with enabled `HTTP/2` and `HTTP1.1` support. +pub fn openssl_acceptor(builder: SslAcceptorBuilder) -> io::Result { + openssl_acceptor_with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2) } -impl OpensslAcceptor { - /// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support. - pub fn new(builder: SslAcceptorBuilder) -> io::Result { - OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2) +/// Configure `SslAcceptorBuilder` with custom server flags. +pub fn openssl_acceptor_with_flags( + mut builder: SslAcceptorBuilder, flags: ServerFlags, +) -> io::Result { + let mut protos = Vec::new(); + if flags.contains(ServerFlags::HTTP1) { + protos.extend(b"\x08http/1.1"); + } + if flags.contains(ServerFlags::HTTP2) { + protos.extend(b"\x02h2"); + builder.set_alpn_select_callback(|_, protos| { + const H2: &[u8] = b"\x02h2"; + if protos.windows(3).any(|window| window == H2) { + Ok(b"h2") + } else { + Err(AlpnError::NOACK) + } + }); } - /// Create `OpensslAcceptor` with custom server flags. 
- pub fn with_flags( - mut builder: SslAcceptorBuilder, flags: ServerFlags, - ) -> io::Result { - let mut protos = Vec::new(); - if flags.contains(ServerFlags::HTTP1) { - protos.extend(b"\x08http/1.1"); - } - if flags.contains(ServerFlags::HTTP2) { - protos.extend(b"\x02h2"); - builder.set_alpn_select_callback(|_, protos| { - const H2: &[u8] = b"\x02h2"; - if protos.windows(3).any(|window| window == H2) { - Ok(b"h2") - } else { - Err(AlpnError::NOACK) - } - }); - } - - if !protos.is_empty() { - builder.set_alpn_protos(&protos)?; - } - - Ok(OpensslAcceptor { - acceptor: builder.build(), - }) - } -} - -pub struct AcceptorFut(AcceptAsync); - -impl Future for AcceptorFut { - type Item = SslStream; - type Error = io::Error; - - fn poll(&mut self) -> Poll { - self.0 - .poll() - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) - } -} - -impl AcceptorService for OpensslAcceptor { - type Accepted = SslStream; - type Future = AcceptorFut; - - fn scheme(&self) -> &'static str { - "https" + if !protos.is_empty() { + builder.set_alpn_protos(&protos)?; } - fn accept(&self, io: Io) -> Self::Future { - AcceptorFut(SslAcceptorExt::accept_async(&self.acceptor, io)) - } + Ok(builder.build()) } impl IoStream for SslStream { diff --git a/tests/test_server.rs b/tests/test_server.rs index 52c47dd27..30ee13fb3 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -30,6 +30,7 @@ use modhttp::Request; use rand::distributions::Alphanumeric; use rand::Rng; use tokio::runtime::current_thread::Runtime; +use tokio_current_thread::spawn; use tokio_tcp::TcpStream; use actix_web::*; @@ -904,7 +905,7 @@ fn test_h2() { let (response, _) = client.send_request(request, false).unwrap(); // Spawn a task to run the conn... - current_thread::spawn(h2.map_err(|e| println!("GOT ERR={:?}", e))); + spawn(h2.map_err(|e| println!("GOT ERR={:?}", e))); response.and_then(|response| { assert_eq!(response.status(), http::StatusCode::OK); From a3cfc242328c4e501c22728f73db8f94c27cc413 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 9 Sep 2018 10:51:30 -0700 Subject: [PATCH 127/219] refactor acceptor service --- src/server/http.rs | 382 +++++++++++++++++++++++++++++++++------------ src/server/mod.rs | 8 +- src/test.rs | 40 ++--- 3 files changed, 307 insertions(+), 123 deletions(-) diff --git a/src/server/http.rs b/src/server/http.rs index 725cfbac0..41161ed3f 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -1,9 +1,9 @@ use std::marker::PhantomData; -use std::sync::Arc; use std::{io, mem, net, time}; use actix::{Actor, Addr, AsyncContext, Context, Handler, System}; -use actix_net::{ssl, NewService, NewServiceExt, Server, Service}; +use actix_net::server::{Server, ServerServiceFactory}; +use actix_net::{ssl, NewService, NewServiceExt, Service}; use futures::future::{ok, FutureResult}; use futures::{Async, Poll, Stream}; @@ -36,11 +36,12 @@ struct Socket { /// By default it serves HTTP2 when HTTPs is enabled, /// in order to change it, use `ServerFlags` that can be provided /// to acceptor service. 
-pub struct HttpServer +pub struct HttpServer where H: IntoHttpHandler + 'static, + F: Fn() -> Vec + Send + Clone, { - factory: Arc Vec + Send + Sync>, + factory: F, host: Option, keep_alive: KeepAlive, backlog: i32, @@ -54,21 +55,39 @@ where sockets: Vec>, } -impl HttpServer +impl HttpServer where H: IntoHttpHandler + 'static, + F: Fn() -> Vec + Send + Clone + 'static, { /// Create new http server with application factory - pub fn new(factory: F) -> Self + pub fn new(factory: F1) -> HttpServer Vec + Send + Clone> where - F: Fn() -> U + Sync + Send + 'static, + F1: Fn() -> U + Send + Clone, U: IntoIterator + 'static, { - let f = move || (factory)().into_iter().collect(); + let f = move || (factory.clone())().into_iter().collect(); HttpServer { threads: num_cpus::get(), - factory: Arc::new(f), + factory: f, + host: None, + backlog: 2048, + keep_alive: KeepAlive::Os, + shutdown_timeout: 30, + exit: false, + no_http2: false, + no_signals: false, + maxconn: 25_600, + maxconnrate: 256, + sockets: Vec::new(), + } + } + + pub(crate) fn with_factory(factory: F) -> HttpServer { + HttpServer { + factory, + threads: num_cpus::get(), host: None, backlog: 2048, keep_alive: KeepAlive::Timeout(5), @@ -211,6 +230,13 @@ where handler: Box::new(SimpleFactory { addr, factory: self.factory.clone(), + pipeline: DefaultPipelineFactory { + addr, + factory: self.factory.clone(), + host: self.host.clone(), + keep_alive: self.keep_alive, + _t: PhantomData, + }, }), }); @@ -219,22 +245,30 @@ where #[doc(hidden)] /// Use listener for accepting incoming connection requests - pub(crate) fn listen_with(mut self, lst: net::TcpListener, acceptor: F) -> Self + pub(crate) fn listen_with( + mut self, lst: net::TcpListener, acceptor: A, + ) -> Self where - F: Fn() -> T + Send + Clone + 'static, - T: NewService + Clone + 'static, - T::Response: IoStream, + A: AcceptorServiceFactory, + T: NewService + + Clone + + 'static, + Io: IoStream + Send, { let addr = lst.local_addr().unwrap(); self.sockets.push(Socket { lst, addr, scheme: "https", - handler: Box::new(AcceptorFactory { - addr, + handler: Box::new(HttpServiceBuilder::new( acceptor, - factory: self.factory.clone(), - }), + DefaultPipelineFactory::new( + self.factory.clone(), + self.host.clone(), + addr, + self.keep_alive, + ), + )), }); self @@ -256,7 +290,7 @@ where /// /// This method sets alpn protocols to "h2" and "http/1.1" pub fn listen_ssl( - self, lst: net::TcpListener, builder: SslAcceptorBuilder, + mut self, lst: net::TcpListener, builder: SslAcceptorBuilder, ) -> io::Result { use super::{openssl_acceptor_with_flags, ServerFlags}; @@ -268,9 +302,23 @@ where let acceptor = openssl_acceptor_with_flags(builder, flags)?; - Ok(self.listen_with(lst, move || { - ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ()) - })) + let addr = lst.local_addr().unwrap(); + self.sockets.push(Socket { + lst, + addr, + scheme: "https", + handler: Box::new(HttpServiceBuilder::new( + move || ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ()), + DefaultPipelineFactory::new( + self.factory.clone(), + self.host.clone(), + addr, + self.keep_alive, + ), + )), + }); + + Ok(self) } // #[cfg(feature = "rust-tls")] @@ -408,7 +456,7 @@ where // } } -impl HttpServer { +impl Vec + Send + Clone> HttpServer { /// Start listening for incoming connections. /// /// This method starts number of http workers in separate threads. 
@@ -552,35 +600,35 @@ impl HttpServer { // } // } -struct HttpService +struct HttpService where - H: HttpHandler, - F: IntoHttpHandler, + F: Fn() -> Vec, + H: IntoHttpHandler, Io: IoStream, { - factory: Arc Vec + Send + Sync>, + factory: F, addr: net::SocketAddr, host: Option, keep_alive: KeepAlive, - _t: PhantomData<(H, Io)>, + _t: PhantomData, } -impl NewService for HttpService +impl NewService for HttpService where - H: HttpHandler, - F: IntoHttpHandler, + F: Fn() -> Vec, + H: IntoHttpHandler, Io: IoStream, { type Request = Io; type Response = (); type Error = (); type InitError = (); - type Service = HttpServiceHandler; + type Service = HttpServiceHandler; type Future = FutureResult; fn new_service(&self) -> Self::Future { let s = ServerSettings::new(Some(self.addr), &self.host, false); - let apps: Vec<_> = (*self.factory)() + let apps: Vec<_> = (self.factory)() .into_iter() .map(|h| h.into_handler()) .collect(); @@ -658,94 +706,43 @@ where ) -> Server; } -struct SimpleFactory +struct SimpleFactory where H: IntoHttpHandler, + F: Fn() -> Vec + Send + Clone, + P: HttpPipelineFactory, { pub addr: net::SocketAddr, - pub factory: Arc Vec + Send + Sync>, + pub factory: F, + pub pipeline: P, } -impl Clone for SimpleFactory { +impl Clone for SimpleFactory +where + P: HttpPipelineFactory, + F: Fn() -> Vec + Send + Clone, +{ fn clone(&self) -> Self { SimpleFactory { addr: self.addr, factory: self.factory.clone(), + pipeline: self.pipeline.clone(), } } } -impl ServiceFactory for SimpleFactory +impl ServiceFactory for SimpleFactory where H: IntoHttpHandler + 'static, + F: Fn() -> Vec + Send + Clone + 'static, + P: HttpPipelineFactory, { fn register( - &self, server: Server, lst: net::TcpListener, host: Option, - keep_alive: KeepAlive, + &self, server: Server, lst: net::TcpListener, _host: Option, + _keep_alive: KeepAlive, ) -> Server { - let addr = self.addr; - let factory = self.factory.clone(); - - server.listen(lst, move || HttpService { - keep_alive, - addr, - host: host.clone(), - factory: factory.clone(), - _t: PhantomData, - }) - } -} - -struct AcceptorFactory -where - F: Fn() -> T + Send + Clone + 'static, - T: NewService, - H: IntoHttpHandler, -{ - pub addr: net::SocketAddr, - pub acceptor: F, - pub factory: Arc Vec + Send + Sync>, -} - -impl Clone for AcceptorFactory -where - F: Fn() -> T + Send + Clone + 'static, - T: NewService, - H: IntoHttpHandler, -{ - fn clone(&self) -> Self { - AcceptorFactory { - addr: self.addr, - acceptor: self.acceptor.clone(), - factory: self.factory.clone(), - } - } -} - -impl ServiceFactory for AcceptorFactory -where - F: Fn() -> T + Send + Clone + 'static, - H: IntoHttpHandler + 'static, - T: NewService + Clone + 'static, - T::Response: IoStream, -{ - fn register( - &self, server: Server, lst: net::TcpListener, host: Option, - keep_alive: KeepAlive, - ) -> Server { - let addr = self.addr; - let factory = self.factory.clone(); - let acceptor = self.acceptor.clone(); - - server.listen(lst, move || { - (acceptor)().and_then(HttpService { - keep_alive, - addr, - host: host.clone(), - factory: factory.clone(), - _t: PhantomData, - }) - }) + let pipeline = self.pipeline.clone(); + server.listen(lst, move || pipeline.create()) } } @@ -760,3 +757,186 @@ fn create_tcp_listener( builder.bind(addr)?; Ok(builder.listen(backlog)?) 
} + +pub struct HttpServiceBuilder { + acceptor: A, + pipeline: P, + t: PhantomData, +} + +impl HttpServiceBuilder +where + A: AcceptorServiceFactory, + P: HttpPipelineFactory, + H: IntoHttpHandler, +{ + pub fn new(acceptor: A, pipeline: P) -> Self { + Self { + acceptor, + pipeline, + t: PhantomData, + } + } + + pub fn acceptor(self, acceptor: A1) -> HttpServiceBuilder + where + A1: AcceptorServiceFactory, + { + HttpServiceBuilder { + acceptor, + pipeline: self.pipeline, + t: PhantomData, + } + } + + pub fn pipeline(self, pipeline: P1) -> HttpServiceBuilder + where + P1: HttpPipelineFactory, + { + HttpServiceBuilder { + pipeline, + acceptor: self.acceptor, + t: PhantomData, + } + } + + fn finish(&self) -> impl ServerServiceFactory { + let acceptor = self.acceptor.clone(); + let pipeline = self.pipeline.clone(); + + move || acceptor.create().and_then(pipeline.create()) + } +} + +impl ServiceFactory for HttpServiceBuilder +where + A: AcceptorServiceFactory, + P: HttpPipelineFactory, + H: IntoHttpHandler, +{ + fn register( + &self, server: Server, lst: net::TcpListener, _host: Option, + _keep_alive: KeepAlive, + ) -> Server { + server.listen(lst, self.finish()) + } +} + +pub trait AcceptorServiceFactory: Send + Clone + 'static { + type Io: IoStream + Send; + type NewService: NewService< + Request = TcpStream, + Response = Self::Io, + Error = (), + InitError = (), + >; + + fn create(&self) -> Self::NewService; +} + +impl AcceptorServiceFactory for F +where + F: Fn() -> T + Send + Clone + 'static, + T::Response: IoStream + Send, + T: NewService, +{ + type Io = T::Response; + type NewService = T; + + fn create(&self) -> T { + (self)() + } +} + +pub trait HttpPipelineFactory: Send + Clone + 'static { + type Io: IoStream; + type NewService: NewService< + Request = Self::Io, + Response = (), + Error = (), + InitError = (), + >; + + fn create(&self) -> Self::NewService; +} + +impl HttpPipelineFactory for F +where + F: Fn() -> T + Send + Clone + 'static, + T: NewService, + T::Request: IoStream, +{ + type Io = T::Request; + type NewService = T; + + fn create(&self) -> T { + (self)() + } +} + +struct DefaultPipelineFactory +where + F: Fn() -> Vec + Send + Clone, +{ + factory: F, + host: Option, + addr: net::SocketAddr, + keep_alive: KeepAlive, + _t: PhantomData, +} + +impl DefaultPipelineFactory +where + Io: IoStream + Send, + F: Fn() -> Vec + Send + Clone + 'static, + H: IntoHttpHandler + 'static, +{ + fn new( + factory: F, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, + ) -> Self { + Self { + factory, + addr, + keep_alive, + host, + _t: PhantomData, + } + } +} + +impl Clone for DefaultPipelineFactory +where + Io: IoStream, + F: Fn() -> Vec + Send + Clone, + H: IntoHttpHandler, +{ + fn clone(&self) -> Self { + Self { + factory: self.factory.clone(), + addr: self.addr, + keep_alive: self.keep_alive, + host: self.host.clone(), + _t: PhantomData, + } + } +} + +impl HttpPipelineFactory for DefaultPipelineFactory +where + Io: IoStream + Send, + F: Fn() -> Vec + Send + Clone + 'static, + H: IntoHttpHandler + 'static, +{ + type Io = Io; + type NewService = HttpService; + + fn create(&self) -> Self::NewService { + HttpService { + addr: self.addr, + keep_alive: self.keep_alive, + host: self.host.clone(), + factory: self.factory.clone(), + _t: PhantomData, + } + } +} diff --git a/src/server/mod.rs b/src/server/mod.rs index 111cc87a4..6ba033762 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -174,13 +174,13 @@ const HW_BUFFER_SIZE: usize = 32_768; /// sys.run(); /// } /// ``` -pub fn 
new(factory: F) -> HttpServer +pub fn new(factory: F) -> HttpServer Vec + Send + Clone> where - F: Fn() -> U + Sync + Send + 'static, - U: IntoIterator + 'static, + F: Fn() -> U + Send + Clone + 'static, + U: IntoIterator, H: IntoHttpHandler + 'static, { - HttpServer::new(factory) + HttpServer::with_factory(move || (factory.clone())().into_iter().collect()) } #[doc(hidden)] diff --git a/src/test.rs b/src/test.rs index c068086d5..c589ea4b0 100644 --- a/src/test.rs +++ b/src/test.rs @@ -79,13 +79,13 @@ impl TestServer { /// middlewares or set handlers for test application. pub fn new(config: F) -> Self where - F: Sync + Send + 'static + Fn(&mut TestApp<()>), + F: Clone + Send + 'static + Fn(&mut TestApp<()>), { TestServerBuilder::new(|| ()).start(config) } /// Create test server builder - pub fn build() -> TestServerBuilder<()> { + pub fn build() -> TestServerBuilder<(), impl Fn() -> () + Clone + Send + 'static> { TestServerBuilder::new(|| ()) } @@ -94,9 +94,9 @@ impl TestServer { /// This method can be used for constructing application state. /// Also it can be used for external dependency initialization, /// like creating sync actors for diesel integration. - pub fn build_with_state(state: F) -> TestServerBuilder + pub fn build_with_state(state: F) -> TestServerBuilder where - F: Fn() -> S + Sync + Send + 'static, + F: Fn() -> S + Clone + Send + 'static, S: 'static, { TestServerBuilder::new(state) @@ -105,11 +105,12 @@ impl TestServer { /// Start new test server with application factory pub fn with_factory(factory: F) -> Self where - F: Fn() -> U + Sync + Send + 'static, - U: IntoIterator + 'static, + F: Fn() -> U + Send + Clone + 'static, + U: IntoIterator, H: IntoHttpHandler + 'static, { let (tx, rx) = mpsc::channel(); + let factory = move || (factory.clone())().into_iter().collect(); // run server in separate thread thread::spawn(move || { @@ -117,7 +118,7 @@ impl TestServer { let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap(); let local_addr = tcp.local_addr().unwrap(); - HttpServer::new(factory) + let _ = HttpServer::with_factory(factory) .disable_signals() .listen(tcp) .keep_alive(5) @@ -261,22 +262,25 @@ impl Drop for TestServer { /// /// This type can be used to construct an instance of `TestServer` through a /// builder-like pattern. 
-pub struct TestServerBuilder { - state: Box S + Sync + Send + 'static>, +pub struct TestServerBuilder +where + F: Fn() -> S + Send + Clone + 'static, +{ + state: F, #[cfg(feature = "alpn")] ssl: Option, #[cfg(feature = "rust-tls")] rust_ssl: Option, } -impl TestServerBuilder { +impl TestServerBuilder +where + F: Fn() -> S + Send + Clone + 'static, +{ /// Create a new test server - pub fn new(state: F) -> TestServerBuilder - where - F: Fn() -> S + Sync + Send + 'static, - { + pub fn new(state: F) -> TestServerBuilder { TestServerBuilder { - state: Box::new(state), + state, #[cfg(feature = "alpn")] ssl: None, #[cfg(feature = "rust-tls")] @@ -300,9 +304,9 @@ impl TestServerBuilder { #[allow(unused_mut)] /// Configure test application and run test server - pub fn start(mut self, config: F) -> TestServer + pub fn start(mut self, config: C) -> TestServer where - F: Sync + Send + 'static + Fn(&mut TestApp), + C: Fn(&mut TestApp) + Clone + Send + 'static, { let (tx, rx) = mpsc::channel(); @@ -324,7 +328,7 @@ impl TestServerBuilder { let sys = System::new("actix-test-server"); let state = self.state; - let mut srv = HttpServer::new(move || { + let mut srv = HttpServer::with_factory(move || { let mut app = TestApp::new(state()); config(&mut app); vec![app] From a63d3f9a7a0e5f4982404b66802c73eb9e6c65fd Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 9 Sep 2018 14:14:53 -0700 Subject: [PATCH 128/219] cleanup ServerFactory trait --- src/server/http.rs | 130 ++++++++++++++++++++++++------------------- tests/test_server.rs | 1 + 2 files changed, 75 insertions(+), 56 deletions(-) diff --git a/src/server/http.rs b/src/server/http.rs index 41161ed3f..5cdeb5642 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -351,28 +351,36 @@ where Ok(self) } - // /// Start listening for incoming connections with supplied acceptor. - // #[doc(hidden)] - // #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] - // pub fn bind_with(mut self, addr: S, acceptor: A) -> io::Result - // where - // S: net::ToSocketAddrs, - // A: AcceptorService + Send + 'static, - // { - // let sockets = self.bind2(addr)?; + /// Start listening for incoming connections with supplied acceptor. + #[doc(hidden)] + #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] + pub fn bind_with(mut self, addr: S, acceptor: A) -> io::Result + where + S: net::ToSocketAddrs, + A: AcceptorServiceFactory, + { + let sockets = self.bind2(addr)?; - // for lst in sockets { - // let token = Token(self.handlers.len()); - // let addr = lst.local_addr().unwrap(); - // self.handlers.push(Box::new(StreamHandler::new( - // lst.local_addr().unwrap(), - // acceptor.clone(), - // ))); - // self.sockets.push(Socket { lst, addr, token }) - // } + for lst in sockets { + let addr = lst.local_addr().unwrap(); + self.sockets.push(Socket { + lst, + addr, + scheme: "https", + handler: Box::new(HttpServiceBuilder::new( + acceptor.clone(), + DefaultPipelineFactory::new( + self.factory.clone(), + self.host.clone(), + addr, + self.keep_alive, + ), + )), + }); + } - // Ok(self) - // } + Ok(self) + } fn bind2( &self, addr: S, @@ -416,25 +424,50 @@ where // self.bind_with(addr, NativeTlsAcceptor::new(acceptor)) // } - // #[cfg(feature = "alpn")] - // /// Start listening for incoming tls connections. 
- // /// - // /// This method sets alpn protocols to "h2" and "http/1.1" - // pub fn bind_ssl(self, addr: S, builder: SslAcceptorBuilder) -> io::Result - // where - // S: net::ToSocketAddrs, - // { - // use super::{OpensslAcceptor, ServerFlags}; + #[cfg(any(feature = "alpn", feature = "ssl"))] + /// Start listening for incoming tls connections. + /// + /// This method sets alpn protocols to "h2" and "http/1.1" + pub fn bind_ssl( + mut self, addr: S, builder: SslAcceptorBuilder, + ) -> io::Result + where + S: net::ToSocketAddrs, + { + use super::{openssl_acceptor_with_flags, ServerFlags}; - // // alpn support - // let flags = if !self.no_http2 { - // ServerFlags::HTTP1 - // } else { - // ServerFlags::HTTP1 | ServerFlags::HTTP2 - // }; + let sockets = self.bind2(addr)?; - // self.bind_with(addr, OpensslAcceptor::with_flags(builder, flags)?) - // } + // alpn support + let flags = if !self.no_http2 { + ServerFlags::HTTP1 + } else { + ServerFlags::HTTP1 | ServerFlags::HTTP2 + }; + + let acceptor = openssl_acceptor_with_flags(builder, flags)?; + + for lst in sockets { + let addr = lst.local_addr().unwrap(); + let accpt = acceptor.clone(); + self.sockets.push(Socket { + lst, + addr, + scheme: "https", + handler: Box::new(HttpServiceBuilder::new( + move || ssl::OpensslAcceptor::new(accpt.clone()).map_err(|_| ()), + DefaultPipelineFactory::new( + self.factory.clone(), + self.host.clone(), + addr, + self.keep_alive, + ), + )), + }); + } + + Ok(self) + } // #[cfg(feature = "rust-tls")] // /// Start listening for incoming tls connections. @@ -500,13 +533,7 @@ impl Vec + Send + Clone> HttpServer { let sockets = mem::replace(&mut self.sockets, Vec::new()); for socket in sockets { - let Socket { - lst, - handler, - addr: _, - scheme: _, - } = socket; - srv = handler.register(srv, lst, self.host.clone(), self.keep_alive); + srv = socket.handler.register(srv, socket.lst); } srv.start() } @@ -700,10 +727,7 @@ trait ServiceFactory where H: IntoHttpHandler, { - fn register( - &self, server: Server, lst: net::TcpListener, host: Option, - keep_alive: KeepAlive, - ) -> Server; + fn register(&self, server: Server, lst: net::TcpListener) -> Server; } struct SimpleFactory @@ -737,10 +761,7 @@ where F: Fn() -> Vec + Send + Clone + 'static, P: HttpPipelineFactory, { - fn register( - &self, server: Server, lst: net::TcpListener, _host: Option, - _keep_alive: KeepAlive, - ) -> Server { + fn register(&self, server: Server, lst: net::TcpListener) -> Server { let pipeline = self.pipeline.clone(); server.listen(lst, move || pipeline.create()) } @@ -814,10 +835,7 @@ where P: HttpPipelineFactory, H: IntoHttpHandler, { - fn register( - &self, server: Server, lst: net::TcpListener, _host: Option, - _keep_alive: KeepAlive, - ) -> Server { + fn register(&self, server: Server, lst: net::TcpListener) -> Server { server.listen(lst, self.finish()) } } diff --git a/tests/test_server.rs b/tests/test_server.rs index 30ee13fb3..41f4bcf39 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -9,6 +9,7 @@ extern crate h2; extern crate http as modhttp; extern crate rand; extern crate tokio; +extern crate tokio_current_thread; extern crate tokio_reactor; extern crate tokio_tcp; extern crate tokio_current_thread as current_thread; From 6f3e70a92a39501c8655c9c8e45e4004e424efa6 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 9 Sep 2018 14:33:45 -0700 Subject: [PATCH 129/219] simplify application factory --- src/server/h1.rs | 75 +++++++++++++++++++++++------------------- src/server/h2.rs | 26 +++++---------- src/server/http.rs 
| 60 ++++++++++----------------------- src/server/mod.rs | 7 ++-- src/server/settings.rs | 10 +++--- src/test.rs | 12 +++---- 6 files changed, 80 insertions(+), 110 deletions(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index 1d2ddbe2d..739c66519 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -410,45 +410,52 @@ where self.keepalive_timer.take(); // search handler for request - for h in self.settings.handlers().iter() { - msg = match h.handle(msg) { - Ok(mut pipe) => { - if self.tasks.is_empty() { - match pipe.poll_io(&mut self.stream) { - Ok(Async::Ready(ready)) => { - // override keep-alive state - if self.stream.keepalive() { - self.flags.insert(Flags::KEEPALIVE); - } else { - self.flags.remove(Flags::KEEPALIVE); - } - // prepare stream for next response - self.stream.reset(); + match self.settings.handler().handle(msg) { + Ok(mut pipe) => { + if self.tasks.is_empty() { + match pipe.poll_io(&mut self.stream) { + Ok(Async::Ready(ready)) => { + // override keep-alive state + if self.stream.keepalive() { + self.flags.insert(Flags::KEEPALIVE); + } else { + self.flags.remove(Flags::KEEPALIVE); + } + // prepare stream for next response + self.stream.reset(); - if !ready { - let item = Entry { - pipe: EntryPipe::Task(pipe), - flags: EntryFlags::EOF, - }; - self.tasks.push_back(item); - } - continue 'outer; - } - Ok(Async::NotReady) => {} - Err(err) => { - error!("Unhandled error: {}", err); - self.flags.insert(Flags::ERROR); - return; + if !ready { + let item = Entry { + pipe: EntryPipe::Task(pipe), + flags: EntryFlags::EOF, + }; + self.tasks.push_back(item); } + continue 'outer; + } + Ok(Async::NotReady) => {} + Err(err) => { + error!("Unhandled error: {}", err); + self.flags.insert(Flags::ERROR); + return; } } - self.tasks.push_back(Entry { - pipe: EntryPipe::Task(pipe), - flags: EntryFlags::empty(), - }); - continue 'outer; } - Err(msg) => msg, + self.tasks.push_back(Entry { + pipe: EntryPipe::Task(pipe), + flags: EntryFlags::empty(), + }); + continue 'outer; + } + Err(msg) => { + // handler is not found + self.tasks.push_back(Entry { + pipe: EntryPipe::Error(ServerError::err( + Version::HTTP_11, + StatusCode::NOT_FOUND, + )), + flags: EntryFlags::empty(), + }); } } diff --git a/src/server/h2.rs b/src/server/h2.rs index ba52a8843..a7cf8aec5 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -368,28 +368,20 @@ impl Entry { let psender = PayloadType::new(msg.headers(), psender); // start request processing - let mut task = None; - for h in settings.handlers().iter() { - msg = match h.handle(msg) { - Ok(t) => { - task = Some(t); - break; - } - Err(msg) => msg, - } - } + let task = match settings.handler().handle(msg) { + Ok(task) => EntryPipe::Task(task), + Err(msg) => EntryPipe::Error(ServerError::err( + Version::HTTP_2, + StatusCode::NOT_FOUND, + )), + }; Entry { - task: task.map(EntryPipe::Task).unwrap_or_else(|| { - EntryPipe::Error(ServerError::err( - Version::HTTP_2, - StatusCode::NOT_FOUND, - )) - }), + task, + recv, payload: psender, stream: H2Writer::new(resp, settings), flags: EntryFlags::empty(), - recv, } } diff --git a/src/server/http.rs b/src/server/http.rs index 5cdeb5642..faee041c3 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -39,7 +39,7 @@ struct Socket { pub struct HttpServer where H: IntoHttpHandler + 'static, - F: Fn() -> Vec + Send + Clone, + F: Fn() -> H + Send + Clone, { factory: F, host: Option, @@ -58,33 +58,10 @@ where impl HttpServer where H: IntoHttpHandler + 'static, - F: Fn() -> Vec + Send + Clone + 'static, + F: Fn() -> H + Send 
+ Clone + 'static, { /// Create new http server with application factory - pub fn new(factory: F1) -> HttpServer Vec + Send + Clone> - where - F1: Fn() -> U + Send + Clone, - U: IntoIterator + 'static, - { - let f = move || (factory.clone())().into_iter().collect(); - - HttpServer { - threads: num_cpus::get(), - factory: f, - host: None, - backlog: 2048, - keep_alive: KeepAlive::Os, - shutdown_timeout: 30, - exit: false, - no_http2: false, - no_signals: false, - maxconn: 25_600, - maxconnrate: 256, - sockets: Vec::new(), - } - } - - pub(crate) fn with_factory(factory: F) -> HttpServer { + pub fn new(factory: F) -> HttpServer { HttpServer { factory, threads: num_cpus::get(), @@ -489,7 +466,7 @@ where // } } -impl Vec + Send + Clone> HttpServer { +impl H + Send + Clone> HttpServer { /// Start listening for incoming connections. /// /// This method starts number of http workers in separate threads. @@ -629,7 +606,7 @@ impl Vec + Send + Clone> HttpServer { struct HttpService where - F: Fn() -> Vec, + F: Fn() -> H, H: IntoHttpHandler, Io: IoStream, { @@ -642,7 +619,7 @@ where impl NewService for HttpService where - F: Fn() -> Vec, + F: Fn() -> H, H: IntoHttpHandler, Io: IoStream, { @@ -655,12 +632,9 @@ where fn new_service(&self) -> Self::Future { let s = ServerSettings::new(Some(self.addr), &self.host, false); - let apps: Vec<_> = (self.factory)() - .into_iter() - .map(|h| h.into_handler()) - .collect(); + let app = (self.factory)().into_handler(); - ok(HttpServiceHandler::new(apps, self.keep_alive, s)) + ok(HttpServiceHandler::new(app, self.keep_alive, s)) } } @@ -680,14 +654,14 @@ where Io: IoStream, { fn new( - apps: Vec, keep_alive: KeepAlive, settings: ServerSettings, + app: H, keep_alive: KeepAlive, settings: ServerSettings, ) -> HttpServiceHandler { let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { Some(time::Duration::new(val as u64, 0)) } else { None }; - let settings = WorkerSettings::new(apps, keep_alive, settings); + let settings = WorkerSettings::new(app, keep_alive, settings); HttpServiceHandler { tcp_ka, @@ -733,7 +707,7 @@ where struct SimpleFactory where H: IntoHttpHandler, - F: Fn() -> Vec + Send + Clone, + F: Fn() -> H + Send + Clone, P: HttpPipelineFactory, { pub addr: net::SocketAddr, @@ -744,7 +718,7 @@ where impl Clone for SimpleFactory where P: HttpPipelineFactory, - F: Fn() -> Vec + Send + Clone, + F: Fn() -> H + Send + Clone, { fn clone(&self) -> Self { SimpleFactory { @@ -758,7 +732,7 @@ where impl ServiceFactory for SimpleFactory where H: IntoHttpHandler + 'static, - F: Fn() -> Vec + Send + Clone + 'static, + F: Fn() -> H + Send + Clone + 'static, P: HttpPipelineFactory, { fn register(&self, server: Server, lst: net::TcpListener) -> Server { @@ -894,7 +868,7 @@ where struct DefaultPipelineFactory where - F: Fn() -> Vec + Send + Clone, + F: Fn() -> H + Send + Clone, { factory: F, host: Option, @@ -906,7 +880,7 @@ where impl DefaultPipelineFactory where Io: IoStream + Send, - F: Fn() -> Vec + Send + Clone + 'static, + F: Fn() -> H + Send + Clone + 'static, H: IntoHttpHandler + 'static, { fn new( @@ -925,7 +899,7 @@ where impl Clone for DefaultPipelineFactory where Io: IoStream, - F: Fn() -> Vec + Send + Clone, + F: Fn() -> H + Send + Clone, H: IntoHttpHandler, { fn clone(&self) -> Self { @@ -942,7 +916,7 @@ where impl HttpPipelineFactory for DefaultPipelineFactory where Io: IoStream + Send, - F: Fn() -> Vec + Send + Clone + 'static, + F: Fn() -> H + Send + Clone + 'static, H: IntoHttpHandler + 'static, { type Io = Io; diff --git a/src/server/mod.rs 
b/src/server/mod.rs index 6ba033762..ec7e8e4e2 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -174,13 +174,12 @@ const HW_BUFFER_SIZE: usize = 32_768; /// sys.run(); /// } /// ``` -pub fn new(factory: F) -> HttpServer Vec + Send + Clone> +pub fn new(factory: F) -> HttpServer where - F: Fn() -> U + Send + Clone + 'static, - U: IntoIterator, + F: Fn() -> H + Send + Clone + 'static, H: IntoHttpHandler + 'static, { - HttpServer::with_factory(move || (factory.clone())().into_iter().collect()) + HttpServer::new(factory) } #[doc(hidden)] diff --git a/src/server/settings.rs b/src/server/settings.rs index 47da515a0..18a8c0956 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -136,7 +136,7 @@ const DATE_VALUE_LENGTH: usize = 29; pub(crate) struct WorkerSettings(Rc>); struct Inner { - h: Vec, + handler: H, keep_alive: u64, ka_enabled: bool, bytes: Rc, @@ -153,7 +153,7 @@ impl Clone for WorkerSettings { impl WorkerSettings { pub(crate) fn new( - h: Vec, keep_alive: KeepAlive, settings: ServerSettings, + handler: H, keep_alive: KeepAlive, settings: ServerSettings, ) -> WorkerSettings { let (keep_alive, ka_enabled) = match keep_alive { KeepAlive::Timeout(val) => (val as u64, true), @@ -162,7 +162,7 @@ impl WorkerSettings { }; WorkerSettings(Rc::new(Inner { - h, + handler, keep_alive, ka_enabled, bytes: Rc::new(SharedBytesPool::new()), @@ -176,8 +176,8 @@ impl WorkerSettings { self.0.node.borrow_mut() } - pub fn handlers(&self) -> &Vec { - &self.0.h + pub fn handler(&self) -> &H { + &self.0.handler } pub fn keep_alive_timer(&self) -> Option { diff --git a/src/test.rs b/src/test.rs index c589ea4b0..b9d64f270 100644 --- a/src/test.rs +++ b/src/test.rs @@ -103,14 +103,12 @@ impl TestServer { } /// Start new test server with application factory - pub fn with_factory(factory: F) -> Self + pub fn with_factory(factory: F) -> Self where - F: Fn() -> U + Send + Clone + 'static, - U: IntoIterator, + F: Fn() -> H + Send + Clone + 'static, H: IntoHttpHandler + 'static, { let (tx, rx) = mpsc::channel(); - let factory = move || (factory.clone())().into_iter().collect(); // run server in separate thread thread::spawn(move || { @@ -118,7 +116,7 @@ impl TestServer { let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap(); let local_addr = tcp.local_addr().unwrap(); - let _ = HttpServer::with_factory(factory) + let _ = HttpServer::new(factory) .disable_signals() .listen(tcp) .keep_alive(5) @@ -328,10 +326,10 @@ where let sys = System::new("actix-test-server"); let state = self.state; - let mut srv = HttpServer::with_factory(move || { + let mut srv = HttpServer::new(move || { let mut app = TestApp::new(state()); config(&mut app); - vec![app] + app }).workers(1) .keep_alive(5) .disable_signals(); From dbb4fab4f7a91cb69d5356d5027193ba2c436dc4 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 9 Sep 2018 18:06:00 -0700 Subject: [PATCH 130/219] separate mod for HttpHandler; add HttpHandler impl for Vec --- src/server/h1.rs | 33 ++----- src/server/h2.rs | 2 +- src/server/handler.rs | 189 +++++++++++++++++++++++++++++++++++++++++ src/server/http.rs | 3 +- src/server/mod.rs | 63 +------------- src/server/settings.rs | 7 +- 6 files changed, 204 insertions(+), 93 deletions(-) create mode 100644 src/server/handler.rs diff --git a/src/server/h1.rs b/src/server/h1.rs index 739c66519..5ae841bda 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -447,7 +447,7 @@ where }); continue 'outer; } - Err(msg) => { + Err(_) => { // handler is not found self.tasks.push_back(Entry { pipe: 
EntryPipe::Error(ServerError::err( @@ -516,19 +516,22 @@ mod tests { use std::{cmp, io, time}; use bytes::{Buf, Bytes, BytesMut}; + use futures::future; use http::{Method, Version}; + use tokio::runtime::current_thread; use tokio_io::{AsyncRead, AsyncWrite}; use super::*; - use application::HttpApplication; + use application::{App, HttpApplication}; use httpmessage::HttpMessage; use server::h1decoder::Message; + use server::handler::IntoHttpHandler; use server::settings::{ServerSettings, WorkerSettings}; use server::{KeepAlive, Request}; fn wrk_settings() -> WorkerSettings { WorkerSettings::::new( - Vec::new(), + App::new().into_handler(), KeepAlive::Os, ServerSettings::default(), ) @@ -646,30 +649,6 @@ mod tests { } } - #[test] - fn test_req_parse1() { - let buf = Buffer::new("GET /test HTTP/1.1\r\n\r\n"); - let readbuf = BytesMut::new(); - let settings = wrk_settings(); - - let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, false, None); - h1.poll_io(); - h1.poll_io(); - assert_eq!(h1.tasks.len(), 1); - } - - #[test] - fn test_req_parse2() { - let buf = Buffer::new(""); - let readbuf = - BytesMut::from(Vec::::from(&b"GET /test HTTP/1.1\r\n\r\n"[..])); - let settings = wrk_settings(); - - let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, true, None); - h1.poll_io(); - assert_eq!(h1.tasks.len(), 1); - } - #[test] fn test_req_parse_err() { let buf = Buffer::new("GET /test HTTP/1\r\n\r\n"); diff --git a/src/server/h2.rs b/src/server/h2.rs index a7cf8aec5..f31c2db38 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -370,7 +370,7 @@ impl Entry { // start request processing let task = match settings.handler().handle(msg) { Ok(task) => EntryPipe::Task(task), - Err(msg) => EntryPipe::Error(ServerError::err( + Err(_) => EntryPipe::Error(ServerError::err( Version::HTTP_2, StatusCode::NOT_FOUND, )), diff --git a/src/server/handler.rs b/src/server/handler.rs new file mode 100644 index 000000000..0700e1961 --- /dev/null +++ b/src/server/handler.rs @@ -0,0 +1,189 @@ +use futures::{Async, Poll}; + +use super::message::Request; +use super::Writer; +use error::Error; + +/// Low level http request handler +#[allow(unused_variables)] +pub trait HttpHandler: 'static { + /// Request handling task + type Task: HttpHandlerTask; + + /// Handle request + fn handle(&self, req: Request) -> Result; +} + +impl HttpHandler for Box>> { + type Task = Box; + + fn handle(&self, req: Request) -> Result, Request> { + self.as_ref().handle(req) + } +} + +/// Low level http request handler +pub trait HttpHandlerTask { + /// Poll task, this method is used before or after *io* object is available + fn poll_completed(&mut self) -> Poll<(), Error> { + Ok(Async::Ready(())) + } + + /// Poll task when *io* object is available + fn poll_io(&mut self, io: &mut Writer) -> Poll; + + /// Connection is disconnected + fn disconnected(&mut self) {} +} + +impl HttpHandlerTask for Box { + fn poll_io(&mut self, io: &mut Writer) -> Poll { + self.as_mut().poll_io(io) + } +} + +/// Conversion helper trait +pub trait IntoHttpHandler { + /// The associated type which is result of conversion. + type Handler: HttpHandler; + + /// Convert into `HttpHandler` object. 
+ fn into_handler(self) -> Self::Handler; +} + +impl IntoHttpHandler for T { + type Handler = T; + + fn into_handler(self) -> Self::Handler { + self + } +} + +impl IntoHttpHandler for Vec { + type Handler = VecHttpHandler; + + fn into_handler(self) -> Self::Handler { + VecHttpHandler(self.into_iter().map(|item| item.into_handler()).collect()) + } +} + +#[doc(hidden)] +pub struct VecHttpHandler(Vec); + +impl HttpHandler for VecHttpHandler { + type Task = H::Task; + + fn handle(&self, mut req: Request) -> Result { + for h in &self.0 { + req = match h.handle(req) { + Ok(task) => return Ok(task), + Err(e) => e, + }; + } + Err(req) + } +} + +macro_rules! http_handler ({$EN:ident, $(($n:tt, $T:ident)),+} => { + impl<$($T: HttpHandler,)+> HttpHandler for ($($T,)+) { + type Task = $EN<$($T,)+>; + + fn handle(&self, mut req: Request) -> Result { + $( + req = match self.$n.handle(req) { + Ok(task) => return Ok($EN::$T(task)), + Err(e) => e, + }; + )+ + Err(req) + } + } + + #[doc(hidden)] + pub enum $EN<$($T: HttpHandler,)+> { + $($T ($T::Task),)+ + } + + impl<$($T: HttpHandler,)+> HttpHandlerTask for $EN<$($T,)+> + { + fn poll_completed(&mut self) -> Poll<(), Error> { + match self { + $($EN :: $T(ref mut task) => task.poll_completed(),)+ + } + } + + fn poll_io(&mut self, io: &mut Writer) -> Poll { + match self { + $($EN::$T(ref mut task) => task.poll_io(io),)+ + } + } + + /// Connection is disconnected + fn disconnected(&mut self) { + match self { + $($EN::$T(ref mut task) => task.disconnected(),)+ + } + } + } +}); + +http_handler!(HttpHandlerTask1, (0, A)); +http_handler!(HttpHandlerTask2, (0, A), (1, B)); +http_handler!(HttpHandlerTask3, (0, A), (1, B), (2, C)); +http_handler!(HttpHandlerTask4, (0, A), (1, B), (2, C), (3, D)); +http_handler!(HttpHandlerTask5, (0, A), (1, B), (2, C), (3, D), (4, E)); +http_handler!( + HttpHandlerTask6, + (0, A), + (1, B), + (2, C), + (3, D), + (4, E), + (5, F) +); +http_handler!( + HttpHandlerTask7, + (0, A), + (1, B), + (2, C), + (3, D), + (4, E), + (5, F), + (6, G) +); +http_handler!( + HttpHandlerTask8, + (0, A), + (1, B), + (2, C), + (3, D), + (4, E), + (5, F), + (6, G), + (7, H) +); +http_handler!( + HttpHandlerTask9, + (0, A), + (1, B), + (2, C), + (3, D), + (4, E), + (5, F), + (6, G), + (7, H), + (8, I) +); +http_handler!( + HttpHandlerTask10, + (0, A), + (1, B), + (2, C), + (3, D), + (4, E), + (5, F), + (6, G), + (7, H), + (8, I), + (9, J) +); diff --git a/src/server/http.rs b/src/server/http.rs index faee041c3..f67ebe959 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -3,7 +3,8 @@ use std::{io, mem, net, time}; use actix::{Actor, Addr, AsyncContext, Context, Handler, System}; use actix_net::server::{Server, ServerServiceFactory}; -use actix_net::{ssl, NewService, NewServiceExt, Service}; +use actix_net::service::{NewService, NewServiceExt, Service}; +use actix_net::ssl; use futures::future::{ok, FutureResult}; use futures::{Async, Poll, Stream}; diff --git a/src/server/mod.rs b/src/server/mod.rs index ec7e8e4e2..75f75fcde 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -115,7 +115,7 @@ use futures::{Async, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_tcp::TcpStream; -pub use actix_net::{PauseServer, ResumeServer, StopServer}; +pub use actix_net::server::{PauseServer, ResumeServer, StopServer}; mod channel; mod error; @@ -124,25 +124,25 @@ pub(crate) mod h1decoder; mod h1writer; mod h2; mod h2writer; +mod handler; pub(crate) mod helpers; mod http; pub(crate) mod input; pub(crate) mod message; pub(crate) mod output; 
pub(crate) mod settings; - mod ssl; -pub use self::ssl::*; +pub use self::handler::*; pub use self::http::HttpServer; pub use self::message::Request; pub use self::settings::ServerSettings; +pub use self::ssl::*; #[doc(hidden)] pub use self::helpers::write_content_length; use body::Binary; -use error::Error; use extensions::Extensions; use header::ContentEncoding; use httpresponse::HttpResponse; @@ -222,61 +222,6 @@ impl From> for KeepAlive { } } -/// Low level http request handler -#[allow(unused_variables)] -pub trait HttpHandler: 'static { - /// Request handling task - type Task: HttpHandlerTask; - - /// Handle request - fn handle(&self, req: Request) -> Result; -} - -impl HttpHandler for Box>> { - type Task = Box; - - fn handle(&self, req: Request) -> Result, Request> { - self.as_ref().handle(req) - } -} - -/// Low level http request handler -pub trait HttpHandlerTask { - /// Poll task, this method is used before or after *io* object is available - fn poll_completed(&mut self) -> Poll<(), Error> { - Ok(Async::Ready(())) - } - - /// Poll task when *io* object is available - fn poll_io(&mut self, io: &mut Writer) -> Poll; - - /// Connection is disconnected - fn disconnected(&mut self) {} -} - -impl HttpHandlerTask for Box { - fn poll_io(&mut self, io: &mut Writer) -> Poll { - self.as_mut().poll_io(io) - } -} - -/// Conversion helper trait -pub trait IntoHttpHandler { - /// The associated type which is result of conversion. - type Handler: HttpHandler; - - /// Convert into `HttpHandler` object. - fn into_handler(self) -> Self::Handler; -} - -impl IntoHttpHandler for T { - type Handler = T; - - fn into_handler(self) -> Self::Handler { - self - } -} - #[doc(hidden)] #[derive(Debug)] pub enum WriterState { diff --git a/src/server/settings.rs b/src/server/settings.rs index 18a8c0956..fe36c331b 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -316,11 +316,8 @@ mod tests { let mut rt = current_thread::Runtime::new().unwrap(); let _ = rt.block_on(future::lazy(|| { - let settings = WorkerSettings::<()>::new( - Vec::new(), - KeepAlive::Os, - ServerSettings::default(), - ); + let settings = + WorkerSettings::<()>::new((), KeepAlive::Os, ServerSettings::default()); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); settings.set_date(&mut buf1, true); let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); From 0aa0f326f72ecdfe51d1494ef1ec0b9a0fc1c379 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sat, 15 Sep 2018 10:27:58 -0700 Subject: [PATCH 131/219] fix changes from master --- src/server/settings.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/server/settings.rs b/src/server/settings.rs index fe36c331b..6b2fc7270 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -2,7 +2,7 @@ use std::cell::{RefCell, RefMut, UnsafeCell}; use std::collections::VecDeque; use std::fmt::Write; use std::rc::Rc; -use std::time::Duration; +use std::time::{Instant, Duration}; use std::{env, fmt, net}; use bytes::BytesMut; @@ -12,7 +12,7 @@ use http::StatusCode; use lazycell::LazyCell; use parking_lot::Mutex; use time; -use tokio_timer::{sleep, Delay, Interval}; +use tokio_timer::{sleep, Delay}; use tokio_current_thread::spawn; use super::channel::Node; @@ -181,9 +181,10 @@ impl WorkerSettings { } pub fn keep_alive_timer(&self) -> Option { - if self.keep_alive != 0 { + let ka = self.0.keep_alive; + if ka != 0 { Some(Delay::new( - Instant::now() + Duration::from_secs(self.keep_alive), + Instant::now() + Duration::from_secs(ka), )) 
} else { None From 9f1417af301024f07c964a0c28f56265676bd9af Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 26 Sep 2018 20:43:54 -0700 Subject: [PATCH 132/219] refactor http service builder --- Cargo.toml | 1 + src/middleware/cors.rs | 17 +- src/payload.rs | 2 +- src/server/builder.rs | 257 +++++++++++++++++++++++++++++ src/server/h1.rs | 9 +- src/server/http.rs | 359 +++-------------------------------------- src/server/mod.rs | 2 + src/server/service.rs | 133 +++++++++++++++ src/server/settings.rs | 8 +- tests/test_server.rs | 2 +- 10 files changed, 435 insertions(+), 355 deletions(-) create mode 100644 src/server/builder.rs create mode 100644 src/server/service.rs diff --git a/Cargo.toml b/Cargo.toml index 536806316..e17b72838 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,7 @@ flate2-rust = ["flate2/rust_backend"] [dependencies] actix = "0.7.0" actix-net = { git="https://github.com/actix/actix-net.git" } +#actix-net = { path = "../actix-net" } base64 = "0.9" bitflags = "1.0" diff --git a/src/middleware/cors.rs b/src/middleware/cors.rs index f1adf0c4b..953f2911c 100644 --- a/src/middleware/cors.rs +++ b/src/middleware/cors.rs @@ -1127,12 +1127,23 @@ mod tests { let resp: HttpResponse = HttpResponse::Ok().into(); let resp = cors.response(&req, resp).unwrap().response(); - let origins_str = resp.headers().get(header::ACCESS_CONTROL_ALLOW_ORIGIN).unwrap().to_str().unwrap(); + let origins_str = resp + .headers() + .get(header::ACCESS_CONTROL_ALLOW_ORIGIN) + .unwrap() + .to_str() + .unwrap(); if origins_str.starts_with("https://www.example.com") { - assert_eq!("https://www.example.com, https://www.google.com", origins_str); + assert_eq!( + "https://www.example.com, https://www.google.com", + origins_str + ); } else { - assert_eq!("https://www.google.com, https://www.example.com", origins_str); + assert_eq!( + "https://www.google.com, https://www.example.com", + origins_str + ); } } diff --git a/src/payload.rs b/src/payload.rs index 382c0b0f5..2131e3c3c 100644 --- a/src/payload.rs +++ b/src/payload.rs @@ -1,8 +1,8 @@ //! 
Payload stream use bytes::{Bytes, BytesMut}; -use futures::task::Task; #[cfg(not(test))] use futures::task::current as current_task; +use futures::task::Task; use futures::{Async, Poll, Stream}; use std::cell::RefCell; use std::cmp; diff --git a/src/server/builder.rs b/src/server/builder.rs new file mode 100644 index 000000000..4a77bcd5c --- /dev/null +++ b/src/server/builder.rs @@ -0,0 +1,257 @@ +use std::marker::PhantomData; +use std::net; + +use actix_net::server; +use actix_net::service::{NewService, NewServiceExt, Service}; +use futures::future::{ok, FutureResult}; +use futures::{Async, Poll}; +use tokio_tcp::TcpStream; + +use super::handler::IntoHttpHandler; +use super::service::HttpService; +use super::{IoStream, KeepAlive}; + +pub(crate) trait ServiceFactory +where + H: IntoHttpHandler, +{ + fn register(&self, server: server::Server, lst: net::TcpListener) -> server::Server; +} + +pub struct HttpServiceBuilder +where + F: Fn() -> H + Send + Clone, +{ + factory: F, + acceptor: A, + pipeline: P, +} + +impl HttpServiceBuilder +where + F: Fn() -> H + Send + Clone, + H: IntoHttpHandler, + A: AcceptorServiceFactory, + P: HttpPipelineFactory, +{ + pub fn new(factory: F, acceptor: A, pipeline: P) -> Self { + Self { + factory, + pipeline, + acceptor, + } + } + + pub fn acceptor(self, acceptor: A1) -> HttpServiceBuilder + where + A1: AcceptorServiceFactory, + { + HttpServiceBuilder { + acceptor, + pipeline: self.pipeline, + factory: self.factory.clone(), + } + } + + pub fn pipeline(self, pipeline: P1) -> HttpServiceBuilder + where + P1: HttpPipelineFactory, + { + HttpServiceBuilder { + pipeline, + acceptor: self.acceptor, + factory: self.factory.clone(), + } + } + + fn finish(&self) -> impl server::StreamServiceFactory { + let pipeline = self.pipeline.clone(); + let acceptor = self.acceptor.clone(); + move || acceptor.create().and_then(pipeline.create()) + } +} + +impl Clone for HttpServiceBuilder +where + F: Fn() -> H + Send + Clone, + A: AcceptorServiceFactory, + P: HttpPipelineFactory, +{ + fn clone(&self) -> Self { + HttpServiceBuilder { + factory: self.factory.clone(), + acceptor: self.acceptor.clone(), + pipeline: self.pipeline.clone(), + } + } +} + +impl ServiceFactory for HttpServiceBuilder +where + F: Fn() -> H + Send + Clone, + A: AcceptorServiceFactory, + P: HttpPipelineFactory, + H: IntoHttpHandler, +{ + fn register(&self, server: server::Server, lst: net::TcpListener) -> server::Server { + server.listen("actix-web", lst, self.finish()) + } +} + +pub trait AcceptorServiceFactory: Send + Clone + 'static { + type Io: IoStream + Send; + type NewService: NewService< + Request = TcpStream, + Response = Self::Io, + Error = (), + InitError = (), + >; + + fn create(&self) -> Self::NewService; +} + +impl AcceptorServiceFactory for F +where + F: Fn() -> T + Send + Clone + 'static, + T::Response: IoStream + Send, + T: NewService, +{ + type Io = T::Response; + type NewService = T; + + fn create(&self) -> T { + (self)() + } +} + +pub trait HttpPipelineFactory: Send + Clone + 'static { + type Io: IoStream; + type NewService: NewService< + Request = Self::Io, + Response = (), + Error = (), + InitError = (), + >; + + fn create(&self) -> Self::NewService; +} + +impl HttpPipelineFactory for F +where + F: Fn() -> T + Send + Clone + 'static, + T: NewService, + T::Request: IoStream, +{ + type Io = T::Request; + type NewService = T; + + fn create(&self) -> T { + (self)() + } +} + +pub(crate) struct DefaultPipelineFactory +where + F: Fn() -> H + Send + Clone, +{ + factory: F, + host: Option, + addr: 
net::SocketAddr, + keep_alive: KeepAlive, + _t: PhantomData, +} + +impl DefaultPipelineFactory +where + Io: IoStream + Send, + F: Fn() -> H + Send + Clone + 'static, + H: IntoHttpHandler + 'static, +{ + pub fn new( + factory: F, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, + ) -> Self { + Self { + factory, + addr, + keep_alive, + host, + _t: PhantomData, + } + } +} + +impl Clone for DefaultPipelineFactory +where + Io: IoStream, + F: Fn() -> H + Send + Clone, + H: IntoHttpHandler, +{ + fn clone(&self) -> Self { + Self { + factory: self.factory.clone(), + addr: self.addr, + keep_alive: self.keep_alive, + host: self.host.clone(), + _t: PhantomData, + } + } +} + +impl HttpPipelineFactory for DefaultPipelineFactory +where + Io: IoStream + Send, + F: Fn() -> H + Send + Clone + 'static, + H: IntoHttpHandler + 'static, +{ + type Io = Io; + type NewService = HttpService; + + fn create(&self) -> Self::NewService { + HttpService::new( + self.factory.clone(), + self.addr, + self.host.clone(), + self.keep_alive, + ) + } +} + +#[derive(Clone)] +pub(crate) struct DefaultAcceptor; + +impl AcceptorServiceFactory for DefaultAcceptor { + type Io = TcpStream; + type NewService = DefaultAcceptor; + + fn create(&self) -> Self::NewService { + DefaultAcceptor + } +} + +impl NewService for DefaultAcceptor { + type Request = TcpStream; + type Response = TcpStream; + type Error = (); + type InitError = (); + type Service = DefaultAcceptor; + type Future = FutureResult; + + fn new_service(&self) -> Self::Future { + ok(DefaultAcceptor) + } +} + +impl Service for DefaultAcceptor { + type Request = TcpStream; + type Response = TcpStream; + type Error = (); + type Future = FutureResult; + + fn poll_ready(&mut self) -> Poll<(), Self::Error> { + Ok(Async::Ready(())) + } + + fn call(&mut self, req: Self::Request) -> Self::Future { + ok(req) + } +} diff --git a/src/server/h1.rs b/src/server/h1.rs index 5ae841bda..36d40e8d3 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -89,8 +89,8 @@ where H: HttpHandler + 'static, { pub fn new( - settings: WorkerSettings, stream: T, addr: Option, - buf: BytesMut, is_eof: bool, keepalive_timer: Option, + settings: WorkerSettings, stream: T, addr: Option, buf: BytesMut, + is_eof: bool, keepalive_timer: Option, ) -> Self { Http1 { flags: if is_eof { @@ -379,10 +379,7 @@ where fn push_response_entry(&mut self, status: StatusCode) { self.tasks.push_back(Entry { - pipe: EntryPipe::Error(ServerError::err( - Version::HTTP_11, - status, - )), + pipe: EntryPipe::Error(ServerError::err(Version::HTTP_11, status)), flags: EntryFlags::empty(), }); } diff --git a/src/server/http.rs b/src/server/http.rs index f67ebe959..f54900fc3 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -1,13 +1,10 @@ -use std::marker::PhantomData; -use std::{io, mem, net, time}; +use std::{io, mem, net}; -use actix::{Actor, Addr, AsyncContext, Context, Handler, System}; -use actix_net::server::{Server, ServerServiceFactory}; -use actix_net::service::{NewService, NewServiceExt, Service}; +use actix::{Addr, System}; +use actix_net::server; +use actix_net::service::NewService; use actix_net::ssl; -use futures::future::{ok, FutureResult}; -use futures::{Async, Poll, Stream}; use net2::TcpBuilder; use num_cpus; use tokio_tcp::TcpStream; @@ -21,9 +18,9 @@ use openssl::ssl::SslAcceptorBuilder; //#[cfg(feature = "rust-tls")] //use rustls::ServerConfig; -use super::channel::HttpChannel; -use super::settings::{ServerSettings, WorkerSettings}; -use super::{HttpHandler, IntoHttpHandler, IoStream, KeepAlive}; 
+use super::builder::{AcceptorServiceFactory, HttpServiceBuilder, ServiceFactory}; +use super::builder::{DefaultAcceptor, DefaultPipelineFactory}; +use super::{IntoHttpHandler, IoStream, KeepAlive}; struct Socket { scheme: &'static str, @@ -205,17 +202,16 @@ where lst, addr, scheme: "http", - handler: Box::new(SimpleFactory { - addr, - factory: self.factory.clone(), - pipeline: DefaultPipelineFactory { + handler: Box::new(HttpServiceBuilder::new( + self.factory.clone(), + DefaultAcceptor, + DefaultPipelineFactory::new( + self.factory.clone(), + self.host.clone(), addr, - factory: self.factory.clone(), - host: self.host.clone(), - keep_alive: self.keep_alive, - _t: PhantomData, - }, - }), + self.keep_alive, + ), + )), }); self @@ -239,6 +235,7 @@ where addr, scheme: "https", handler: Box::new(HttpServiceBuilder::new( + self.factory.clone(), acceptor, DefaultPipelineFactory::new( self.factory.clone(), @@ -346,6 +343,7 @@ where addr, scheme: "https", handler: Box::new(HttpServiceBuilder::new( + self.factory.clone(), acceptor.clone(), DefaultPipelineFactory::new( self.factory.clone(), @@ -493,10 +491,10 @@ impl H + Send + Clone> HttpServer { /// sys.run(); // <- Run actix system, this method starts all async processes /// } /// ``` - pub fn start(mut self) -> Addr { + pub fn start(mut self) -> Addr { ssl::max_concurrent_ssl_connect(self.maxconnrate); - let mut srv = Server::new() + let mut srv = server::Server::new() .workers(self.threads) .maxconn(self.maxconn) .shutdown_timeout(self.shutdown_timeout); @@ -605,143 +603,6 @@ impl H + Send + Clone> HttpServer { // } // } -struct HttpService -where - F: Fn() -> H, - H: IntoHttpHandler, - Io: IoStream, -{ - factory: F, - addr: net::SocketAddr, - host: Option, - keep_alive: KeepAlive, - _t: PhantomData, -} - -impl NewService for HttpService -where - F: Fn() -> H, - H: IntoHttpHandler, - Io: IoStream, -{ - type Request = Io; - type Response = (); - type Error = (); - type InitError = (); - type Service = HttpServiceHandler; - type Future = FutureResult; - - fn new_service(&self) -> Self::Future { - let s = ServerSettings::new(Some(self.addr), &self.host, false); - let app = (self.factory)().into_handler(); - - ok(HttpServiceHandler::new(app, self.keep_alive, s)) - } -} - -struct HttpServiceHandler -where - H: HttpHandler, - Io: IoStream, -{ - settings: WorkerSettings, - tcp_ka: Option, - _t: PhantomData, -} - -impl HttpServiceHandler -where - H: HttpHandler, - Io: IoStream, -{ - fn new( - app: H, keep_alive: KeepAlive, settings: ServerSettings, - ) -> HttpServiceHandler { - let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { - Some(time::Duration::new(val as u64, 0)) - } else { - None - }; - let settings = WorkerSettings::new(app, keep_alive, settings); - - HttpServiceHandler { - tcp_ka, - settings, - _t: PhantomData, - } - } -} - -impl Service for HttpServiceHandler -where - H: HttpHandler, - Io: IoStream, -{ - type Request = Io; - type Response = (); - type Error = (); - type Future = HttpChannel; - - fn poll_ready(&mut self) -> Poll<(), Self::Error> { - Ok(Async::Ready(())) - } - - fn call(&mut self, mut req: Self::Request) -> Self::Future { - let _ = req.set_nodelay(true); - HttpChannel::new(self.settings.clone(), req, None) - } - - // fn shutdown(&self, force: bool) { - // if force { - // self.settings.head().traverse::(); - // } - // } -} - -trait ServiceFactory -where - H: IntoHttpHandler, -{ - fn register(&self, server: Server, lst: net::TcpListener) -> Server; -} - -struct SimpleFactory -where - H: IntoHttpHandler, - F: Fn() -> H + Send 
+ Clone, - P: HttpPipelineFactory, -{ - pub addr: net::SocketAddr, - pub factory: F, - pub pipeline: P, -} - -impl Clone for SimpleFactory -where - P: HttpPipelineFactory, - F: Fn() -> H + Send + Clone, -{ - fn clone(&self) -> Self { - SimpleFactory { - addr: self.addr, - factory: self.factory.clone(), - pipeline: self.pipeline.clone(), - } - } -} - -impl ServiceFactory for SimpleFactory -where - H: IntoHttpHandler + 'static, - F: Fn() -> H + Send + Clone + 'static, - P: HttpPipelineFactory, -{ - fn register(&self, server: Server, lst: net::TcpListener) -> Server { - let pipeline = self.pipeline.clone(); - server.listen(lst, move || pipeline.create()) - } -} - fn create_tcp_listener( addr: net::SocketAddr, backlog: i32, ) -> io::Result { @@ -753,183 +614,3 @@ fn create_tcp_listener( builder.bind(addr)?; Ok(builder.listen(backlog)?) } - -pub struct HttpServiceBuilder { - acceptor: A, - pipeline: P, - t: PhantomData, -} - -impl HttpServiceBuilder -where - A: AcceptorServiceFactory, - P: HttpPipelineFactory, - H: IntoHttpHandler, -{ - pub fn new(acceptor: A, pipeline: P) -> Self { - Self { - acceptor, - pipeline, - t: PhantomData, - } - } - - pub fn acceptor(self, acceptor: A1) -> HttpServiceBuilder - where - A1: AcceptorServiceFactory, - { - HttpServiceBuilder { - acceptor, - pipeline: self.pipeline, - t: PhantomData, - } - } - - pub fn pipeline(self, pipeline: P1) -> HttpServiceBuilder - where - P1: HttpPipelineFactory, - { - HttpServiceBuilder { - pipeline, - acceptor: self.acceptor, - t: PhantomData, - } - } - - fn finish(&self) -> impl ServerServiceFactory { - let acceptor = self.acceptor.clone(); - let pipeline = self.pipeline.clone(); - - move || acceptor.create().and_then(pipeline.create()) - } -} - -impl ServiceFactory for HttpServiceBuilder -where - A: AcceptorServiceFactory, - P: HttpPipelineFactory, - H: IntoHttpHandler, -{ - fn register(&self, server: Server, lst: net::TcpListener) -> Server { - server.listen(lst, self.finish()) - } -} - -pub trait AcceptorServiceFactory: Send + Clone + 'static { - type Io: IoStream + Send; - type NewService: NewService< - Request = TcpStream, - Response = Self::Io, - Error = (), - InitError = (), - >; - - fn create(&self) -> Self::NewService; -} - -impl AcceptorServiceFactory for F -where - F: Fn() -> T + Send + Clone + 'static, - T::Response: IoStream + Send, - T: NewService, -{ - type Io = T::Response; - type NewService = T; - - fn create(&self) -> T { - (self)() - } -} - -pub trait HttpPipelineFactory: Send + Clone + 'static { - type Io: IoStream; - type NewService: NewService< - Request = Self::Io, - Response = (), - Error = (), - InitError = (), - >; - - fn create(&self) -> Self::NewService; -} - -impl HttpPipelineFactory for F -where - F: Fn() -> T + Send + Clone + 'static, - T: NewService, - T::Request: IoStream, -{ - type Io = T::Request; - type NewService = T; - - fn create(&self) -> T { - (self)() - } -} - -struct DefaultPipelineFactory -where - F: Fn() -> H + Send + Clone, -{ - factory: F, - host: Option, - addr: net::SocketAddr, - keep_alive: KeepAlive, - _t: PhantomData, -} - -impl DefaultPipelineFactory -where - Io: IoStream + Send, - F: Fn() -> H + Send + Clone + 'static, - H: IntoHttpHandler + 'static, -{ - fn new( - factory: F, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, - ) -> Self { - Self { - factory, - addr, - keep_alive, - host, - _t: PhantomData, - } - } -} - -impl Clone for DefaultPipelineFactory -where - Io: IoStream, - F: Fn() -> H + Send + Clone, - H: IntoHttpHandler, -{ - fn clone(&self) -> Self { - 
Self { - factory: self.factory.clone(), - addr: self.addr, - keep_alive: self.keep_alive, - host: self.host.clone(), - _t: PhantomData, - } - } -} - -impl HttpPipelineFactory for DefaultPipelineFactory -where - Io: IoStream + Send, - F: Fn() -> H + Send + Clone + 'static, - H: IntoHttpHandler + 'static, -{ - type Io = Io; - type NewService = HttpService; - - fn create(&self) -> Self::NewService { - HttpService { - addr: self.addr, - keep_alive: self.keep_alive, - host: self.host.clone(), - factory: self.factory.clone(), - _t: PhantomData, - } - } -} diff --git a/src/server/mod.rs b/src/server/mod.rs index 75f75fcde..ac4ffc9af 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -117,6 +117,7 @@ use tokio_tcp::TcpStream; pub use actix_net::server::{PauseServer, ResumeServer, StopServer}; +pub(crate) mod builder; mod channel; mod error; pub(crate) mod h1; @@ -130,6 +131,7 @@ mod http; pub(crate) mod input; pub(crate) mod message; pub(crate) mod output; +pub(crate) mod service; pub(crate) mod settings; mod ssl; diff --git a/src/server/service.rs b/src/server/service.rs new file mode 100644 index 000000000..6f80cd6df --- /dev/null +++ b/src/server/service.rs @@ -0,0 +1,133 @@ +use std::marker::PhantomData; +use std::net; +use std::time::Duration; + +use actix_net::service::{NewService, Service}; +use futures::future::{ok, FutureResult}; +use futures::{Async, Poll}; + +use super::channel::HttpChannel; +use super::handler::{HttpHandler, IntoHttpHandler}; +use super::settings::{ServerSettings, WorkerSettings}; +use super::{IoStream, KeepAlive}; + +pub enum HttpServiceMessage { + /// New stream + Connect(T), + /// Gracefull shutdown + Shutdown(Duration), + /// Force shutdown + ForceShutdown, +} + +pub(crate) struct HttpService +where + F: Fn() -> H, + H: IntoHttpHandler, + Io: IoStream, +{ + factory: F, + addr: net::SocketAddr, + host: Option, + keep_alive: KeepAlive, + _t: PhantomData, +} + +impl HttpService +where + F: Fn() -> H, + H: IntoHttpHandler, + Io: IoStream, +{ + pub fn new( + factory: F, addr: net::SocketAddr, host: Option, keep_alive: KeepAlive, + ) -> Self { + HttpService { + factory, + addr, + host, + keep_alive, + _t: PhantomData, + } + } +} + +impl NewService for HttpService +where + F: Fn() -> H, + H: IntoHttpHandler, + Io: IoStream, +{ + type Request = Io; + type Response = (); + type Error = (); + type InitError = (); + type Service = HttpServiceHandler; + type Future = FutureResult; + + fn new_service(&self) -> Self::Future { + let s = ServerSettings::new(Some(self.addr), &self.host, false); + let app = (self.factory)().into_handler(); + + ok(HttpServiceHandler::new(app, self.keep_alive, s)) + } +} + +pub(crate) struct HttpServiceHandler +where + H: HttpHandler, + Io: IoStream, +{ + settings: WorkerSettings, + tcp_ka: Option, + _t: PhantomData, +} + +impl HttpServiceHandler +where + H: HttpHandler, + Io: IoStream, +{ + fn new( + app: H, keep_alive: KeepAlive, settings: ServerSettings, + ) -> HttpServiceHandler { + let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { + Some(Duration::new(val as u64, 0)) + } else { + None + }; + let settings = WorkerSettings::new(app, keep_alive, settings); + + HttpServiceHandler { + tcp_ka, + settings, + _t: PhantomData, + } + } +} + +impl Service for HttpServiceHandler +where + H: HttpHandler, + Io: IoStream, +{ + type Request = Io; + type Response = (); + type Error = (); + type Future = HttpChannel; + + fn poll_ready(&mut self) -> Poll<(), Self::Error> { + Ok(Async::Ready(())) + } + + fn call(&mut self, mut req: Self::Request) -> 
Self::Future { + let _ = req.set_nodelay(true); + HttpChannel::new(self.settings.clone(), req, None) + } + + // fn shutdown(&self, force: bool) { + // if force { + // self.settings.head().traverse::(); + // } + // } +} diff --git a/src/server/settings.rs b/src/server/settings.rs index 6b2fc7270..21ce27195 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -2,7 +2,7 @@ use std::cell::{RefCell, RefMut, UnsafeCell}; use std::collections::VecDeque; use std::fmt::Write; use std::rc::Rc; -use std::time::{Instant, Duration}; +use std::time::{Duration, Instant}; use std::{env, fmt, net}; use bytes::BytesMut; @@ -12,8 +12,8 @@ use http::StatusCode; use lazycell::LazyCell; use parking_lot::Mutex; use time; -use tokio_timer::{sleep, Delay}; use tokio_current_thread::spawn; +use tokio_timer::{sleep, Delay}; use super::channel::Node; use super::message::{Request, RequestPool}; @@ -183,9 +183,7 @@ impl WorkerSettings { pub fn keep_alive_timer(&self) -> Option { let ka = self.0.keep_alive; if ka != 0 { - Some(Delay::new( - Instant::now() + Duration::from_secs(ka), - )) + Some(Delay::new(Instant::now() + Duration::from_secs(ka))) } else { None } diff --git a/tests/test_server.rs b/tests/test_server.rs index 41f4bcf39..c1dbf531d 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -10,9 +10,9 @@ extern crate http as modhttp; extern crate rand; extern crate tokio; extern crate tokio_current_thread; +extern crate tokio_current_thread as current_thread; extern crate tokio_reactor; extern crate tokio_tcp; -extern crate tokio_current_thread as current_thread; use std::io::{Read, Write}; use std::sync::Arc; From b6a1cfa6ad4534c61da1646b7059785703ff234c Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 26 Sep 2018 22:14:14 -0700 Subject: [PATCH 133/219] update openssl support --- src/server/builder.rs | 2 ++ src/server/h1.rs | 2 -- src/server/http.rs | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/server/builder.rs b/src/server/builder.rs index 4a77bcd5c..ad4124445 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -98,6 +98,7 @@ where } } +/// This trait indicates types that can create acceptor service for http server. 
pub trait AcceptorServiceFactory: Send + Clone + 'static { type Io: IoStream + Send; type NewService: NewService< @@ -217,6 +218,7 @@ where } #[derive(Clone)] +/// Default acceptor service convert `TcpStream` to a `tokio_tcp::TcpStream` pub(crate) struct DefaultAcceptor; impl AcceptorServiceFactory for DefaultAcceptor { diff --git a/src/server/h1.rs b/src/server/h1.rs index 36d40e8d3..b6b576ed7 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -513,9 +513,7 @@ mod tests { use std::{cmp, io, time}; use bytes::{Buf, Bytes, BytesMut}; - use futures::future; use http::{Method, Version}; - use tokio::runtime::current_thread; use tokio_io::{AsyncRead, AsyncWrite}; use super::*; diff --git a/src/server/http.rs b/src/server/http.rs index f54900fc3..3baf8a237 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -268,6 +268,7 @@ where mut self, lst: net::TcpListener, builder: SslAcceptorBuilder, ) -> io::Result { use super::{openssl_acceptor_with_flags, ServerFlags}; + use actix_net::service::NewServiceExt; let flags = if self.no_http2 { ServerFlags::HTTP1 @@ -283,6 +284,7 @@ where addr, scheme: "https", handler: Box::new(HttpServiceBuilder::new( + self.factory.clone(), move || ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ()), DefaultPipelineFactory::new( self.factory.clone(), @@ -411,6 +413,7 @@ where S: net::ToSocketAddrs, { use super::{openssl_acceptor_with_flags, ServerFlags}; + use actix_net::service::NewServiceExt; let sockets = self.bind2(addr)?; @@ -431,6 +434,7 @@ where addr, scheme: "https", handler: Box::new(HttpServiceBuilder::new( + self.factory.clone(), move || ssl::OpensslAcceptor::new(accpt.clone()).map_err(|_| ()), DefaultPipelineFactory::new( self.factory.clone(), From d57579d70067e675ba47c09d52ac3bab4aa18edf Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 17:15:38 -0700 Subject: [PATCH 134/219] refactor acceptor pipeline add client timeout --- src/server/acceptor.rs | 315 +++++++++++++++++++++++++++++++++++++++++ src/server/builder.rs | 214 +++++++++++----------------- src/server/channel.rs | 2 +- src/server/http.rs | 51 ++++--- src/server/mod.rs | 4 + src/server/service.rs | 77 +++------- src/server/settings.rs | 31 ++-- 7 files changed, 474 insertions(+), 220 deletions(-) create mode 100644 src/server/acceptor.rs diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs new file mode 100644 index 000000000..d78474160 --- /dev/null +++ b/src/server/acceptor.rs @@ -0,0 +1,315 @@ +use std::time::Duration; + +use actix_net::server::ServerMessage; +use actix_net::service::{NewService, Service}; +use futures::future::{err, ok, Either, FutureResult}; +use futures::{Async, Future, Poll}; +use tokio_reactor::Handle; +use tokio_tcp::TcpStream; +use tokio_timer::{sleep, Delay}; + +use super::handler::HttpHandler; +use super::settings::WorkerSettings; +use super::IoStream; + +/// This trait indicates types that can create acceptor service for http server. 
+pub trait AcceptorServiceFactory: Send + Clone + 'static { + type Io: IoStream + Send; + type NewService: NewService< + Request = TcpStream, + Response = Self::Io, + Error = (), + InitError = (), + >; + + fn create(&self) -> Self::NewService; +} + +impl AcceptorServiceFactory for F +where + F: Fn() -> T + Send + Clone + 'static, + T::Response: IoStream + Send, + T: NewService, +{ + type Io = T::Response; + type NewService = T; + + fn create(&self) -> T { + (self)() + } +} + +#[derive(Clone)] +/// Default acceptor service convert `TcpStream` to a `tokio_tcp::TcpStream` +pub(crate) struct DefaultAcceptor; + +impl AcceptorServiceFactory for DefaultAcceptor { + type Io = TcpStream; + type NewService = DefaultAcceptor; + + fn create(&self) -> Self::NewService { + DefaultAcceptor + } +} + +impl NewService for DefaultAcceptor { + type Request = TcpStream; + type Response = TcpStream; + type Error = (); + type InitError = (); + type Service = DefaultAcceptor; + type Future = FutureResult; + + fn new_service(&self) -> Self::Future { + ok(DefaultAcceptor) + } +} + +impl Service for DefaultAcceptor { + type Request = TcpStream; + type Response = TcpStream; + type Error = (); + type Future = FutureResult; + + fn poll_ready(&mut self) -> Poll<(), Self::Error> { + Ok(Async::Ready(())) + } + + fn call(&mut self, req: Self::Request) -> Self::Future { + ok(req) + } +} + +pub(crate) struct TcpAcceptor { + inner: T, + settings: WorkerSettings, +} + +impl TcpAcceptor +where + H: HttpHandler, + T: NewService, +{ + pub(crate) fn new(settings: WorkerSettings, inner: T) -> Self { + TcpAcceptor { inner, settings } + } +} + +impl NewService for TcpAcceptor +where + H: HttpHandler, + T: NewService, +{ + type Request = ServerMessage; + type Response = (); + type Error = (); + type InitError = (); + type Service = TcpAcceptorService; + type Future = TcpAcceptorResponse; + + fn new_service(&self) -> Self::Future { + TcpAcceptorResponse { + fut: self.inner.new_service(), + settings: self.settings.clone(), + } + } +} + +pub(crate) struct TcpAcceptorResponse +where + H: HttpHandler, + T: NewService, +{ + fut: T::Future, + settings: WorkerSettings, +} + +impl Future for TcpAcceptorResponse +where + H: HttpHandler, + T: NewService, +{ + type Item = TcpAcceptorService; + type Error = (); + + fn poll(&mut self) -> Poll { + match self.fut.poll() { + Err(_) => Err(()), + Ok(Async::NotReady) => Ok(Async::NotReady), + Ok(Async::Ready(service)) => Ok(Async::Ready(TcpAcceptorService { + inner: service, + settings: self.settings.clone(), + })), + } + } +} + +pub(crate) struct TcpAcceptorService { + inner: T, + settings: WorkerSettings, +} + +impl Service for TcpAcceptorService +where + H: HttpHandler, + T: Service, +{ + type Request = ServerMessage; + type Response = (); + type Error = (); + type Future = Either, FutureResult<(), ()>>; + + fn poll_ready(&mut self) -> Poll<(), Self::Error> { + self.inner.poll_ready().map_err(|_| ()) + } + + fn call(&mut self, req: Self::Request) -> Self::Future { + match req { + ServerMessage::Connect(stream) => { + let stream = + TcpStream::from_std(stream, &Handle::default()).map_err(|e| { + error!("Can not convert to an async tcp stream: {}", e); + }); + + if let Ok(stream) = stream { + Either::A(TcpAcceptorServiceFut { + fut: self.inner.call(stream), + }) + } else { + Either::B(err(())) + } + } + ServerMessage::Shutdown(timeout) => Either::B(ok(())), + ServerMessage::ForceShutdown => { + // self.settings.head().traverse::(); + Either::B(ok(())) + } + } + } +} + +pub(crate) struct 
TcpAcceptorServiceFut { + fut: T, +} + +impl Future for TcpAcceptorServiceFut +where + T: Future, +{ + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll { + match self.fut.poll() { + Err(_) => Err(()), + Ok(Async::NotReady) => Ok(Async::NotReady), + Ok(Async::Ready(_)) => Ok(Async::Ready(())), + } + } +} + +/// Errors produced by `AcceptorTimeout` service. +#[derive(Debug)] +pub enum TimeoutError { + /// The inner service error + Service(T), + + /// The request did not complete within the specified timeout. + Timeout, +} + +/// Acceptor timeout middleware +/// +/// Applies timeout to request prcoessing. +pub(crate) struct AcceptorTimeout { + inner: T, + timeout: usize, +} + +impl AcceptorTimeout { + pub(crate) fn new(timeout: usize, inner: T) -> Self { + Self { inner, timeout } + } +} + +impl NewService for AcceptorTimeout { + type Request = T::Request; + type Response = T::Response; + type Error = TimeoutError; + type InitError = T::InitError; + type Service = AcceptorTimeoutService; + type Future = AcceptorTimeoutFut; + + fn new_service(&self) -> Self::Future { + AcceptorTimeoutFut { + fut: self.inner.new_service(), + timeout: self.timeout, + } + } +} + +#[doc(hidden)] +pub(crate) struct AcceptorTimeoutFut { + fut: T::Future, + timeout: usize, +} + +impl Future for AcceptorTimeoutFut { + type Item = AcceptorTimeoutService; + type Error = T::InitError; + + fn poll(&mut self) -> Poll { + let inner = try_ready!(self.fut.poll()); + Ok(Async::Ready(AcceptorTimeoutService { + inner, + timeout: self.timeout as u64, + })) + } +} + +/// Acceptor timeout service +/// +/// Applies timeout to request prcoessing. +pub(crate) struct AcceptorTimeoutService { + inner: T, + timeout: u64, +} + +impl Service for AcceptorTimeoutService { + type Request = T::Request; + type Response = T::Response; + type Error = TimeoutError; + type Future = AcceptorTimeoutResponse; + + fn poll_ready(&mut self) -> Poll<(), Self::Error> { + self.inner.poll_ready().map_err(TimeoutError::Service) + } + + fn call(&mut self, req: Self::Request) -> Self::Future { + AcceptorTimeoutResponse { + fut: self.inner.call(req), + sleep: sleep(Duration::from_millis(self.timeout)), + } + } +} + +pub(crate) struct AcceptorTimeoutResponse { + fut: T::Future, + sleep: Delay, +} +impl Future for AcceptorTimeoutResponse { + type Item = T::Response; + type Error = TimeoutError; + + fn poll(&mut self) -> Poll { + match self.fut.poll() { + Ok(Async::NotReady) => match self.sleep.poll() { + Err(_) => Err(TimeoutError::Timeout), + Ok(Async::Ready(_)) => Err(TimeoutError::Timeout), + Ok(Async::NotReady) => Ok(Async::NotReady), + }, + Ok(Async::Ready(resp)) => Ok(Async::Ready(resp)), + Err(err) => Err(TimeoutError::Service(err)), + } + } +} diff --git a/src/server/builder.rs b/src/server/builder.rs index ad4124445..98a2d5023 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -1,21 +1,24 @@ use std::marker::PhantomData; use std::net; +use actix_net::either::Either; use actix_net::server; -use actix_net::service::{NewService, NewServiceExt, Service}; -use futures::future::{ok, FutureResult}; -use futures::{Async, Poll}; -use tokio_tcp::TcpStream; +use actix_net::service::{NewService, NewServiceExt}; -use super::handler::IntoHttpHandler; +use super::acceptor::{AcceptorServiceFactory, AcceptorTimeout, TcpAcceptor}; +use super::handler::{HttpHandler, IntoHttpHandler}; use super::service::HttpService; +use super::settings::{ServerSettings, WorkerSettings}; use super::{IoStream, KeepAlive}; pub(crate) trait ServiceFactory where 
H: IntoHttpHandler, { - fn register(&self, server: server::Server, lst: net::TcpListener) -> server::Server; + fn register( + &self, server: server::Server, lst: net::TcpListener, host: Option, + addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, + ) -> server::Server; } pub struct HttpServiceBuilder @@ -29,11 +32,12 @@ where impl HttpServiceBuilder where - F: Fn() -> H + Send + Clone, + F: Fn() -> H + Send + Clone + 'static, H: IntoHttpHandler, A: AcceptorServiceFactory, - P: HttpPipelineFactory, + P: HttpPipelineFactory, { + /// Create http service builder pub fn new(factory: F, acceptor: A, pipeline: P) -> Self { Self { factory, @@ -42,6 +46,7 @@ where } } + /// Use different acceptor factory pub fn acceptor(self, acceptor: A1) -> HttpServiceBuilder where A1: AcceptorServiceFactory, @@ -53,9 +58,10 @@ where } } + /// Use different pipeline factory pub fn pipeline(self, pipeline: P1) -> HttpServiceBuilder where - P1: HttpPipelineFactory, + P1: HttpPipelineFactory, { HttpServiceBuilder { pipeline, @@ -64,18 +70,45 @@ where } } - fn finish(&self) -> impl server::StreamServiceFactory { + fn finish( + &self, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, + client_timeout: usize, + ) -> impl server::ServiceFactory { + let factory = self.factory.clone(); let pipeline = self.pipeline.clone(); let acceptor = self.acceptor.clone(); - move || acceptor.create().and_then(pipeline.create()) + move || { + let app = (factory)().into_handler(); + let settings = WorkerSettings::new( + app, + keep_alive, + client_timeout as u64, + ServerSettings::new(Some(addr), &host, false), + ); + + if client_timeout == 0 { + Either::A(TcpAcceptor::new( + settings.clone(), + acceptor.create().and_then(pipeline.create(settings)), + )) + } else { + Either::B(TcpAcceptor::new( + settings.clone(), + AcceptorTimeout::new(client_timeout, acceptor.create()) + .map_err(|_| ()) + .and_then(pipeline.create(settings)), + )) + } + } } } impl Clone for HttpServiceBuilder where F: Fn() -> H + Send + Clone, + H: IntoHttpHandler, A: AcceptorServiceFactory, - P: HttpPipelineFactory, + P: HttpPipelineFactory, { fn clone(&self) -> Self { HttpServiceBuilder { @@ -88,44 +121,24 @@ where impl ServiceFactory for HttpServiceBuilder where - F: Fn() -> H + Send + Clone, + F: Fn() -> H + Send + Clone + 'static, A: AcceptorServiceFactory, - P: HttpPipelineFactory, + P: HttpPipelineFactory, H: IntoHttpHandler, { - fn register(&self, server: server::Server, lst: net::TcpListener) -> server::Server { - server.listen("actix-web", lst, self.finish()) + fn register( + &self, server: server::Server, lst: net::TcpListener, host: Option, + addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, + ) -> server::Server { + server.listen2( + "actix-web", + lst, + self.finish(host, addr, keep_alive, client_timeout), + ) } } -/// This trait indicates types that can create acceptor service for http server. 
-pub trait AcceptorServiceFactory: Send + Clone + 'static { - type Io: IoStream + Send; - type NewService: NewService< - Request = TcpStream, - Response = Self::Io, - Error = (), - InitError = (), - >; - - fn create(&self) -> Self::NewService; -} - -impl AcceptorServiceFactory for F -where - F: Fn() -> T + Send + Clone + 'static, - T::Response: IoStream + Send, - T: NewService, -{ - type Io = T::Response; - type NewService = T; - - fn create(&self) -> T { - (self)() - } -} - -pub trait HttpPipelineFactory: Send + Clone + 'static { +pub trait HttpPipelineFactory: Send + Clone + 'static { type Io: IoStream; type NewService: NewService< Request = Self::Io, @@ -134,126 +147,59 @@ pub trait HttpPipelineFactory: Send + Clone + 'static { InitError = (), >; - fn create(&self) -> Self::NewService; + fn create(&self, settings: WorkerSettings) -> Self::NewService; } -impl HttpPipelineFactory for F +impl HttpPipelineFactory for F where - F: Fn() -> T + Send + Clone + 'static, + F: Fn(WorkerSettings) -> T + Send + Clone + 'static, T: NewService, T::Request: IoStream, + H: HttpHandler, { type Io = T::Request; type NewService = T; - fn create(&self) -> T { - (self)() + fn create(&self, settings: WorkerSettings) -> T { + (self)(settings) } } -pub(crate) struct DefaultPipelineFactory -where - F: Fn() -> H + Send + Clone, -{ - factory: F, - host: Option, - addr: net::SocketAddr, - keep_alive: KeepAlive, - _t: PhantomData, +pub(crate) struct DefaultPipelineFactory { + _t: PhantomData<(H, Io)>, } -impl DefaultPipelineFactory +unsafe impl Send for DefaultPipelineFactory {} + +impl DefaultPipelineFactory where Io: IoStream + Send, - F: Fn() -> H + Send + Clone + 'static, - H: IntoHttpHandler + 'static, + H: HttpHandler + 'static, { - pub fn new( - factory: F, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, - ) -> Self { - Self { - factory, - addr, - keep_alive, - host, - _t: PhantomData, - } + pub fn new() -> Self { + Self { _t: PhantomData } } } -impl Clone for DefaultPipelineFactory +impl Clone for DefaultPipelineFactory where Io: IoStream, - F: Fn() -> H + Send + Clone, - H: IntoHttpHandler, + H: HttpHandler, { fn clone(&self) -> Self { - Self { - factory: self.factory.clone(), - addr: self.addr, - keep_alive: self.keep_alive, - host: self.host.clone(), - _t: PhantomData, - } + Self { _t: PhantomData } } } -impl HttpPipelineFactory for DefaultPipelineFactory +impl HttpPipelineFactory for DefaultPipelineFactory where - Io: IoStream + Send, - F: Fn() -> H + Send + Clone + 'static, - H: IntoHttpHandler + 'static, + Io: IoStream, + H: HttpHandler + 'static, { type Io = Io; - type NewService = HttpService; + type NewService = HttpService; - fn create(&self) -> Self::NewService { - HttpService::new( - self.factory.clone(), - self.addr, - self.host.clone(), - self.keep_alive, - ) - } -} - -#[derive(Clone)] -/// Default acceptor service convert `TcpStream` to a `tokio_tcp::TcpStream` -pub(crate) struct DefaultAcceptor; - -impl AcceptorServiceFactory for DefaultAcceptor { - type Io = TcpStream; - type NewService = DefaultAcceptor; - - fn create(&self) -> Self::NewService { - DefaultAcceptor - } -} - -impl NewService for DefaultAcceptor { - type Request = TcpStream; - type Response = TcpStream; - type Error = (); - type InitError = (); - type Service = DefaultAcceptor; - type Future = FutureResult; - - fn new_service(&self) -> Self::Future { - ok(DefaultAcceptor) - } -} - -impl Service for DefaultAcceptor { - type Request = TcpStream; - type Response = TcpStream; - type Error = (); - type Future = 
FutureResult; - - fn poll_ready(&mut self) -> Poll<(), Self::Error> { - Ok(Async::Ready(())) - } - - fn call(&mut self, req: Self::Request) -> Self::Future { - ok(req) + fn create(&self, settings: WorkerSettings) -> Self::NewService { + HttpService::new(settings) } } diff --git a/src/server/channel.rs b/src/server/channel.rs index 6d0992bc9..c1e6b6b24 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -41,7 +41,7 @@ where pub(crate) fn new( settings: WorkerSettings, io: T, peer: Option, ) -> HttpChannel { - let ka_timeout = settings.keep_alive_timer(); + let ka_timeout = settings.client_timer(); HttpChannel { ka_timeout, diff --git a/src/server/http.rs b/src/server/http.rs index 3baf8a237..0fe14221e 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -18,8 +18,9 @@ use openssl::ssl::SslAcceptorBuilder; //#[cfg(feature = "rust-tls")] //use rustls::ServerConfig; -use super::builder::{AcceptorServiceFactory, HttpServiceBuilder, ServiceFactory}; -use super::builder::{DefaultAcceptor, DefaultPipelineFactory}; +use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor}; +use super::builder::DefaultPipelineFactory; +use super::builder::{HttpServiceBuilder, ServiceFactory}; use super::{IntoHttpHandler, IoStream, KeepAlive}; struct Socket { @@ -50,6 +51,7 @@ where no_signals: bool, maxconn: usize, maxconnrate: usize, + client_timeout: usize, sockets: Vec>, } @@ -72,6 +74,7 @@ where no_signals: false, maxconn: 25_600, maxconnrate: 256, + client_timeout: 5000, sockets: Vec::new(), } } @@ -130,6 +133,20 @@ where self } + /// Set server client timneout in milliseconds for first request. + /// + /// Defines a timeout for reading client request header. If a client does not transmit + /// the entire set headers within this time, the request is terminated with + /// the 408 (Request Time-out) error. + /// + /// To disable timeout set value to 0. + /// + /// By default client timeout is set to 5000 milliseconds. + pub fn client_timeout(mut self, val: usize) -> Self { + self.client_timeout = val; + self + } + /// Set server host name. 
/// /// Host name is used by application router aa a hostname for url @@ -205,12 +222,7 @@ where handler: Box::new(HttpServiceBuilder::new( self.factory.clone(), DefaultAcceptor, - DefaultPipelineFactory::new( - self.factory.clone(), - self.host.clone(), - addr, - self.keep_alive, - ), + DefaultPipelineFactory::new(), )), }); @@ -237,12 +249,7 @@ where handler: Box::new(HttpServiceBuilder::new( self.factory.clone(), acceptor, - DefaultPipelineFactory::new( - self.factory.clone(), - self.host.clone(), - addr, - self.keep_alive, - ), + DefaultPipelineFactory::new(), )), }); @@ -347,12 +354,7 @@ where handler: Box::new(HttpServiceBuilder::new( self.factory.clone(), acceptor.clone(), - DefaultPipelineFactory::new( - self.factory.clone(), - self.host.clone(), - addr, - self.keep_alive, - ), + DefaultPipelineFactory::new(), )), }); } @@ -513,7 +515,14 @@ impl H + Send + Clone> HttpServer { let sockets = mem::replace(&mut self.sockets, Vec::new()); for socket in sockets { - srv = socket.handler.register(srv, socket.lst); + srv = socket.handler.register( + srv, + socket.lst, + self.host.clone(), + socket.addr, + self.keep_alive.clone(), + self.client_timeout, + ); } srv.start() } diff --git a/src/server/mod.rs b/src/server/mod.rs index ac4ffc9af..9e91eda08 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -117,6 +117,7 @@ use tokio_tcp::TcpStream; pub use actix_net::server::{PauseServer, ResumeServer, StopServer}; +pub(crate) mod acceptor; pub(crate) mod builder; mod channel; mod error; @@ -144,6 +145,9 @@ pub use self::ssl::*; #[doc(hidden)] pub use self::helpers::write_content_length; +#[doc(hidden)] +pub use self::builder::HttpServiceBuilder; + use body::Binary; use extensions::Extensions; use header::ContentEncoding; diff --git a/src/server/service.rs b/src/server/service.rs index 6f80cd6df..042c86ed4 100644 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -1,75 +1,50 @@ use std::marker::PhantomData; -use std::net; -use std::time::Duration; use actix_net::service::{NewService, Service}; use futures::future::{ok, FutureResult}; use futures::{Async, Poll}; use super::channel::HttpChannel; -use super::handler::{HttpHandler, IntoHttpHandler}; -use super::settings::{ServerSettings, WorkerSettings}; -use super::{IoStream, KeepAlive}; +use super::handler::HttpHandler; +use super::settings::WorkerSettings; +use super::IoStream; -pub enum HttpServiceMessage { - /// New stream - Connect(T), - /// Gracefull shutdown - Shutdown(Duration), - /// Force shutdown - ForceShutdown, -} - -pub(crate) struct HttpService +pub(crate) struct HttpService where - F: Fn() -> H, - H: IntoHttpHandler, + H: HttpHandler, Io: IoStream, { - factory: F, - addr: net::SocketAddr, - host: Option, - keep_alive: KeepAlive, + settings: WorkerSettings, _t: PhantomData, } -impl HttpService +impl HttpService where - F: Fn() -> H, - H: IntoHttpHandler, + H: HttpHandler, Io: IoStream, { - pub fn new( - factory: F, addr: net::SocketAddr, host: Option, keep_alive: KeepAlive, - ) -> Self { + pub fn new(settings: WorkerSettings) -> Self { HttpService { - factory, - addr, - host, - keep_alive, + settings, _t: PhantomData, } } } -impl NewService for HttpService +impl NewService for HttpService where - F: Fn() -> H, - H: IntoHttpHandler, + H: HttpHandler, Io: IoStream, { type Request = Io; type Response = (); type Error = (); type InitError = (); - type Service = HttpServiceHandler; + type Service = HttpServiceHandler; type Future = FutureResult; fn new_service(&self) -> Self::Future { - let s = 
ServerSettings::new(Some(self.addr), &self.host, false); - let app = (self.factory)().into_handler(); - - ok(HttpServiceHandler::new(app, self.keep_alive, s)) + ok(HttpServiceHandler::new(self.settings.clone())) } } @@ -79,7 +54,7 @@ where Io: IoStream, { settings: WorkerSettings, - tcp_ka: Option, + // tcp_ka: Option, _t: PhantomData, } @@ -88,18 +63,14 @@ where H: HttpHandler, Io: IoStream, { - fn new( - app: H, keep_alive: KeepAlive, settings: ServerSettings, - ) -> HttpServiceHandler { - let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { - Some(Duration::new(val as u64, 0)) - } else { - None - }; - let settings = WorkerSettings::new(app, keep_alive, settings); + fn new(settings: WorkerSettings) -> HttpServiceHandler { + // let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { + // Some(Duration::new(val as u64, 0)) + // } else { + // None + // }; HttpServiceHandler { - tcp_ka, settings, _t: PhantomData, } @@ -124,10 +95,4 @@ where let _ = req.set_nodelay(true); HttpChannel::new(self.settings.clone(), req, None) } - - // fn shutdown(&self, force: bool) { - // if force { - // self.settings.head().traverse::(); - // } - // } } diff --git a/src/server/settings.rs b/src/server/settings.rs index 21ce27195..fe564c5b9 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -133,11 +133,12 @@ impl ServerSettings { // "Sun, 06 Nov 1994 08:49:37 GMT".len() const DATE_VALUE_LENGTH: usize = 29; -pub(crate) struct WorkerSettings(Rc>); +pub struct WorkerSettings(Rc>); struct Inner { handler: H, keep_alive: u64, + client_timeout: u64, ka_enabled: bool, bytes: Rc, messages: &'static RequestPool, @@ -153,7 +154,7 @@ impl Clone for WorkerSettings { impl WorkerSettings { pub(crate) fn new( - handler: H, keep_alive: KeepAlive, settings: ServerSettings, + handler: H, keep_alive: KeepAlive, client_timeout: u64, settings: ServerSettings, ) -> WorkerSettings { let (keep_alive, ka_enabled) = match keep_alive { KeepAlive::Timeout(val) => (val as u64, true), @@ -165,6 +166,7 @@ impl WorkerSettings { handler, keep_alive, ka_enabled, + client_timeout, bytes: Rc::new(SharedBytesPool::new()), messages: RequestPool::pool(settings), node: RefCell::new(Node::head()), @@ -172,14 +174,15 @@ impl WorkerSettings { })) } - pub fn head(&self) -> RefMut> { + pub(crate) fn head(&self) -> RefMut> { self.0.node.borrow_mut() } - pub fn handler(&self) -> &H { + pub(crate) fn handler(&self) -> &H { &self.0.handler } + #[inline] pub fn keep_alive_timer(&self) -> Option { let ka = self.0.keep_alive; if ka != 0 { @@ -189,23 +192,35 @@ impl WorkerSettings { } } + #[inline] pub fn keep_alive(&self) -> u64 { self.0.keep_alive } + #[inline] pub fn keep_alive_enabled(&self) -> bool { self.0.ka_enabled } - pub fn get_bytes(&self) -> BytesMut { + #[inline] + pub fn client_timer(&self) -> Option { + let delay = self.0.client_timeout; + if delay != 0 { + Some(Delay::new(Instant::now() + Duration::from_millis(delay))) + } else { + None + } + } + + pub(crate) fn get_bytes(&self) -> BytesMut { self.0.bytes.get_bytes() } - pub fn release_bytes(&self, bytes: BytesMut) { + pub(crate) fn release_bytes(&self, bytes: BytesMut) { self.0.bytes.release_bytes(bytes) } - pub fn get_request(&self) -> Request { + pub(crate) fn get_request(&self) -> Request { RequestPool::get(self.0.messages) } @@ -216,7 +231,7 @@ impl WorkerSettings { } impl WorkerSettings { - pub fn set_date(&self, dst: &mut BytesMut, full: bool) { + pub(crate) fn set_date(&self, dst: &mut BytesMut, full: bool) { // Unsafe: WorkerSetting is !Sync and !Send let date_bytes = 
unsafe { let date = &mut (*self.0.date.get()); From 85445ea8096e8e1edf018241cfb300c51ef19628 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 17:21:28 -0700 Subject: [PATCH 135/219] rename and simplify ServiceFactory trait --- src/server/builder.rs | 19 ++++++++----------- src/server/h1.rs | 1 + src/server/http.rs | 8 ++++---- src/server/settings.rs | 8 ++++++-- 4 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/server/builder.rs b/src/server/builder.rs index 98a2d5023..5af9d0c8f 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -2,7 +2,7 @@ use std::marker::PhantomData; use std::net; use actix_net::either::Either; -use actix_net::server; +use actix_net::server::{Server, ServiceFactory}; use actix_net::service::{NewService, NewServiceExt}; use super::acceptor::{AcceptorServiceFactory, AcceptorTimeout, TcpAcceptor}; @@ -11,14 +11,11 @@ use super::service::HttpService; use super::settings::{ServerSettings, WorkerSettings}; use super::{IoStream, KeepAlive}; -pub(crate) trait ServiceFactory -where - H: IntoHttpHandler, -{ +pub(crate) trait ServiceProvider { fn register( - &self, server: server::Server, lst: net::TcpListener, host: Option, + &self, server: Server, lst: net::TcpListener, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, - ) -> server::Server; + ) -> Server; } pub struct HttpServiceBuilder @@ -73,7 +70,7 @@ where fn finish( &self, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, - ) -> impl server::ServiceFactory { + ) -> impl ServiceFactory { let factory = self.factory.clone(); let pipeline = self.pipeline.clone(); let acceptor = self.acceptor.clone(); @@ -119,7 +116,7 @@ where } } -impl ServiceFactory for HttpServiceBuilder +impl ServiceProvider for HttpServiceBuilder where F: Fn() -> H + Send + Clone + 'static, A: AcceptorServiceFactory, @@ -127,9 +124,9 @@ where H: IntoHttpHandler, { fn register( - &self, server: server::Server, lst: net::TcpListener, host: Option, + &self, server: Server, lst: net::TcpListener, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, - ) -> server::Server { + ) -> Server { server.listen2( "actix-web", lst, diff --git a/src/server/h1.rs b/src/server/h1.rs index b6b576ed7..b5ee93e66 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -528,6 +528,7 @@ mod tests { WorkerSettings::::new( App::new().into_handler(), KeepAlive::Os, + 5000, ServerSettings::default(), ) } diff --git a/src/server/http.rs b/src/server/http.rs index 0fe14221e..49ae4f28c 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -20,14 +20,14 @@ use openssl::ssl::SslAcceptorBuilder; use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor}; use super::builder::DefaultPipelineFactory; -use super::builder::{HttpServiceBuilder, ServiceFactory}; +use super::builder::{HttpServiceBuilder, ServiceProvider}; use super::{IntoHttpHandler, IoStream, KeepAlive}; -struct Socket { +struct Socket { scheme: &'static str, lst: net::TcpListener, addr: net::SocketAddr, - handler: Box>, + handler: Box, } /// An HTTP Server @@ -52,7 +52,7 @@ where maxconn: usize, maxconnrate: usize, client_timeout: usize, - sockets: Vec>, + sockets: Vec, } impl HttpServer diff --git a/src/server/settings.rs b/src/server/settings.rs index fe564c5b9..db5f6c57b 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -330,8 +330,12 @@ mod tests { let mut rt = current_thread::Runtime::new().unwrap(); let _ = rt.block_on(future::lazy(|| { - let settings 
= - WorkerSettings::<()>::new((), KeepAlive::Os, ServerSettings::default()); + let settings = WorkerSettings::<()>::new( + (), + KeepAlive::Os, + 0, + ServerSettings::default(), + ); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); settings.set_date(&mut buf1, true); let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); From 3173c9fa830b71999424028bed4ccd4e19680cb4 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 19:34:07 -0700 Subject: [PATCH 136/219] diesable client timeout for tcp stream acceptor --- src/server/builder.rs | 22 +++++++++++++++++++--- src/server/http.rs | 12 +++++++----- 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/src/server/builder.rs b/src/server/builder.rs index 5af9d0c8f..28541820b 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -18,6 +18,7 @@ pub(crate) trait ServiceProvider { ) -> Server; } +/// Utility type that builds complete http pipeline pub struct HttpServiceBuilder where F: Fn() -> H + Send + Clone, @@ -25,6 +26,7 @@ where factory: F, acceptor: A, pipeline: P, + no_client_timer: bool, } impl HttpServiceBuilder @@ -40,9 +42,15 @@ where factory, pipeline, acceptor, + no_client_timer: false, } } + pub(crate) fn no_client_timer(mut self) -> Self { + self.no_client_timer = true; + self + } + /// Use different acceptor factory pub fn acceptor(self, acceptor: A1) -> HttpServiceBuilder where @@ -52,6 +60,7 @@ where acceptor, pipeline: self.pipeline, factory: self.factory.clone(), + no_client_timer: self.no_client_timer, } } @@ -64,6 +73,7 @@ where pipeline, acceptor: self.acceptor, factory: self.factory.clone(), + no_client_timer: self.no_client_timer, } } @@ -71,6 +81,11 @@ where &self, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, ) -> impl ServiceFactory { + let timeout = if self.no_client_timer { + 0 + } else { + client_timeout + }; let factory = self.factory.clone(); let pipeline = self.pipeline.clone(); let acceptor = self.acceptor.clone(); @@ -79,11 +94,11 @@ where let settings = WorkerSettings::new( app, keep_alive, - client_timeout as u64, + timeout as u64, ServerSettings::new(Some(addr), &host, false), ); - if client_timeout == 0 { + if timeout == 0 { Either::A(TcpAcceptor::new( settings.clone(), acceptor.create().and_then(pipeline.create(settings)), @@ -91,7 +106,7 @@ where } else { Either::B(TcpAcceptor::new( settings.clone(), - AcceptorTimeout::new(client_timeout, acceptor.create()) + AcceptorTimeout::new(timeout, acceptor.create()) .map_err(|_| ()) .and_then(pipeline.create(settings)), )) @@ -112,6 +127,7 @@ where factory: self.factory.clone(), acceptor: self.acceptor.clone(), pipeline: self.pipeline.clone(), + no_client_timer: self.no_client_timer, } } } diff --git a/src/server/http.rs b/src/server/http.rs index 49ae4f28c..6d37473c3 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -219,11 +219,13 @@ where lst, addr, scheme: "http", - handler: Box::new(HttpServiceBuilder::new( - self.factory.clone(), - DefaultAcceptor, - DefaultPipelineFactory::new(), - )), + handler: Box::new( + HttpServiceBuilder::new( + self.factory.clone(), + DefaultAcceptor, + DefaultPipelineFactory::new(), + ).no_client_timer(), + ), }); self From 0bca21ec6dd4205e5476b7eaf2c282a22f063300 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 19:57:40 -0700 Subject: [PATCH 137/219] fix ssl tests --- src/server/http.rs | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/src/server/http.rs b/src/server/http.rs index 
6d37473c3..263fd40a0 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -295,12 +295,7 @@ where handler: Box::new(HttpServiceBuilder::new( self.factory.clone(), move || ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ()), - DefaultPipelineFactory::new( - self.factory.clone(), - self.host.clone(), - addr, - self.keep_alive, - ), + DefaultPipelineFactory::new(), )), }); @@ -440,12 +435,7 @@ where handler: Box::new(HttpServiceBuilder::new( self.factory.clone(), move || ssl::OpensslAcceptor::new(accpt.clone()).map_err(|_| ()), - DefaultPipelineFactory::new( - self.factory.clone(), - self.host.clone(), - addr, - self.keep_alive, - ), + DefaultPipelineFactory::new(), )), }); } From ecfda64f6d5b433e8ba11c918c579bac755b6927 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 20:39:37 -0700 Subject: [PATCH 138/219] add native-tls support --- .travis.yml | 4 +- Cargo.toml | 4 +- src/server/http.rs | 35 ++++++---- src/server/ssl/mod.rs | 6 +- src/server/ssl/nativetls.rs | 123 +----------------------------------- 5 files changed, 31 insertions(+), 141 deletions(-) diff --git a/.travis.yml b/.travis.yml index e2d70678e..497f7bbc2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,12 +32,12 @@ script: - | if [[ "$TRAVIS_RUST_VERSION" != "stable" ]]; then cargo clean - cargo test --features="ssl" -- --nocapture + cargo test --features="ssl,tls" -- --nocapture fi - | if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin - cargo tarpaulin --features="ssl" --out Xml --no-count + cargo tarpaulin --features="ssl,tls" --out Xml --no-count bash <(curl -s https://codecov.io/bash) echo "Uploaded code coverage" fi diff --git a/Cargo.toml b/Cargo.toml index e17b72838..205e178b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ path = "src/lib.rs" default = ["session", "brotli", "flate2-c"] # tls -tls = ["native-tls", "tokio-tls"] +tls = ["native-tls", "tokio-tls", "actix-net/tls"] # openssl ssl = ["openssl", "tokio-openssl", "actix-net/ssl"] @@ -41,7 +41,7 @@ ssl = ["openssl", "tokio-openssl", "actix-net/ssl"] alpn = ["openssl", "tokio-openssl", "actix-net/ssl"] # rustls -rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"] +rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots", "actix-net/rust-tls"] # unix sockets uds = ["tokio-uds"] diff --git a/src/server/http.rs b/src/server/http.rs index 263fd40a0..1cc899816 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -9,8 +9,8 @@ use net2::TcpBuilder; use num_cpus; use tokio_tcp::TcpStream; -//#[cfg(feature = "tls")] -//use native_tls::TlsAcceptor; +#[cfg(feature = "tls")] +use native_tls::TlsAcceptor; #[cfg(any(feature = "alpn", feature = "ssl"))] use openssl::ssl::SslAcceptorBuilder; @@ -258,16 +258,27 @@ where self } - // #[cfg(feature = "tls")] - // /// Use listener for accepting incoming tls connection requests - // /// - // /// HttpServer does not change any configuration for TcpListener, - // /// it needs to be configured before passing it to listen() method. - // pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { - // use super::NativeTlsAcceptor; - // - // self.listen_with(lst, NativeTlsAcceptor::new(acceptor)) - // } + #[cfg(feature = "tls")] + /// Use listener for accepting incoming tls connection requests + /// + /// HttpServer does not change any configuration for TcpListener, + /// it needs to be configured before passing it to listen() method. 
+ pub fn listen_tls(mut self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { + use actix_net::service::NewServiceExt; + + let addr = lst.local_addr().unwrap(); + self.sockets.push(Socket { + lst, + addr, + scheme: "https", + handler: Box::new(HttpServiceBuilder::new( + self.factory.clone(), + move || ssl::NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ()), + DefaultPipelineFactory::new(), + )), + }); + self + } #[cfg(any(feature = "alpn", feature = "ssl"))] /// Use listener for accepting incoming tls connection requests diff --git a/src/server/ssl/mod.rs b/src/server/ssl/mod.rs index 7101de78a..7302cf0b4 100644 --- a/src/server/ssl/mod.rs +++ b/src/server/ssl/mod.rs @@ -3,10 +3,8 @@ mod openssl; #[cfg(any(feature = "alpn", feature = "ssl"))] pub use self::openssl::*; -//#[cfg(feature = "tls")] -//mod nativetls; -//#[cfg(feature = "tls")] -//pub use self::nativetls::{NativeTlsAcceptor, TlsStream}; +#[cfg(feature = "tls")] +mod nativetls; //#[cfg(feature = "rust-tls")] //mod rustls; diff --git a/src/server/ssl/nativetls.rs b/src/server/ssl/nativetls.rs index e35f12d2d..d59948c79 100644 --- a/src/server/ssl/nativetls.rs +++ b/src/server/ssl/nativetls.rs @@ -1,61 +1,9 @@ use std::net::Shutdown; use std::{io, time}; -use futures::{Async, Future, Poll}; -use native_tls::{self, HandshakeError, TlsAcceptor}; -use tokio_io::{AsyncRead, AsyncWrite}; +use actix_net::ssl::TlsStream; -use server::{AcceptorService, IoStream}; - -#[derive(Clone)] -/// Support `SSL` connections via native-tls package -/// -/// `tls` feature enables `NativeTlsAcceptor` type -pub struct NativeTlsAcceptor { - acceptor: TlsAcceptor, -} - -/// A wrapper around an underlying raw stream which implements the TLS or SSL -/// protocol. -/// -/// A `TlsStream` represents a handshake that has been completed successfully -/// and both the server and the client are ready for receiving and sending -/// data. Bytes read from a `TlsStream` are decrypted from `S` and bytes written -/// to a `TlsStream` are encrypted when passing through to `S`. -#[derive(Debug)] -pub struct TlsStream { - inner: native_tls::TlsStream, -} - -/// Future returned from `NativeTlsAcceptor::accept` which will resolve -/// once the accept handshake has finished. 
-pub struct Accept { - inner: Option, HandshakeError>>, -} - -impl NativeTlsAcceptor { - /// Create `NativeTlsAcceptor` instance - pub fn new(acceptor: TlsAcceptor) -> Self { - NativeTlsAcceptor { - acceptor: acceptor.into(), - } - } -} - -impl AcceptorService for NativeTlsAcceptor { - type Accepted = TlsStream; - type Future = Accept; - - fn scheme(&self) -> &'static str { - "https" - } - - fn accept(&self, io: Io) -> Self::Future { - Accept { - inner: Some(self.acceptor.accept(io)), - } - } -} +use server::IoStream; impl IoStream for TlsStream { #[inline] @@ -74,70 +22,3 @@ impl IoStream for TlsStream { self.get_mut().get_mut().set_linger(dur) } } - -impl Future for Accept { - type Item = TlsStream; - type Error = io::Error; - - fn poll(&mut self) -> Poll { - match self.inner.take().expect("cannot poll MidHandshake twice") { - Ok(stream) => Ok(TlsStream { inner: stream }.into()), - Err(HandshakeError::Failure(e)) => { - Err(io::Error::new(io::ErrorKind::Other, e)) - } - Err(HandshakeError::WouldBlock(s)) => match s.handshake() { - Ok(stream) => Ok(TlsStream { inner: stream }.into()), - Err(HandshakeError::Failure(e)) => { - Err(io::Error::new(io::ErrorKind::Other, e)) - } - Err(HandshakeError::WouldBlock(s)) => { - self.inner = Some(Err(HandshakeError::WouldBlock(s))); - Ok(Async::NotReady) - } - }, - } - } -} - -impl TlsStream { - /// Get access to the internal `native_tls::TlsStream` stream which also - /// transitively allows access to `S`. - pub fn get_ref(&self) -> &native_tls::TlsStream { - &self.inner - } - - /// Get mutable access to the internal `native_tls::TlsStream` stream which - /// also transitively allows mutable access to `S`. - pub fn get_mut(&mut self) -> &mut native_tls::TlsStream { - &mut self.inner - } -} - -impl io::Read for TlsStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.inner.read(buf) - } -} - -impl io::Write for TlsStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.inner.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.inner.flush() - } -} - -impl AsyncRead for TlsStream {} - -impl AsyncWrite for TlsStream { - fn shutdown(&mut self) -> Poll<(), io::Error> { - match self.inner.shutdown() { - Ok(_) => (), - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (), - Err(e) => return Err(e), - } - self.inner.get_mut().shutdown() - } -} From 1ff86e5ac4f2378295b1d1880c3ec759b1d4b8cc Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 21:24:21 -0700 Subject: [PATCH 139/219] restore rust-tls support --- .travis.yml | 6 ++--- src/server/http.rs | 50 ++++++++++++++++++++++++++-------------- src/server/ssl/mod.rs | 8 +++---- src/server/ssl/rustls.rs | 43 ++++++++++------------------------ src/test.rs | 4 +--- 5 files changed, 53 insertions(+), 58 deletions(-) diff --git a/.travis.yml b/.travis.yml index 497f7bbc2..0023965da 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,12 +32,12 @@ script: - | if [[ "$TRAVIS_RUST_VERSION" != "stable" ]]; then cargo clean - cargo test --features="ssl,tls" -- --nocapture + cargo test --features="ssl,tls,rust-tls" -- --nocapture fi - | if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin - cargo tarpaulin --features="ssl,tls" --out Xml --no-count + cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml --no-count bash <(curl -s https://codecov.io/bash) echo "Uploaded code coverage" fi @@ -46,7 +46,7 @@ script: after_success: - | if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = 
"false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then - cargo doc --features "ssl,session" --no-deps && + cargo doc --features "ssl,tls,rust-tls,session" --no-deps && echo "" > target/doc/index.html && git clone https://github.com/davisp/ghp-import.git && ./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc && diff --git a/src/server/http.rs b/src/server/http.rs index 1cc899816..6432f18fc 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -15,8 +15,8 @@ use native_tls::TlsAcceptor; #[cfg(any(feature = "alpn", feature = "ssl"))] use openssl::ssl::SslAcceptorBuilder; -//#[cfg(feature = "rust-tls")] -//use rustls::ServerConfig; +#[cfg(feature = "rust-tls")] +use rustls::ServerConfig; use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor}; use super::builder::DefaultPipelineFactory; @@ -313,22 +313,38 @@ where Ok(self) } - // #[cfg(feature = "rust-tls")] - // /// Use listener for accepting incoming tls connection requests - // /// - // /// This method sets alpn protocols to "h2" and "http/1.1" - // pub fn listen_rustls(self, lst: net::TcpListener, builder: ServerConfig) -> Self { - // use super::{RustlsAcceptor, ServerFlags}; + #[cfg(feature = "rust-tls")] + /// Use listener for accepting incoming tls connection requests + /// + /// This method sets alpn protocols to "h2" and "http/1.1" + pub fn listen_rustls(mut self, lst: net::TcpListener, config: ServerConfig) -> Self { + use super::{RustlsAcceptor, ServerFlags}; + use actix_net::service::NewServiceExt; - // // alpn support - // let flags = if self.no_http2 { - // ServerFlags::HTTP1 - // } else { - // ServerFlags::HTTP1 | ServerFlags::HTTP2 - // }; - // - // self.listen_with(lst, RustlsAcceptor::with_flags(builder, flags)) - // } + // alpn support + let flags = if self.no_http2 { + ServerFlags::HTTP1 + } else { + ServerFlags::HTTP1 | ServerFlags::HTTP2 + }; + + let addr = lst.local_addr().unwrap(); + self.sockets.push(Socket { + lst, + addr, + scheme: "https", + handler: Box::new(HttpServiceBuilder::new( + self.factory.clone(), + move || { + RustlsAcceptor::with_flags(config.clone(), flags).map_err(|_| ()) + }, + DefaultPipelineFactory::new(), + )), + }); + + //Ok(self) + self + } /// The socket address to bind /// diff --git a/src/server/ssl/mod.rs b/src/server/ssl/mod.rs index 7302cf0b4..1d6b55b10 100644 --- a/src/server/ssl/mod.rs +++ b/src/server/ssl/mod.rs @@ -6,7 +6,7 @@ pub use self::openssl::*; #[cfg(feature = "tls")] mod nativetls; -//#[cfg(feature = "rust-tls")] -//mod rustls; -//#[cfg(feature = "rust-tls")] -//pub use self::rustls::RustlsAcceptor; +#[cfg(feature = "rust-tls")] +mod rustls; +#[cfg(feature = "rust-tls")] +pub use self::rustls::RustlsAcceptor; diff --git a/src/server/ssl/rustls.rs b/src/server/ssl/rustls.rs index 6ad0a7b2b..c74b62ea4 100644 --- a/src/server/ssl/rustls.rs +++ b/src/server/ssl/rustls.rs @@ -1,29 +1,25 @@ use std::net::Shutdown; -use std::sync::Arc; use std::{io, time}; +use actix_net::ssl; //::RustlsAcceptor; use rustls::{ClientSession, ServerConfig, ServerSession}; -use tokio_io::AsyncWrite; -use tokio_rustls::{AcceptAsync, ServerConfigExt, TlsStream}; +use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_rustls::TlsStream; -use server::{AcceptorService, IoStream, ServerFlags}; +use server::{IoStream, ServerFlags}; -#[derive(Clone)] /// Support `SSL` connections via rustls package /// /// `rust-tls` feature enables `RustlsAcceptor` type -pub struct RustlsAcceptor { - config: Arc, 
+pub struct RustlsAcceptor { + _t: ssl::RustlsAcceptor, } -impl RustlsAcceptor { - /// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support. - pub fn new(config: ServerConfig) -> Self { - RustlsAcceptor::with_flags(config, ServerFlags::HTTP1 | ServerFlags::HTTP2) - } - - /// Create `OpensslAcceptor` with custom server flags. - pub fn with_flags(mut config: ServerConfig, flags: ServerFlags) -> Self { +impl RustlsAcceptor { + /// Create `RustlsAcceptor` with custom server flags. + pub fn with_flags( + mut config: ServerConfig, flags: ServerFlags, + ) -> ssl::RustlsAcceptor { let mut protos = Vec::new(); if flags.contains(ServerFlags::HTTP2) { protos.push("h2".to_string()); @@ -35,22 +31,7 @@ impl RustlsAcceptor { config.set_protocols(&protos); } - RustlsAcceptor { - config: Arc::new(config), - } - } -} - -impl AcceptorService for RustlsAcceptor { - type Accepted = TlsStream; - type Future = AcceptAsync; - - fn scheme(&self) -> &'static str { - "https" - } - - fn accept(&self, io: Io) -> Self::Future { - ServerConfigExt::accept_async(&self.config, io) + ssl::RustlsAcceptor::new(config) } } diff --git a/src/test.rs b/src/test.rs index b9d64f270..83b0b83b7 100644 --- a/src/test.rs +++ b/src/test.rs @@ -19,8 +19,6 @@ use openssl::ssl::SslAcceptorBuilder; use rustls::ServerConfig; #[cfg(feature = "alpn")] use server::OpensslAcceptor; -#[cfg(feature = "rust-tls")] -use server::RustlsAcceptor; use application::{App, HttpApplication}; use body::Binary; @@ -350,7 +348,7 @@ where let ssl = self.rust_ssl.take(); if let Some(ssl) = ssl { let tcp = net::TcpListener::bind(addr).unwrap(); - srv = srv.listen_with(tcp, RustlsAcceptor::new(ssl)); + srv = srv.listen_rustls(tcp, ssl); } } if !has_ssl { From d0fc9d7b99961cbbcd8dc389292abdaf46337fcb Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 21:55:44 -0700 Subject: [PATCH 140/219] simplify listen_ and bind_ methods --- src/server/http.rs | 157 +++++++++++++++------------------------------ 1 file changed, 52 insertions(+), 105 deletions(-) diff --git a/src/server/http.rs b/src/server/http.rs index 6432f18fc..22537cb86 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -1,13 +1,11 @@ use std::{io, mem, net}; use actix::{Addr, System}; -use actix_net::server; -use actix_net::service::NewService; +use actix_net::server::Server; use actix_net::ssl; use net2::TcpBuilder; use num_cpus; -use tokio_tcp::TcpStream; #[cfg(feature = "tls")] use native_tls::TlsAcceptor; @@ -21,7 +19,7 @@ use rustls::ServerConfig; use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor}; use super::builder::DefaultPipelineFactory; use super::builder::{HttpServiceBuilder, ServiceProvider}; -use super::{IntoHttpHandler, IoStream, KeepAlive}; +use super::{IntoHttpHandler, KeepAlive}; struct Socket { scheme: &'static str, @@ -233,15 +231,9 @@ where #[doc(hidden)] /// Use listener for accepting incoming connection requests - pub(crate) fn listen_with( - mut self, lst: net::TcpListener, acceptor: A, - ) -> Self + pub(crate) fn listen_with(mut self, lst: net::TcpListener, acceptor: A) -> Self where - A: AcceptorServiceFactory, - T: NewService - + Clone - + 'static, - Io: IoStream + Send, + A: AcceptorServiceFactory, { let addr = lst.local_addr().unwrap(); self.sockets.push(Socket { @@ -266,18 +258,9 @@ where pub fn listen_tls(mut self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { use actix_net::service::NewServiceExt; - let addr = lst.local_addr().unwrap(); - self.sockets.push(Socket { - lst, - addr, - scheme: "https", - handler: 
Box::new(HttpServiceBuilder::new( - self.factory.clone(), - move || ssl::NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ()), - DefaultPipelineFactory::new(), - )), - }); - self + self.listen_with(lst, move || { + ssl::NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ()) + }) } #[cfg(any(feature = "alpn", feature = "ssl"))] @@ -285,7 +268,7 @@ where /// /// This method sets alpn protocols to "h2" and "http/1.1" pub fn listen_ssl( - mut self, lst: net::TcpListener, builder: SslAcceptorBuilder, + self, lst: net::TcpListener, builder: SslAcceptorBuilder, ) -> io::Result { use super::{openssl_acceptor_with_flags, ServerFlags}; use actix_net::service::NewServiceExt; @@ -297,20 +280,9 @@ where }; let acceptor = openssl_acceptor_with_flags(builder, flags)?; - - let addr = lst.local_addr().unwrap(); - self.sockets.push(Socket { - lst, - addr, - scheme: "https", - handler: Box::new(HttpServiceBuilder::new( - self.factory.clone(), - move || ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ()), - DefaultPipelineFactory::new(), - )), - }); - - Ok(self) + Ok(self.listen_with(lst, move || { + ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ()) + })) } #[cfg(feature = "rust-tls")] @@ -328,22 +300,9 @@ where ServerFlags::HTTP1 | ServerFlags::HTTP2 }; - let addr = lst.local_addr().unwrap(); - self.sockets.push(Socket { - lst, - addr, - scheme: "https", - handler: Box::new(HttpServiceBuilder::new( - self.factory.clone(), - move || { - RustlsAcceptor::with_flags(config.clone(), flags).map_err(|_| ()) - }, - DefaultPipelineFactory::new(), - )), - }); - - //Ok(self) - self + self.listen_with(lst, move || { + RustlsAcceptor::with_flags(config.clone(), flags).map_err(|_| ()) + }) } /// The socket address to bind @@ -416,33 +375,32 @@ where } } - // #[cfg(feature = "tls")] - // /// The ssl socket address to bind - // /// - // /// To bind multiple addresses this method can be called multiple times. - // pub fn bind_tls( - // self, addr: S, acceptor: TlsAcceptor, - // ) -> io::Result { - // use super::NativeTlsAcceptor; + #[cfg(feature = "tls")] + /// The ssl socket address to bind + /// + /// To bind multiple addresses this method can be called multiple times. + pub fn bind_tls( + self, addr: S, acceptor: TlsAcceptor, + ) -> io::Result { + use actix_net::service::NewServiceExt; + use actix_net::ssl::NativeTlsAcceptor; - // self.bind_with(addr, NativeTlsAcceptor::new(acceptor)) - // } + self.bind_with(addr, move || { + NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ()) + }) + } #[cfg(any(feature = "alpn", feature = "ssl"))] /// Start listening for incoming tls connections. 
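Illustrative sketch, not part of the patch: feeding an OpenSSL `SslAcceptorBuilder` to the `bind_ssl`/`listen_ssl` helpers that this commit reworks. The certificate and key file names, route, and address are placeholder assumptions.

    extern crate actix_web;
    extern crate openssl;
    use actix_web::{server, App, HttpResponse};
    use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};

    fn main() {
        // build an acceptor from a private key and certificate chain on disk
        let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
        builder.set_private_key_file("key.pem", SslFiletype::PEM).unwrap();
        builder.set_certificate_chain_file("cert.pem").unwrap();

        server::new(|| App::new().resource("/", |r| r.f(|_| HttpResponse::Ok())))
            .bind_ssl("127.0.0.1:8443", builder)
            .unwrap()
            .run();
    }
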
/// /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn bind_ssl( - mut self, addr: S, builder: SslAcceptorBuilder, - ) -> io::Result + pub fn bind_ssl(self, addr: S, builder: SslAcceptorBuilder) -> io::Result where S: net::ToSocketAddrs, { use super::{openssl_acceptor_with_flags, ServerFlags}; use actix_net::service::NewServiceExt; - let sockets = self.bind2(addr)?; - // alpn support let flags = if !self.no_http2 { ServerFlags::HTTP1 @@ -451,43 +409,32 @@ where }; let acceptor = openssl_acceptor_with_flags(builder, flags)?; - - for lst in sockets { - let addr = lst.local_addr().unwrap(); - let accpt = acceptor.clone(); - self.sockets.push(Socket { - lst, - addr, - scheme: "https", - handler: Box::new(HttpServiceBuilder::new( - self.factory.clone(), - move || ssl::OpensslAcceptor::new(accpt.clone()).map_err(|_| ()), - DefaultPipelineFactory::new(), - )), - }); - } - - Ok(self) + self.bind_with(addr, move || { + ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ()) + }) } - // #[cfg(feature = "rust-tls")] - // /// Start listening for incoming tls connections. - // /// - // /// This method sets alpn protocols to "h2" and "http/1.1" - // pub fn bind_rustls( - // self, addr: S, builder: ServerConfig, - // ) -> io::Result { - // use super::{RustlsAcceptor, ServerFlags}; + #[cfg(feature = "rust-tls")] + /// Start listening for incoming tls connections. + /// + /// This method sets alpn protocols to "h2" and "http/1.1" + pub fn bind_rustls( + self, addr: S, builder: ServerConfig, + ) -> io::Result { + use super::{RustlsAcceptor, ServerFlags}; + use actix_net::service::NewServiceExt; - // // alpn support - // let flags = if !self.no_http2 { - // ServerFlags::HTTP1 - // } else { - // ServerFlags::HTTP1 | ServerFlags::HTTP2 - // }; + // alpn support + let flags = if !self.no_http2 { + ServerFlags::HTTP1 + } else { + ServerFlags::HTTP1 | ServerFlags::HTTP2 + }; - // self.bind_with(addr, RustlsAcceptor::with_flags(builder, flags)) - // } + self.bind_with(addr, move || { + RustlsAcceptor::with_flags(builder.clone(), flags).map_err(|_| ()) + }) + } } impl H + Send + Clone> HttpServer { @@ -516,10 +463,10 @@ impl H + Send + Clone> HttpServer { /// sys.run(); // <- Run actix system, this method starts all async processes /// } /// ``` - pub fn start(mut self) -> Addr { + pub fn start(mut self) -> Addr { ssl::max_concurrent_ssl_connect(self.maxconnrate); - let mut srv = server::Server::new() + let mut srv = Server::new() .workers(self.threads) .maxconn(self.maxconn) .shutdown_timeout(self.shutdown_timeout); From 4b59ae24760b361c85b04967611c5ddeae16c912 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 22:15:38 -0700 Subject: [PATCH 141/219] fix ssl config for client connector --- src/client/connector.rs | 60 +++++++++++++++++++++++++++++------------ src/test.rs | 25 ++++++++--------- 2 files changed, 56 insertions(+), 29 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 896f98a41..6e82e3fd8 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -16,13 +16,16 @@ use http::{Error as HttpError, HttpTryFrom, Uri}; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_timer::Delay; -#[cfg(feature = "alpn")] +#[cfg(any(feature = "alpn", feature = "ssl"))] use { openssl::ssl::{Error as SslError, SslConnector, SslMethod}, tokio_openssl::SslConnectorExt, }; -#[cfg(all(feature = "tls", not(feature = "alpn")))] +#[cfg(all( + feature = "tls", + not(any(feature = "alpn", feature = "ssl", feature = "rust-tls")) +))] use { native_tls::{Error 
as SslError, TlsConnector as NativeTlsConnector}, tokio_tls::TlsConnector as SslConnector, @@ -30,7 +33,7 @@ use { #[cfg(all( feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) + not(any(feature = "alpn", feature = "tls", feature = "ssl")) ))] use { rustls::ClientConfig, std::io::Error as SslError, std::sync::Arc, @@ -39,11 +42,16 @@ use { #[cfg(all( feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) + not(any(feature = "alpn", feature = "tls", feature = "ssl")) ))] type SslConnector = Arc; -#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] +#[cfg(not(any( + feature = "alpn", + feature = "ssl", + feature = "tls", + feature = "rust-tls", +)))] type SslConnector = (); use server::IoStream; @@ -150,7 +158,12 @@ pub enum ClientConnectorError { SslIsNotSupported, /// SSL error - #[cfg(any(feature = "tls", feature = "alpn", feature = "rust-tls"))] + #[cfg(any( + feature = "tls", + feature = "alpn", + feature = "ssl", + feature = "rust-tls", + ))] #[fail(display = "{}", _0)] SslError(#[cause] SslError), @@ -247,19 +260,22 @@ impl SystemService for ClientConnector {} impl Default for ClientConnector { fn default() -> ClientConnector { let connector = { - #[cfg(all(feature = "alpn"))] + #[cfg(all(any(feature = "alpn", feature = "ssl")))] { SslConnector::builder(SslMethod::tls()).unwrap().build() } - #[cfg(all(feature = "tls", not(feature = "alpn")))] + #[cfg(all( + feature = "tls", + not(any(feature = "alpn", feature = "ssl", feature = "rust-tls")) + ))] { NativeTlsConnector::builder().build().unwrap().into() } #[cfg(all( feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) + not(any(feature = "alpn", feature = "tls", feature = "ssl")) ))] { let mut config = ClientConfig::new(); @@ -269,7 +285,12 @@ impl Default for ClientConnector { Arc::new(config) } - #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] + #[cfg(not(any( + feature = "alpn", + feature = "ssl", + feature = "tls", + feature = "rust-tls", + )))] { () } @@ -280,7 +301,7 @@ impl Default for ClientConnector { } impl ClientConnector { - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "ssl"))] /// Create `ClientConnector` actor with custom `SslConnector` instance. /// /// By default `ClientConnector` uses very a simple SSL configuration. @@ -325,7 +346,7 @@ impl ClientConnector { #[cfg(all( feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) + not(any(feature = "alpn", feature = "ssl", feature = "tls")) ))] /// Create `ClientConnector` actor with custom `SslConnector` instance. /// @@ -376,7 +397,7 @@ impl ClientConnector { #[cfg(all( feature = "tls", - not(any(feature = "alpn", feature = "rust-tls")) + not(any(feature = "ssl", feature = "alpn", feature = "rust-tls")) ))] /// Create `ClientConnector` actor with custom `SslConnector` instance. 
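Illustrative sketch, not part of the patch: wiring a custom OpenSSL connector into `ClientConnector::with_connector`, mirroring the test helper updated later in this commit. Disabling certificate verification is an assumption suited only to tests against self-signed certificates, and the actor must be started inside an actix `System`.

    extern crate actix;
    extern crate actix_web;
    extern crate openssl;
    use actix::Actor;
    use actix_web::client::ClientConnector;
    use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};

    fn main() {
        let sys = actix::System::new("client-example");

        let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
        builder.set_verify(SslVerifyMode::NONE); // test-only: accept self-signed certs
        let _connector = ClientConnector::with_connector(builder.build()).start();

        // ... issue client requests via `_connector`; `sys.run()` blocks until
        // the system is stopped
        sys.run();
    }
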
/// @@ -714,7 +735,7 @@ impl ClientConnector { act.release_key(&key2); () }).and_then(move |res, act, _| { - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "ssl"))] match res { Err(err) => { let _ = waiter.tx.send(Err(err.into())); @@ -756,7 +777,7 @@ impl ClientConnector { } } - #[cfg(all(feature = "tls", not(feature = "alpn")))] + #[cfg(all(feature = "tls", not(any(feature = "alpn", feature = "ssl"))))] match res { Err(err) => { let _ = waiter.tx.send(Err(err.into())); @@ -800,7 +821,7 @@ impl ClientConnector { #[cfg(all( feature = "rust-tls", - not(any(feature = "alpn", feature = "tls")) + not(any(feature = "alpn", feature = "ssl", feature = "tls")) ))] match res { Err(err) => { @@ -844,7 +865,12 @@ impl ClientConnector { } } - #[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] + #[cfg(not(any( + feature = "alpn", + feature = "ssl", + feature = "tls", + feature = "rust-tls" + )))] match res { Err(err) => { let _ = waiter.tx.send(Err(err.into())); diff --git a/src/test.rs b/src/test.rs index 83b0b83b7..d0cfb255a 100644 --- a/src/test.rs +++ b/src/test.rs @@ -13,12 +13,10 @@ use http::{HeaderMap, HttpTryFrom, Method, Uri, Version}; use net2::TcpBuilder; use tokio::runtime::current_thread::Runtime; -#[cfg(feature = "alpn")] +#[cfg(any(feature = "alpn", feature = "ssl"))] use openssl::ssl::SslAcceptorBuilder; #[cfg(feature = "rust-tls")] use rustls::ServerConfig; -#[cfg(feature = "alpn")] -use server::OpensslAcceptor; use application::{App, HttpApplication}; use body::Binary; @@ -136,7 +134,7 @@ impl TestServer { } fn get_conn() -> Addr { - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "ssl"))] { use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode}; @@ -144,7 +142,10 @@ impl TestServer { builder.set_verify(SslVerifyMode::NONE); ClientConnector::with_connector(builder.build()).start() } - #[cfg(all(feature = "rust-tls", not(feature = "alpn")))] + #[cfg(all( + feature = "rust-tls", + not(any(feature = "alpn", feature = "ssl")) + ))] { use rustls::ClientConfig; use std::fs::File; @@ -154,7 +155,7 @@ impl TestServer { config.root_store.add_pem_file(pem_file).unwrap(); ClientConnector::with_connector(config).start() } - #[cfg(not(any(feature = "alpn", feature = "rust-tls")))] + #[cfg(not(any(feature = "alpn", feature = "ssl", feature = "rust-tls")))] { ClientConnector::default().start() } @@ -263,7 +264,7 @@ where F: Fn() -> S + Send + Clone + 'static, { state: F, - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "ssl"))] ssl: Option, #[cfg(feature = "rust-tls")] rust_ssl: Option, @@ -277,14 +278,14 @@ where pub fn new(state: F) -> TestServerBuilder { TestServerBuilder { state, - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "ssl"))] ssl: None, #[cfg(feature = "rust-tls")] rust_ssl: None, } } - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "ssl"))] /// Create ssl server pub fn ssl(mut self, ssl: SslAcceptorBuilder) -> Self { self.ssl = Some(ssl); @@ -308,7 +309,7 @@ where let mut has_ssl = false; - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "ssl"))] { has_ssl = has_ssl || self.ssl.is_some(); } @@ -335,12 +336,12 @@ where tx.send((System::current(), addr, TestServer::get_conn())) .unwrap(); - #[cfg(feature = "alpn")] + #[cfg(any(feature = "alpn", feature = "ssl"))] { let ssl = self.ssl.take(); if let Some(ssl) = ssl { let tcp = net::TcpListener::bind(addr).unwrap(); - srv = srv.listen_with(tcp, OpensslAcceptor::new(ssl).unwrap()); + srv = 
srv.listen_ssl(tcp, ssl).unwrap(); } } #[cfg(feature = "rust-tls")] From bec37fdbd53f91e96ba161568ed6e191729c1411 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 27 Sep 2018 22:23:29 -0700 Subject: [PATCH 142/219] update travis config --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0023965da..dbdcb923c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -30,12 +30,12 @@ before_script: script: - | - if [[ "$TRAVIS_RUST_VERSION" != "stable" ]]; then + if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then cargo clean cargo test --features="ssl,tls,rust-tls" -- --nocapture fi - | - if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then + if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml --no-count bash <(curl -s https://codecov.io/bash) @@ -45,7 +45,7 @@ script: # Upload docs after_success: - | - if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then + if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then cargo doc --features "ssl,tls,rust-tls,session" --no-deps && echo "" > target/doc/index.html && git clone https://github.com/davisp/ghp-import.git && From fc5088b55ee4285d7f17dd58fd81982156d9b977 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 28 Sep 2018 00:08:23 -0700 Subject: [PATCH 143/219] fix tarpaulin args --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index dbdcb923c..59f6a8549 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,7 +37,7 @@ script: - | if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin - cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml --no-count + cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml bash <(curl -s https://codecov.io/bash) echo "Uploaded code coverage" fi From 0f1c80ccc63840b9da646b268f2e07dd520c6837 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 28 Sep 2018 08:45:49 -0700 Subject: [PATCH 144/219] deprecate start_incoming --- src/client/connector.rs | 4 +- src/server/channel.rs | 5 ++ src/server/http.rs | 115 +++++++++++++++++++--------------------- 3 files changed, 63 insertions(+), 61 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 6e82e3fd8..8d71913fe 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -51,7 +51,7 @@ type SslConnector = Arc; feature = "ssl", feature = "tls", feature = "rust-tls", -)))] +),))] type SslConnector = (); use server::IoStream; @@ -290,7 +290,7 @@ impl Default for ClientConnector { feature = "ssl", feature = "tls", feature = "rust-tls", - )))] + ),))] { () } diff --git a/src/server/channel.rs b/src/server/channel.rs index c1e6b6b24..0d92c23a3 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -1,6 +1,7 @@ use std::net::{Shutdown, SocketAddr}; use std::{io, ptr, time}; +use actix::Message; use bytes::{Buf, BufMut, BytesMut}; use futures::{Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -282,6 +283,10 @@ where io: T, } +impl Message for WrapperStream { + type Result = (); +} + impl WrapperStream where T: AsyncRead + AsyncWrite + 'static, diff --git a/src/server/http.rs b/src/server/http.rs index 
22537cb86..81c4d3ad6 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -1,11 +1,13 @@ use std::{io, mem, net}; -use actix::{Addr, System}; +use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System}; use actix_net::server::Server; use actix_net::ssl; +use futures::Stream; use net2::TcpBuilder; use num_cpus; +use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "tls")] use native_tls::TlsAcceptor; @@ -17,8 +19,10 @@ use openssl::ssl::SslAcceptorBuilder; use rustls::ServerConfig; use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor}; -use super::builder::DefaultPipelineFactory; -use super::builder::{HttpServiceBuilder, ServiceProvider}; +use super::builder::{DefaultPipelineFactory, HttpServiceBuilder, ServiceProvider}; +use super::channel::{HttpChannel, WrapperStream}; +use super::handler::HttpHandler; +use super::settings::{ServerSettings, WorkerSettings}; use super::{IntoHttpHandler, KeepAlive}; struct Socket { @@ -520,67 +524,60 @@ impl H + Send + Clone> HttpServer { } } -// impl HttpServer { -// /// Start listening for incoming connections from a stream. -// /// -// /// This method uses only one thread for handling incoming connections. -// pub fn start_incoming(self, stream: S, secure: bool) -// where -// S: Stream + Send + 'static, -// T: AsyncRead + AsyncWrite + Send + 'static, -// { -// // set server settings -// let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); -// let srv_settings = ServerSettings::new(Some(addr), &self.host, secure); -// let apps: Vec<_> = (*self.factory)() -// .into_iter() -// .map(|h| h.into_handler()) -// .collect(); -// let settings = WorkerSettings::create( -// apps, -// self.keep_alive, -// srv_settings, -// ); +impl HttpServer +where + H: IntoHttpHandler, + F: Fn() -> H + Send + Clone, +{ + #[doc(hidden)] + #[deprecated(since = "0.7.8")] + /// Start listening for incoming connections from a stream. + /// + /// This method uses only one thread for handling incoming connections. 
+ pub fn start_incoming(self, stream: S, secure: bool) + where + S: Stream + 'static, + T: AsyncRead + AsyncWrite + 'static, + { + // set server settings + let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); + let apps = (self.factory)().into_handler(); + let settings = WorkerSettings::new( + apps, + self.keep_alive, + self.client_timeout as u64, + ServerSettings::new(Some(addr), &self.host, secure), + ); -// // start server -// HttpIncoming::create(move |ctx| { -// ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn { -// io: WrapperStream::new(t), -// handler: Token::new(0), -// token: Token::new(0), -// peer: None, -// })); -// HttpIncoming { settings } -// }); -// } -// } + // start server + HttpIncoming::create(move |ctx| { + ctx.add_message_stream( + stream.map_err(|_| ()).map(move |t| WrapperStream::new(t)), + ); + HttpIncoming { settings } + }); + } +} -// struct HttpIncoming { -// settings: Rc>, -// } +struct HttpIncoming { + settings: WorkerSettings, +} -// impl Actor for HttpIncoming -// where -// H: HttpHandler, -// { -// type Context = Context; -// } +impl Actor for HttpIncoming { + type Context = Context; +} -// impl Handler> for HttpIncoming -// where -// T: IoStream, -// H: HttpHandler, -// { -// type Result = (); +impl Handler> for HttpIncoming +where + T: AsyncRead + AsyncWrite, + H: HttpHandler, +{ + type Result = (); -// fn handle(&mut self, msg: Conn, _: &mut Context) -> Self::Result { -// spawn(HttpChannel::new( -// Rc::clone(&self.settings), -// msg.io, -// msg.peer, -// )); -// } -// } + fn handle(&mut self, msg: WrapperStream, _: &mut Context) -> Self::Result { + Arbiter::spawn(HttpChannel::new(self.settings.clone(), msg, None)); + } +} fn create_tcp_listener( addr: net::SocketAddr, backlog: i32, From f2d42e5e7719383fccdf97315437da27a4991dfb Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 28 Sep 2018 11:50:47 -0700 Subject: [PATCH 145/219] refactor acceptor error handling --- Cargo.toml | 4 +- src/client/connector.rs | 4 +- src/server/acceptor.rs | 275 ++++++++++++++++++++++++---------------- src/server/builder.rs | 38 ++++-- src/server/channel.rs | 5 - src/server/error.rs | 15 +++ src/server/http.rs | 70 +--------- src/server/incoming.rs | 70 ++++++++++ src/server/mod.rs | 1 + 9 files changed, 288 insertions(+), 194 deletions(-) create mode 100644 src/server/incoming.rs diff --git a/Cargo.toml b/Cargo.toml index 205e178b9..0e95c327c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,8 +60,8 @@ flate2-rust = ["flate2/rust_backend"] [dependencies] actix = "0.7.0" -actix-net = { git="https://github.com/actix/actix-net.git" } -#actix-net = { path = "../actix-net" } +#actix-net = { git="https://github.com/actix/actix-net.git" } +actix-net = { path = "../actix-net" } base64 = "0.9" bitflags = "1.0" diff --git a/src/client/connector.rs b/src/client/connector.rs index 8d71913fe..6e82e3fd8 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -51,7 +51,7 @@ type SslConnector = Arc; feature = "ssl", feature = "tls", feature = "rust-tls", -),))] +)))] type SslConnector = (); use server::IoStream; @@ -290,7 +290,7 @@ impl Default for ClientConnector { feature = "ssl", feature = "tls", feature = "rust-tls", - ),))] + )))] { () } diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs index d78474160..caad0e2e3 100644 --- a/src/server/acceptor.rs +++ b/src/server/acceptor.rs @@ -1,3 +1,4 @@ +use std::net; use std::time::Duration; use actix_net::server::ServerMessage; @@ -8,6 +9,7 @@ use tokio_reactor::Handle; use 
tokio_tcp::TcpStream; use tokio_timer::{sleep, Delay}; +use super::error::AcceptorError; use super::handler::HttpHandler; use super::settings::WorkerSettings; use super::IoStream; @@ -15,12 +17,7 @@ use super::IoStream; /// This trait indicates types that can create acceptor service for http server. pub trait AcceptorServiceFactory: Send + Clone + 'static { type Io: IoStream + Send; - type NewService: NewService< - Request = TcpStream, - Response = Self::Io, - Error = (), - InitError = (), - >; + type NewService: NewService; fn create(&self) -> Self::NewService; } @@ -29,7 +26,7 @@ impl AcceptorServiceFactory for F where F: Fn() -> T + Send + Clone + 'static, T::Response: IoStream + Send, - T: NewService, + T: NewService, { type Io = T::Response; type NewService = T; @@ -80,144 +77,91 @@ impl Service for DefaultAcceptor { } } -pub(crate) struct TcpAcceptor { +pub(crate) struct TcpAcceptor { inner: T, - settings: WorkerSettings, } -impl TcpAcceptor +impl TcpAcceptor where - H: HttpHandler, - T: NewService, + T: NewService>, { - pub(crate) fn new(settings: WorkerSettings, inner: T) -> Self { - TcpAcceptor { inner, settings } + pub(crate) fn new(inner: T) -> Self { + TcpAcceptor { inner } } } -impl NewService for TcpAcceptor +impl NewService for TcpAcceptor where - H: HttpHandler, - T: NewService, + T: NewService>, { - type Request = ServerMessage; - type Response = (); - type Error = (); - type InitError = (); - type Service = TcpAcceptorService; - type Future = TcpAcceptorResponse; + type Request = net::TcpStream; + type Response = T::Response; + type Error = AcceptorError; + type InitError = T::InitError; + type Service = TcpAcceptorService; + type Future = TcpAcceptorResponse; fn new_service(&self) -> Self::Future { TcpAcceptorResponse { fut: self.inner.new_service(), - settings: self.settings.clone(), } } } -pub(crate) struct TcpAcceptorResponse +pub(crate) struct TcpAcceptorResponse where - H: HttpHandler, T: NewService, { fut: T::Future, - settings: WorkerSettings, } -impl Future for TcpAcceptorResponse +impl Future for TcpAcceptorResponse where - H: HttpHandler, T: NewService, { - type Item = TcpAcceptorService; - type Error = (); + type Item = TcpAcceptorService; + type Error = T::InitError; fn poll(&mut self) -> Poll { - match self.fut.poll() { - Err(_) => Err(()), - Ok(Async::NotReady) => Ok(Async::NotReady), - Ok(Async::Ready(service)) => Ok(Async::Ready(TcpAcceptorService { - inner: service, - settings: self.settings.clone(), - })), + match self.fut.poll()? 
{ + Async::NotReady => Ok(Async::NotReady), + Async::Ready(service) => { + Ok(Async::Ready(TcpAcceptorService { inner: service })) + } } } } -pub(crate) struct TcpAcceptorService { +pub(crate) struct TcpAcceptorService { inner: T, - settings: WorkerSettings, } -impl Service for TcpAcceptorService +impl Service for TcpAcceptorService where - H: HttpHandler, - T: Service, + T: Service>, { - type Request = ServerMessage; - type Response = (); - type Error = (); - type Future = Either, FutureResult<(), ()>>; + type Request = net::TcpStream; + type Response = T::Response; + type Error = AcceptorError; + type Future = Either>; fn poll_ready(&mut self) -> Poll<(), Self::Error> { - self.inner.poll_ready().map_err(|_| ()) + self.inner.poll_ready() } fn call(&mut self, req: Self::Request) -> Self::Future { - match req { - ServerMessage::Connect(stream) => { - let stream = - TcpStream::from_std(stream, &Handle::default()).map_err(|e| { - error!("Can not convert to an async tcp stream: {}", e); - }); + let stream = TcpStream::from_std(req, &Handle::default()).map_err(|e| { + error!("Can not convert to an async tcp stream: {}", e); + AcceptorError::Io(e) + }); - if let Ok(stream) = stream { - Either::A(TcpAcceptorServiceFut { - fut: self.inner.call(stream), - }) - } else { - Either::B(err(())) - } - } - ServerMessage::Shutdown(timeout) => Either::B(ok(())), - ServerMessage::ForceShutdown => { - // self.settings.head().traverse::(); - Either::B(ok(())) - } + match stream { + Ok(stream) => Either::A(self.inner.call(stream)), + Err(e) => Either::B(err(e)), } } } -pub(crate) struct TcpAcceptorServiceFut { - fut: T, -} - -impl Future for TcpAcceptorServiceFut -where - T: Future, -{ - type Item = (); - type Error = (); - - fn poll(&mut self) -> Poll { - match self.fut.poll() { - Err(_) => Err(()), - Ok(Async::NotReady) => Ok(Async::NotReady), - Ok(Async::Ready(_)) => Ok(Async::Ready(())), - } - } -} - -/// Errors produced by `AcceptorTimeout` service. -#[derive(Debug)] -pub enum TimeoutError { - /// The inner service error - Service(T), - - /// The request did not complete within the specified timeout. - Timeout, -} - /// Acceptor timeout middleware /// /// Applies timeout to request prcoessing. @@ -235,7 +179,7 @@ impl AcceptorTimeout { impl NewService for AcceptorTimeout { type Request = T::Request; type Response = T::Response; - type Error = TimeoutError; + type Error = AcceptorError; type InitError = T::InitError; type Service = AcceptorTimeoutService; type Future = AcceptorTimeoutFut; @@ -278,11 +222,11 @@ pub(crate) struct AcceptorTimeoutService { impl Service for AcceptorTimeoutService { type Request = T::Request; type Response = T::Response; - type Error = TimeoutError; + type Error = AcceptorError; type Future = AcceptorTimeoutResponse; fn poll_ready(&mut self) -> Poll<(), Self::Error> { - self.inner.poll_ready().map_err(TimeoutError::Service) + self.inner.poll_ready().map_err(AcceptorError::Service) } fn call(&mut self, req: Self::Request) -> Self::Future { @@ -299,17 +243,134 @@ pub(crate) struct AcceptorTimeoutResponse { } impl Future for AcceptorTimeoutResponse { type Item = T::Response; - type Error = TimeoutError; + type Error = AcceptorError; fn poll(&mut self) -> Poll { - match self.fut.poll() { - Ok(Async::NotReady) => match self.sleep.poll() { - Err(_) => Err(TimeoutError::Timeout), - Ok(Async::Ready(_)) => Err(TimeoutError::Timeout), + match self.fut.poll().map_err(AcceptorError::Service)? 
{ + Async::NotReady => match self.sleep.poll() { + Err(_) => Err(AcceptorError::Timeout), + Ok(Async::Ready(_)) => Err(AcceptorError::Timeout), Ok(Async::NotReady) => Ok(Async::NotReady), }, - Ok(Async::Ready(resp)) => Ok(Async::Ready(resp)), - Err(err) => Err(TimeoutError::Service(err)), + Async::Ready(resp) => Ok(Async::Ready(resp)), + } + } +} + +pub(crate) struct ServerMessageAcceptor { + inner: T, + settings: WorkerSettings, +} + +impl ServerMessageAcceptor +where + H: HttpHandler, + T: NewService, +{ + pub(crate) fn new(settings: WorkerSettings, inner: T) -> Self { + ServerMessageAcceptor { inner, settings } + } +} + +impl NewService for ServerMessageAcceptor +where + H: HttpHandler, + T: NewService, +{ + type Request = ServerMessage; + type Response = (); + type Error = T::Error; + type InitError = T::InitError; + type Service = ServerMessageAcceptorService; + type Future = ServerMessageAcceptorResponse; + + fn new_service(&self) -> Self::Future { + ServerMessageAcceptorResponse { + fut: self.inner.new_service(), + settings: self.settings.clone(), + } + } +} + +pub(crate) struct ServerMessageAcceptorResponse +where + H: HttpHandler, + T: NewService, +{ + fut: T::Future, + settings: WorkerSettings, +} + +impl Future for ServerMessageAcceptorResponse +where + H: HttpHandler, + T: NewService, +{ + type Item = ServerMessageAcceptorService; + type Error = T::InitError; + + fn poll(&mut self) -> Poll { + match self.fut.poll()? { + Async::NotReady => Ok(Async::NotReady), + Async::Ready(service) => Ok(Async::Ready(ServerMessageAcceptorService { + inner: service, + settings: self.settings.clone(), + })), + } + } +} + +pub(crate) struct ServerMessageAcceptorService { + inner: T, + settings: WorkerSettings, +} + +impl Service for ServerMessageAcceptorService +where + H: HttpHandler, + T: Service, +{ + type Request = ServerMessage; + type Response = (); + type Error = T::Error; + type Future = + Either, FutureResult<(), Self::Error>>; + + fn poll_ready(&mut self) -> Poll<(), Self::Error> { + self.inner.poll_ready() + } + + fn call(&mut self, req: Self::Request) -> Self::Future { + match req { + ServerMessage::Connect(stream) => { + Either::A(ServerMessageAcceptorServiceFut { + fut: self.inner.call(stream), + }) + } + ServerMessage::Shutdown(timeout) => Either::B(ok(())), + ServerMessage::ForceShutdown => { + // self.settings.head().traverse::(); + Either::B(ok(())) + } + } + } +} + +pub(crate) struct ServerMessageAcceptorServiceFut { + fut: T::Future, +} + +impl Future for ServerMessageAcceptorServiceFut +where + T: Service, +{ + type Item = (); + type Error = T::Error; + + fn poll(&mut self) -> Poll { + match self.fut.poll()? 
{ + Async::NotReady => Ok(Async::NotReady), + Async::Ready(_) => Ok(Async::Ready(())), } } } diff --git a/src/server/builder.rs b/src/server/builder.rs index 28541820b..46ab9f467 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -5,7 +5,10 @@ use actix_net::either::Either; use actix_net::server::{Server, ServiceFactory}; use actix_net::service::{NewService, NewServiceExt}; -use super::acceptor::{AcceptorServiceFactory, AcceptorTimeout, TcpAcceptor}; +use super::acceptor::{ + AcceptorServiceFactory, AcceptorTimeout, ServerMessageAcceptor, TcpAcceptor, +}; +use super::error::AcceptorError; use super::handler::{HttpHandler, IntoHttpHandler}; use super::service::HttpService; use super::settings::{ServerSettings, WorkerSettings}; @@ -99,16 +102,30 @@ where ); if timeout == 0 { - Either::A(TcpAcceptor::new( + Either::A(ServerMessageAcceptor::new( settings.clone(), - acceptor.create().and_then(pipeline.create(settings)), + TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service)) + .map_err(|_| ()) + .map_init_err(|_| ()) + .and_then( + pipeline + .create(settings) + .map_init_err(|_| ()) + .map_err(|_| ()), + ), )) } else { - Either::B(TcpAcceptor::new( + Either::B(ServerMessageAcceptor::new( settings.clone(), - AcceptorTimeout::new(timeout, acceptor.create()) + TcpAcceptor::new(AcceptorTimeout::new(timeout, acceptor.create())) .map_err(|_| ()) - .and_then(pipeline.create(settings)), + .map_init_err(|_| ()) + .and_then( + pipeline + .create(settings) + .map_init_err(|_| ()) + .map_err(|_| ()), + ), )) } } @@ -153,12 +170,7 @@ where pub trait HttpPipelineFactory: Send + Clone + 'static { type Io: IoStream; - type NewService: NewService< - Request = Self::Io, - Response = (), - Error = (), - InitError = (), - >; + type NewService: NewService; fn create(&self, settings: WorkerSettings) -> Self::NewService; } @@ -166,7 +178,7 @@ pub trait HttpPipelineFactory: Send + Clone + 'static { impl HttpPipelineFactory for F where F: Fn(WorkerSettings) -> T + Send + Clone + 'static, - T: NewService, + T: NewService, T::Request: IoStream, H: HttpHandler, { diff --git a/src/server/channel.rs b/src/server/channel.rs index 0d92c23a3..c1e6b6b24 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -1,7 +1,6 @@ use std::net::{Shutdown, SocketAddr}; use std::{io, ptr, time}; -use actix::Message; use bytes::{Buf, BufMut, BytesMut}; use futures::{Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -283,10 +282,6 @@ where io: T, } -impl Message for WrapperStream { - type Result = (); -} - impl WrapperStream where T: AsyncRead + AsyncWrite + 'static, diff --git a/src/server/error.rs b/src/server/error.rs index d08ccf87f..ff8b831a7 100644 --- a/src/server/error.rs +++ b/src/server/error.rs @@ -1,9 +1,24 @@ +use std::io; + use futures::{Async, Poll}; use super::{helpers, HttpHandlerTask, Writer}; use http::{StatusCode, Version}; use Error; +/// Errors produced by `AcceptorError` service. +#[derive(Debug)] +pub enum AcceptorError { + /// The inner service error + Service(T), + + /// Io specific error + Io(io::Error), + + /// The request did not complete within the specified timeout. 
+ Timeout, +} + pub(crate) struct ServerError(Version, StatusCode); impl ServerError { diff --git a/src/server/http.rs b/src/server/http.rs index 81c4d3ad6..846f7f010 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -1,13 +1,11 @@ use std::{io, mem, net}; -use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System}; +use actix::{Addr, System}; use actix_net::server::Server; use actix_net::ssl; -use futures::Stream; use net2::TcpBuilder; use num_cpus; -use tokio_io::{AsyncRead, AsyncWrite}; #[cfg(feature = "tls")] use native_tls::TlsAcceptor; @@ -20,9 +18,6 @@ use rustls::ServerConfig; use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor}; use super::builder::{DefaultPipelineFactory, HttpServiceBuilder, ServiceProvider}; -use super::channel::{HttpChannel, WrapperStream}; -use super::handler::HttpHandler; -use super::settings::{ServerSettings, WorkerSettings}; use super::{IntoHttpHandler, KeepAlive}; struct Socket { @@ -42,9 +37,10 @@ where H: IntoHttpHandler + 'static, F: Fn() -> H + Send + Clone, { - factory: F, - host: Option, - keep_alive: KeepAlive, + pub(super) factory: F, + pub(super) host: Option, + pub(super) keep_alive: KeepAlive, + pub(super) client_timeout: usize, backlog: i32, threads: usize, exit: bool, @@ -53,7 +49,6 @@ where no_signals: bool, maxconn: usize, maxconnrate: usize, - client_timeout: usize, sockets: Vec, } @@ -524,61 +519,6 @@ impl H + Send + Clone> HttpServer { } } -impl HttpServer -where - H: IntoHttpHandler, - F: Fn() -> H + Send + Clone, -{ - #[doc(hidden)] - #[deprecated(since = "0.7.8")] - /// Start listening for incoming connections from a stream. - /// - /// This method uses only one thread for handling incoming connections. - pub fn start_incoming(self, stream: S, secure: bool) - where - S: Stream + 'static, - T: AsyncRead + AsyncWrite + 'static, - { - // set server settings - let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); - let apps = (self.factory)().into_handler(); - let settings = WorkerSettings::new( - apps, - self.keep_alive, - self.client_timeout as u64, - ServerSettings::new(Some(addr), &self.host, secure), - ); - - // start server - HttpIncoming::create(move |ctx| { - ctx.add_message_stream( - stream.map_err(|_| ()).map(move |t| WrapperStream::new(t)), - ); - HttpIncoming { settings } - }); - } -} - -struct HttpIncoming { - settings: WorkerSettings, -} - -impl Actor for HttpIncoming { - type Context = Context; -} - -impl Handler> for HttpIncoming -where - T: AsyncRead + AsyncWrite, - H: HttpHandler, -{ - type Result = (); - - fn handle(&mut self, msg: WrapperStream, _: &mut Context) -> Self::Result { - Arbiter::spawn(HttpChannel::new(self.settings.clone(), msg, None)); - } -} - fn create_tcp_listener( addr: net::SocketAddr, backlog: i32, ) -> io::Result { diff --git a/src/server/incoming.rs b/src/server/incoming.rs new file mode 100644 index 000000000..7ab289d04 --- /dev/null +++ b/src/server/incoming.rs @@ -0,0 +1,70 @@ +//! Support for `Stream`, deprecated! 
+use std::{io, net}; + +use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message}; +use futures::Stream; +use tokio_io::{AsyncRead, AsyncWrite}; + +use super::channel::{HttpChannel, WrapperStream}; +use super::handler::{HttpHandler, IntoHttpHandler}; +use super::http::HttpServer; +use super::settings::{ServerSettings, WorkerSettings}; + +impl Message for WrapperStream { + type Result = (); +} + +impl HttpServer +where + H: IntoHttpHandler, + F: Fn() -> H + Send + Clone, +{ + #[doc(hidden)] + #[deprecated(since = "0.7.8")] + /// Start listening for incoming connections from a stream. + /// + /// This method uses only one thread for handling incoming connections. + pub fn start_incoming(self, stream: S, secure: bool) + where + S: Stream + 'static, + T: AsyncRead + AsyncWrite + 'static, + { + // set server settings + let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); + let apps = (self.factory)().into_handler(); + let settings = WorkerSettings::new( + apps, + self.keep_alive, + self.client_timeout as u64, + ServerSettings::new(Some(addr), &self.host, secure), + ); + + // start server + HttpIncoming::create(move |ctx| { + ctx.add_message_stream( + stream.map_err(|_| ()).map(move |t| WrapperStream::new(t)), + ); + HttpIncoming { settings } + }); + } +} + +struct HttpIncoming { + settings: WorkerSettings, +} + +impl Actor for HttpIncoming { + type Context = Context; +} + +impl Handler> for HttpIncoming +where + T: AsyncRead + AsyncWrite, + H: HttpHandler, +{ + type Result = (); + + fn handle(&mut self, msg: WrapperStream, _: &mut Context) -> Self::Result { + Arbiter::spawn(HttpChannel::new(self.settings.clone(), msg, None)); + } +} diff --git a/src/server/mod.rs b/src/server/mod.rs index 9e91eda08..1e145571c 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -129,6 +129,7 @@ mod h2writer; mod handler; pub(crate) mod helpers; mod http; +pub(crate) mod incoming; pub(crate) mod input; pub(crate) mod message; pub(crate) mod output; From e95babf8d3559b947b4a06c331a1cb505571f834 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 28 Sep 2018 12:35:09 -0700 Subject: [PATCH 146/219] log acctor init errors --- Cargo.toml | 4 ++-- src/client/connector.rs | 4 ++-- src/server/acceptor.rs | 17 +++++++++++++---- src/server/builder.rs | 5 ++++- src/server/http.rs | 9 ++++++--- 5 files changed, 27 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0e95c327c..205e178b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,8 +60,8 @@ flate2-rust = ["flate2/rust_backend"] [dependencies] actix = "0.7.0" -#actix-net = { git="https://github.com/actix/actix-net.git" } -actix-net = { path = "../actix-net" } +actix-net = { git="https://github.com/actix/actix-net.git" } +#actix-net = { path = "../actix-net" } base64 = "0.9" bitflags = "1.0" diff --git a/src/client/connector.rs b/src/client/connector.rs index 6e82e3fd8..8d71913fe 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -51,7 +51,7 @@ type SslConnector = Arc; feature = "ssl", feature = "tls", feature = "rust-tls", -)))] +),))] type SslConnector = (); use server::IoStream; @@ -290,7 +290,7 @@ impl Default for ClientConnector { feature = "ssl", feature = "tls", feature = "rust-tls", - )))] + ),))] { () } diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs index caad0e2e3..bad8847dc 100644 --- a/src/server/acceptor.rs +++ b/src/server/acceptor.rs @@ -1,5 +1,5 @@ -use std::net; use std::time::Duration; +use std::{fmt, net}; use actix_net::server::ServerMessage; use 
actix_net::service::{NewService, Service}; @@ -27,6 +27,7 @@ where F: Fn() -> T + Send + Clone + 'static, T::Response: IoStream + Send, T: NewService, + T::InitError: fmt::Debug, { type Io = T::Response; type NewService = T; @@ -84,6 +85,7 @@ pub(crate) struct TcpAcceptor { impl TcpAcceptor where T: NewService>, + T::InitError: fmt::Debug, { pub(crate) fn new(inner: T) -> Self { TcpAcceptor { inner } @@ -93,6 +95,7 @@ where impl NewService for TcpAcceptor where T: NewService>, + T::InitError: fmt::Debug, { type Request = net::TcpStream; type Response = T::Response; @@ -111,6 +114,7 @@ where pub(crate) struct TcpAcceptorResponse where T: NewService, + T::InitError: fmt::Debug, { fut: T::Future, } @@ -118,16 +122,21 @@ where impl Future for TcpAcceptorResponse where T: NewService, + T::InitError: fmt::Debug, { type Item = TcpAcceptorService; type Error = T::InitError; fn poll(&mut self) -> Poll { - match self.fut.poll()? { - Async::NotReady => Ok(Async::NotReady), - Async::Ready(service) => { + match self.fut.poll() { + Ok(Async::NotReady) => Ok(Async::NotReady), + Ok(Async::Ready(service)) => { Ok(Async::Ready(TcpAcceptorService { inner: service })) } + Err(e) => { + error!("Can not create accetor service: {:?}", e); + Err(e) + } } } } diff --git a/src/server/builder.rs b/src/server/builder.rs index 46ab9f467..8c0a0f624 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -1,5 +1,5 @@ use std::marker::PhantomData; -use std::net; +use std::{fmt, net}; use actix_net::either::Either; use actix_net::server::{Server, ServiceFactory}; @@ -37,6 +37,7 @@ where F: Fn() -> H + Send + Clone + 'static, H: IntoHttpHandler, A: AcceptorServiceFactory, + ::InitError: fmt::Debug, P: HttpPipelineFactory, { /// Create http service builder @@ -58,6 +59,7 @@ where pub fn acceptor(self, acceptor: A1) -> HttpServiceBuilder where A1: AcceptorServiceFactory, + ::InitError: fmt::Debug, { HttpServiceBuilder { acceptor, @@ -153,6 +155,7 @@ impl ServiceProvider for HttpServiceBuilder where F: Fn() -> H + Send + Clone + 'static, A: AcceptorServiceFactory, + ::InitError: fmt::Debug, P: HttpPipelineFactory, H: IntoHttpHandler, { diff --git a/src/server/http.rs b/src/server/http.rs index 846f7f010..034f903e2 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -1,7 +1,8 @@ -use std::{io, mem, net}; +use std::{fmt, io, mem, net}; use actix::{Addr, System}; use actix_net::server::Server; +use actix_net::service::NewService; use actix_net::ssl; use net2::TcpBuilder; @@ -233,6 +234,7 @@ where pub(crate) fn listen_with(mut self, lst: net::TcpListener, acceptor: A) -> Self where A: AcceptorServiceFactory, + ::InitError: fmt::Debug, { let addr = lst.local_addr().unwrap(); self.sockets.push(Socket { @@ -254,7 +256,7 @@ where /// /// HttpServer does not change any configuration for TcpListener, /// it needs to be configured before passing it to listen() method. 
- pub fn listen_tls(mut self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { + pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { use actix_net::service::NewServiceExt; self.listen_with(lst, move || { @@ -288,7 +290,7 @@ where /// Use listener for accepting incoming tls connection requests /// /// This method sets alpn protocols to "h2" and "http/1.1" - pub fn listen_rustls(mut self, lst: net::TcpListener, config: ServerConfig) -> Self { + pub fn listen_rustls(self, lst: net::TcpListener, config: ServerConfig) -> Self { use super::{RustlsAcceptor, ServerFlags}; use actix_net::service::NewServiceExt; @@ -324,6 +326,7 @@ where where S: net::ToSocketAddrs, A: AcceptorServiceFactory, + ::InitError: fmt::Debug, { let sockets = self.bind2(addr)?; From 4aac3d6a92ccdffc9eaf324a207c98ce6df8d4b4 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 28 Sep 2018 15:04:59 -0700 Subject: [PATCH 147/219] refactor keep-alive timer --- src/client/connector.rs | 4 +- src/server/h1.rs | 176 ++++++++++++++++++++++++++-------------- src/server/h1writer.rs | 4 + src/server/h2.rs | 21 +++-- src/server/settings.rs | 79 +++++++++++++----- 5 files changed, 189 insertions(+), 95 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 8d71913fe..32426e0ac 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -50,8 +50,8 @@ type SslConnector = Arc; feature = "alpn", feature = "ssl", feature = "tls", - feature = "rust-tls", -),))] + feature = "rust-tls" +)))] type SslConnector = (); use server::IoStream; diff --git a/src/server/h1.rs b/src/server/h1.rs index b5ee93e66..76c0d4b6e 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -1,6 +1,6 @@ use std::collections::VecDeque; use std::net::SocketAddr; -use std::time::{Duration, Instant}; +use std::time::Instant; use bytes::BytesMut; use futures::{Async, Future, Poll}; @@ -49,7 +49,14 @@ pub(crate) struct Http1 { payload: Option, buf: BytesMut, tasks: VecDeque>, - keepalive_timer: Option, + ka_enabled: bool, + ka_expire: Instant, + ka_timer: Option, +} + +struct Entry { + pipe: EntryPipe, + flags: EntryFlags, } enum EntryPipe { @@ -78,11 +85,6 @@ impl EntryPipe { } } -struct Entry { - pipe: EntryPipe, - flags: EntryFlags, -} - impl Http1 where T: IoStream, @@ -92,6 +94,15 @@ where settings: WorkerSettings, stream: T, addr: Option, buf: BytesMut, is_eof: bool, keepalive_timer: Option, ) -> Self { + let ka_enabled = settings.keep_alive_enabled(); + let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer { + (delay.deadline(), Some(delay)) + } else if let Some(delay) = settings.keep_alive_timer() { + (delay.deadline(), Some(delay)) + } else { + (settings.now(), None) + }; + Http1 { flags: if is_eof { Flags::READ_DISCONNECTED @@ -105,7 +116,9 @@ where addr, buf, settings, - keepalive_timer, + ka_timer, + ka_expire, + ka_enabled, } } @@ -143,9 +156,6 @@ where for task in &mut self.tasks { task.pipe.disconnected(); } - - // kill keepalive - self.keepalive_timer.take(); } fn read_disconnected(&mut self) { @@ -163,16 +173,9 @@ where #[inline] pub fn poll(&mut self) -> Poll<(), ()> { - // keep-alive timer - if let Some(ref mut timer) = self.keepalive_timer { - match timer.poll() { - Ok(Async::Ready(_)) => { - trace!("Keep-alive timeout, close connection"); - self.flags.insert(Flags::SHUTDOWN); - } - Ok(Async::NotReady) => (), - Err(_) => unreachable!(), - } + // check connection keep-alive + if !self.poll_keep_alive() { + return Ok(Async::Ready(())); } // shutdown @@ -203,11 +206,70 @@ 
where self.flags.insert(Flags::SHUTDOWN); return self.poll(); } - Async::NotReady => return Ok(Async::NotReady), + Async::NotReady => { + // deal with keep-alive and steam eof (client-side write shutdown) + if self.tasks.is_empty() { + // handle stream eof + if self.flags.contains(Flags::READ_DISCONNECTED) { + self.flags.insert(Flags::SHUTDOWN); + return self.poll(); + } + // no keep-alive + if self.flags.contains(Flags::ERROR) + || (!self.flags.contains(Flags::KEEPALIVE) + || !self.ka_enabled) + && self.flags.contains(Flags::STARTED) + { + self.flags.insert(Flags::SHUTDOWN); + return self.poll(); + } + } + return Ok(Async::NotReady); + } } } } + /// keep-alive timer. returns `true` is keep-alive, otherwise drop + fn poll_keep_alive(&mut self) -> bool { + let timer = if let Some(ref mut timer) = self.ka_timer { + match timer.poll() { + Ok(Async::Ready(_)) => { + if timer.deadline() >= self.ka_expire { + // check for any outstanding request handling + if self.tasks.is_empty() { + // if we get timer during shutdown, just drop connection + if self.flags.contains(Flags::SHUTDOWN) { + return false; + } else { + trace!("Keep-alive timeout, close connection"); + self.flags.insert(Flags::SHUTDOWN); + None + } + } else { + self.settings.keep_alive_timer() + } + } else { + Some(Delay::new(self.ka_expire)) + } + } + Ok(Async::NotReady) => None, + Err(e) => { + error!("Timer error {:?}", e); + return false; + } + } + } else { + None + }; + + if let Some(mut timer) = timer { + let _ = timer.poll(); + self.ka_timer = Some(timer); + } + true + } + #[inline] /// read data from stream pub fn poll_io(&mut self) { @@ -283,6 +345,11 @@ where } // no more IO for this iteration Ok(Async::NotReady) => { + // check if we need timer + if self.ka_timer.is_some() && self.stream.upgrade() { + self.ka_timer.take(); + } + // check if previously read backpressure was enabled if self.can_read() && !retry { return Ok(Async::Ready(true)); @@ -348,32 +415,6 @@ where } } - // deal with keep-alive and steam eof (client-side write shutdown) - if self.tasks.is_empty() { - // handle stream eof - if self.flags.contains(Flags::READ_DISCONNECTED) { - return Ok(Async::Ready(false)); - } - // no keep-alive - if self.flags.contains(Flags::ERROR) - || (!self.flags.contains(Flags::KEEPALIVE) - || !self.settings.keep_alive_enabled()) - && self.flags.contains(Flags::STARTED) - { - return Ok(Async::Ready(false)); - } - - // start keep-alive timer - let keep_alive = self.settings.keep_alive(); - if self.keepalive_timer.is_none() && keep_alive > 0 { - trace!("Start keep-alive timer"); - let mut timer = - Delay::new(Instant::now() + Duration::from_secs(keep_alive)); - // register timer - let _ = timer.poll(); - self.keepalive_timer = Some(timer); - } - } Ok(Async::NotReady) } @@ -385,9 +426,12 @@ where } pub fn parse(&mut self) { + let mut updated = false; + 'outer: loop { match self.decoder.decode(&mut self.buf, &self.settings) { Ok(Some(Message::Message { mut msg, payload })) => { + updated = true; self.flags.insert(Flags::STARTED); if payload { @@ -403,9 +447,6 @@ where // set remote addr msg.inner_mut().addr = self.addr; - // stop keepalive timer - self.keepalive_timer.take(); - // search handler for request match self.settings.handler().handle(msg) { Ok(mut pipe) => { @@ -430,7 +471,7 @@ where } continue 'outer; } - Ok(Async::NotReady) => {} + Ok(Async::NotReady) => (), Err(err) => { error!("Unhandled error: {}", err); self.flags.insert(Flags::ERROR); @@ -460,6 +501,7 @@ where self.push_response_entry(StatusCode::NOT_FOUND); } 
Ok(Some(Message::Chunk(chunk))) => { + updated = true; if let Some(ref mut payload) = self.payload { payload.feed_data(chunk); } else { @@ -470,6 +512,7 @@ where } } Ok(Some(Message::Eof)) => { + updated = true; if let Some(mut payload) = self.payload.take() { payload.feed_eof(); } else { @@ -489,6 +532,7 @@ where break; } Err(e) => { + updated = false; self.flags.insert(Flags::ERROR); if let Some(mut payload) = self.payload.take() { let e = match e { @@ -504,6 +548,12 @@ where } } } + + if self.ka_timer.is_some() && updated { + if let Some(expire) = self.settings.keep_alive_expire() { + self.ka_expire = expire; + } + } } } @@ -512,7 +562,9 @@ mod tests { use std::net::Shutdown; use std::{cmp, io, time}; + use actix::System; use bytes::{Buf, Bytes, BytesMut}; + use futures::future; use http::{Method, Version}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -647,15 +699,19 @@ mod tests { #[test] fn test_req_parse_err() { - let buf = Buffer::new("GET /test HTTP/1\r\n\r\n"); - let readbuf = BytesMut::new(); - let settings = wrk_settings(); + let mut sys = System::new("test"); + sys.block_on(future::lazy(|| { + let buf = Buffer::new("GET /test HTTP/1\r\n\r\n"); + let readbuf = BytesMut::new(); + let settings = wrk_settings(); - let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, false, None); - h1.poll_io(); - h1.poll_io(); - assert!(h1.flags.contains(Flags::ERROR)); - assert_eq!(h1.tasks.len(), 1); + let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, false, None); + h1.poll_io(); + h1.poll_io(); + assert!(h1.flags.contains(Flags::ERROR)); + assert_eq!(h1.tasks.len(), 1); + future::ok::<_, ()>(()) + })); } #[test] diff --git a/src/server/h1writer.rs b/src/server/h1writer.rs index 15451659d..3036aa089 100644 --- a/src/server/h1writer.rs +++ b/src/server/h1writer.rs @@ -66,6 +66,10 @@ impl H1Writer { self.flags.insert(Flags::DISCONNECTED); } + pub fn upgrade(&self) -> bool { + self.flags.contains(Flags::UPGRADE) + } + pub fn keepalive(&self) -> bool { self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE) } diff --git a/src/server/h2.rs b/src/server/h2.rs index f31c2db38..d9ca2f64a 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -2,7 +2,7 @@ use std::collections::VecDeque; use std::io::{Read, Write}; use std::net::SocketAddr; use std::rc::Rc; -use std::time::{Duration, Instant}; +use std::time::Instant; use std::{cmp, io, mem}; use bytes::{Buf, Bytes}; @@ -232,16 +232,15 @@ where // start keep-alive timer if self.tasks.is_empty() { if self.settings.keep_alive_enabled() { - let keep_alive = self.settings.keep_alive(); - if keep_alive > 0 && self.keepalive_timer.is_none() { - trace!("Start keep-alive timer"); - let mut timeout = Delay::new( - Instant::now() - + Duration::new(keep_alive, 0), - ); - // register timeout - let _ = timeout.poll(); - self.keepalive_timer = Some(timeout); + if self.keepalive_timer.is_none() { + if let Some(ka) = self.settings.keep_alive() { + trace!("Start keep-alive timer"); + let mut timeout = + Delay::new(Instant::now() + ka); + // register timeout + let _ = timeout.poll(); + self.keepalive_timer = Some(timeout); + } } } else { // keep-alive disable, drop connection diff --git a/src/server/settings.rs b/src/server/settings.rs index db5f6c57b..5ca777290 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -137,7 +137,7 @@ pub struct WorkerSettings(Rc>); struct Inner { handler: H, - keep_alive: u64, + keep_alive: Option, client_timeout: u64, ka_enabled: bool, bytes: Rc, @@ -161,6 +161,11 @@ impl 
WorkerSettings { KeepAlive::Os | KeepAlive::Tcp(_) => (0, true), KeepAlive::Disabled => (0, false), }; + let keep_alive = if ka_enabled && keep_alive > 0 { + Some(Duration::from_secs(keep_alive)) + } else { + None + }; WorkerSettings(Rc::new(Inner { handler, @@ -183,17 +188,7 @@ impl WorkerSettings { } #[inline] - pub fn keep_alive_timer(&self) -> Option { - let ka = self.0.keep_alive; - if ka != 0 { - Some(Delay::new(Instant::now() + Duration::from_secs(ka))) - } else { - None - } - } - - #[inline] - pub fn keep_alive(&self) -> u64 { + pub fn keep_alive(&self) -> Option { self.0.keep_alive } @@ -202,16 +197,6 @@ impl WorkerSettings { self.0.ka_enabled } - #[inline] - pub fn client_timer(&self) -> Option { - let delay = self.0.client_timeout; - if delay != 0 { - Some(Delay::new(Instant::now() + Duration::from_millis(delay))) - } else { - None - } - } - pub(crate) fn get_bytes(&self) -> BytesMut { self.0.bytes.get_bytes() } @@ -231,6 +216,34 @@ impl WorkerSettings { } impl WorkerSettings { + #[inline] + pub fn client_timer(&self) -> Option { + let delay = self.0.client_timeout; + if delay != 0 { + Some(Delay::new(self.now() + Duration::from_millis(delay))) + } else { + None + } + } + + #[inline] + pub fn keep_alive_timer(&self) -> Option { + if let Some(ka) = self.0.keep_alive { + Some(Delay::new(self.now() + ka)) + } else { + None + } + } + + /// Keep-alive expire time + pub fn keep_alive_expire(&self) -> Option { + if let Some(ka) = self.0.keep_alive { + Some(self.now() + ka) + } else { + None + } + } + pub(crate) fn set_date(&self, dst: &mut BytesMut, full: bool) { // Unsafe: WorkerSetting is !Sync and !Send let date_bytes = unsafe { @@ -258,9 +271,29 @@ impl WorkerSettings { dst.extend_from_slice(date_bytes); } } + + #[inline] + pub(crate) fn now(&self) -> Instant { + unsafe { + let date = &mut (*self.0.date.get()); + if !date.0 { + date.1.update(); + date.0 = true; + + // periodic date update + let s = self.clone(); + spawn(sleep(Duration::from_secs(1)).then(move |_| { + s.update_date(); + future::ok(()) + })); + } + date.1.current + } + } } struct Date { + current: Instant, bytes: [u8; DATE_VALUE_LENGTH], pos: usize, } @@ -268,6 +301,7 @@ struct Date { impl Date { fn new() -> Date { let mut date = Date { + current: Instant::now(), bytes: [0; DATE_VALUE_LENGTH], pos: 0, }; @@ -276,6 +310,7 @@ impl Date { } fn update(&mut self) { self.pos = 0; + self.current = Instant::now(); write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap(); } } From 5966ee6192bcd12580637f9f388244def7a80752 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Fri, 28 Sep 2018 16:03:53 -0700 Subject: [PATCH 148/219] add HttpServer::register() function, allows to register services in actix net server --- src/client/connector.rs | 2 +- src/server/builder.rs | 19 +++++++++++++++++++ src/server/http.rs | 15 +++++++++++++++ src/server/ssl/mod.rs | 2 +- src/server/ssl/openssl.rs | 26 +++++++++++++++++++++++--- 5 files changed, 59 insertions(+), 5 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 32426e0ac..3f4916afa 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -290,7 +290,7 @@ impl Default for ClientConnector { feature = "ssl", feature = "tls", feature = "rust-tls", - ),))] + )))] { () } diff --git a/src/server/builder.rs b/src/server/builder.rs index 8c0a0f624..c9a97af3e 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -32,6 +32,25 @@ where no_client_timer: bool, } +impl HttpServiceBuilder> +where + Io: IoStream + Send, + F: Fn() -> H + 
Send + Clone + 'static, + H: IntoHttpHandler, + A: AcceptorServiceFactory, + ::InitError: fmt::Debug, +{ + /// Create http service builder with default pipeline factory + pub fn with_default_pipeline(factory: F, acceptor: A) -> Self { + Self { + factory, + acceptor, + pipeline: DefaultPipelineFactory::new(), + no_client_timer: false, + } + } +} + impl HttpServiceBuilder where F: Fn() -> H + Send + Clone + 'static, diff --git a/src/server/http.rs b/src/server/http.rs index 034f903e2..6344771b6 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -520,6 +520,21 @@ impl H + Send + Clone> HttpServer { self.start(); sys.run(); } + + /// Register current http server as actix-net's server service + pub fn register(self, mut srv: Server) -> Server { + for socket in self.sockets { + srv = socket.handler.register( + srv, + socket.lst, + self.host.clone(), + socket.addr, + self.keep_alive.clone(), + self.client_timeout, + ); + } + srv + } } fn create_tcp_listener( diff --git a/src/server/ssl/mod.rs b/src/server/ssl/mod.rs index 1d6b55b10..c09573fe3 100644 --- a/src/server/ssl/mod.rs +++ b/src/server/ssl/mod.rs @@ -1,7 +1,7 @@ #[cfg(any(feature = "alpn", feature = "ssl"))] mod openssl; #[cfg(any(feature = "alpn", feature = "ssl"))] -pub use self::openssl::*; +pub use self::openssl::{openssl_acceptor_with_flags, OpensslAcceptor}; #[cfg(feature = "tls")] mod nativetls; diff --git a/src/server/ssl/openssl.rs b/src/server/ssl/openssl.rs index 343155233..590dc0bbb 100644 --- a/src/server/ssl/openssl.rs +++ b/src/server/ssl/openssl.rs @@ -1,14 +1,34 @@ use std::net::Shutdown; use std::{io, time}; +use actix_net::ssl; use openssl::ssl::{AlpnError, SslAcceptor, SslAcceptorBuilder}; +use tokio_io::{AsyncRead, AsyncWrite}; use tokio_openssl::SslStream; use server::{IoStream, ServerFlags}; -/// Configure `SslAcceptorBuilder` with enabled `HTTP/2` and `HTTP1.1` support. -pub fn openssl_acceptor(builder: SslAcceptorBuilder) -> io::Result { - openssl_acceptor_with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2) +/// Support `SSL` connections via openssl package +/// +/// `ssl` feature enables `OpensslAcceptor` type +pub struct OpensslAcceptor { + _t: ssl::OpensslAcceptor, +} + +impl OpensslAcceptor { + /// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support. + pub fn new(builder: SslAcceptorBuilder) -> io::Result> { + OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2) + } + + /// Create `OpensslAcceptor` with custom server flags. + pub fn with_flags( + mut builder: SslAcceptorBuilder, flags: ServerFlags, + ) -> io::Result> { + let acceptor = openssl_acceptor_with_flags(builder, flags)?; + + Ok(ssl::OpensslAcceptor::new(acceptor)) + } } /// Configure `SslAcceptorBuilder` with custom server flags. 
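
A rough usage sketch of the `HttpServer::register()` API introduced by this
patch: the HTTP server is bound as usual, then attached to an existing
actix-net `Server` alongside any other services instead of being started on
its own. This note is not part of the patch itself; the address, the trivial
app factory and the final `run()` call are illustrative only, and the exact
actix-net `Server` methods may differ slightly in this revision.

    extern crate actix_net;
    extern crate actix_web;

    use actix_net::server::Server;
    use actix_web::{server, App, HttpResponse};

    fn main() {
        // Plain actix-net server that may already host other services.
        let srv = Server::new();

        // Bind the HTTP server, then hand its listening sockets over to
        // `srv` via the new `register()` method.
        let srv = server::new(|| {
            App::new().resource("/", |r| r.f(|_| HttpResponse::Ok()))
        }).bind("127.0.0.1:8080")
            .unwrap()
            .register(srv);

        srv.run();
    }
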
From c1e0b4f32275b212992c9f9991e3f4797e66c152 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 14:43:06 -0700 Subject: [PATCH 149/219] expose internal http server types and allow to create custom http pipelines --- src/server/builder.rs | 132 +++++------------------------------------ src/server/channel.rs | 12 ++-- src/server/error.rs | 34 +++++++++++ src/server/h1.rs | 12 ++-- src/server/h2.rs | 14 ++--- src/server/http.rs | 34 ++++++----- src/server/incoming.rs | 8 ++- src/server/mod.rs | 8 +-- src/server/service.rs | 12 ++-- src/server/settings.rs | 24 ++++---- tests/test_server.rs | 37 ++++++++++++ 11 files changed, 148 insertions(+), 179 deletions(-) diff --git a/src/server/builder.rs b/src/server/builder.rs index c9a97af3e..8e7f82f80 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -1,4 +1,3 @@ -use std::marker::PhantomData; use std::{fmt, net}; use actix_net::either::Either; @@ -9,61 +8,39 @@ use super::acceptor::{ AcceptorServiceFactory, AcceptorTimeout, ServerMessageAcceptor, TcpAcceptor, }; use super::error::AcceptorError; -use super::handler::{HttpHandler, IntoHttpHandler}; +use super::handler::IntoHttpHandler; use super::service::HttpService; use super::settings::{ServerSettings, WorkerSettings}; -use super::{IoStream, KeepAlive}; +use super::KeepAlive; pub(crate) trait ServiceProvider { fn register( - &self, server: Server, lst: net::TcpListener, host: Option, + &self, server: Server, lst: net::TcpListener, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, ) -> Server; } /// Utility type that builds complete http pipeline -pub struct HttpServiceBuilder +pub struct HttpServiceBuilder where F: Fn() -> H + Send + Clone, { factory: F, acceptor: A, - pipeline: P, no_client_timer: bool, } -impl HttpServiceBuilder> -where - Io: IoStream + Send, - F: Fn() -> H + Send + Clone + 'static, - H: IntoHttpHandler, - A: AcceptorServiceFactory, - ::InitError: fmt::Debug, -{ - /// Create http service builder with default pipeline factory - pub fn with_default_pipeline(factory: F, acceptor: A) -> Self { - Self { - factory, - acceptor, - pipeline: DefaultPipelineFactory::new(), - no_client_timer: false, - } - } -} - -impl HttpServiceBuilder +impl HttpServiceBuilder where F: Fn() -> H + Send + Clone + 'static, H: IntoHttpHandler, A: AcceptorServiceFactory, ::InitError: fmt::Debug, - P: HttpPipelineFactory, { /// Create http service builder - pub fn new(factory: F, acceptor: A, pipeline: P) -> Self { + pub fn new(factory: F, acceptor: A) -> Self { Self { factory, - pipeline, acceptor, no_client_timer: false, } @@ -75,34 +52,20 @@ where } /// Use different acceptor factory - pub fn acceptor(self, acceptor: A1) -> HttpServiceBuilder + pub fn acceptor(self, acceptor: A1) -> HttpServiceBuilder where A1: AcceptorServiceFactory, ::InitError: fmt::Debug, { HttpServiceBuilder { acceptor, - pipeline: self.pipeline, - factory: self.factory.clone(), - no_client_timer: self.no_client_timer, - } - } - - /// Use different pipeline factory - pub fn pipeline(self, pipeline: P1) -> HttpServiceBuilder - where - P1: HttpPipelineFactory, - { - HttpServiceBuilder { - pipeline, - acceptor: self.acceptor, factory: self.factory.clone(), no_client_timer: self.no_client_timer, } } fn finish( - &self, host: Option, addr: net::SocketAddr, keep_alive: KeepAlive, + &self, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, ) -> impl ServiceFactory { let timeout = if self.no_client_timer { @@ -111,7 +74,6 @@ where client_timeout }; 
let factory = self.factory.clone(); - let pipeline = self.pipeline.clone(); let acceptor = self.acceptor.clone(); move || { let app = (factory)().into_handler(); @@ -119,7 +81,7 @@ where app, keep_alive, timeout as u64, - ServerSettings::new(Some(addr), &host, false), + ServerSettings::new(addr, &host, false), ); if timeout == 0 { @@ -129,8 +91,7 @@ where .map_err(|_| ()) .map_init_err(|_| ()) .and_then( - pipeline - .create(settings) + HttpService::new(settings) .map_init_err(|_| ()) .map_err(|_| ()), ), @@ -142,8 +103,7 @@ where .map_err(|_| ()) .map_init_err(|_| ()) .and_then( - pipeline - .create(settings) + HttpService::new(settings) .map_init_err(|_| ()) .map_err(|_| ()), ), @@ -153,33 +113,30 @@ where } } -impl Clone for HttpServiceBuilder +impl Clone for HttpServiceBuilder where F: Fn() -> H + Send + Clone, H: IntoHttpHandler, A: AcceptorServiceFactory, - P: HttpPipelineFactory, { fn clone(&self) -> Self { HttpServiceBuilder { factory: self.factory.clone(), acceptor: self.acceptor.clone(), - pipeline: self.pipeline.clone(), no_client_timer: self.no_client_timer, } } } -impl ServiceProvider for HttpServiceBuilder +impl ServiceProvider for HttpServiceBuilder where F: Fn() -> H + Send + Clone + 'static, A: AcceptorServiceFactory, ::InitError: fmt::Debug, - P: HttpPipelineFactory, H: IntoHttpHandler, { fn register( - &self, server: Server, lst: net::TcpListener, host: Option, + &self, server: Server, lst: net::TcpListener, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, ) -> Server { server.listen2( @@ -189,64 +146,3 @@ where ) } } - -pub trait HttpPipelineFactory: Send + Clone + 'static { - type Io: IoStream; - type NewService: NewService; - - fn create(&self, settings: WorkerSettings) -> Self::NewService; -} - -impl HttpPipelineFactory for F -where - F: Fn(WorkerSettings) -> T + Send + Clone + 'static, - T: NewService, - T::Request: IoStream, - H: HttpHandler, -{ - type Io = T::Request; - type NewService = T; - - fn create(&self, settings: WorkerSettings) -> T { - (self)(settings) - } -} - -pub(crate) struct DefaultPipelineFactory { - _t: PhantomData<(H, Io)>, -} - -unsafe impl Send for DefaultPipelineFactory {} - -impl DefaultPipelineFactory -where - Io: IoStream + Send, - H: HttpHandler + 'static, -{ - pub fn new() -> Self { - Self { _t: PhantomData } - } -} - -impl Clone for DefaultPipelineFactory -where - Io: IoStream, - H: HttpHandler, -{ - fn clone(&self) -> Self { - Self { _t: PhantomData } - } -} - -impl HttpPipelineFactory for DefaultPipelineFactory -where - Io: IoStream, - H: HttpHandler + 'static, -{ - type Io = Io; - type NewService = HttpService; - - fn create(&self, settings: WorkerSettings) -> Self::NewService { - HttpService::new(settings) - } -} diff --git a/src/server/channel.rs b/src/server/channel.rs index c1e6b6b24..3cea291fd 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -6,6 +6,7 @@ use futures::{Async, Future, Poll}; use tokio_io::{AsyncRead, AsyncWrite}; use tokio_timer::Delay; +use super::error::HttpDispatchError; use super::settings::WorkerSettings; use super::{h1, h2, HttpHandler, IoStream}; @@ -86,7 +87,7 @@ where H: HttpHandler + 'static, { type Item = (); - type Error = (); + type Error = HttpDispatchError; fn poll(&mut self) -> Poll { // keep-alive timer @@ -127,6 +128,7 @@ where return h2.poll(); } Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => { + let mut err = None; let mut disconnect = false; match io.read_available(buf) { Ok(Async::Ready((read_some, stream_closed))) => { @@ 
-136,14 +138,16 @@ where disconnect = true; } } - Err(_) => { - disconnect = true; + Err(e) => { + err = Some(e.into()); } _ => (), } if disconnect { debug!("Ignored premature client disconnection"); - return Err(()); + return Ok(Async::Ready(())); + } else if let Some(e) = err { + return Err(e); } if buf.len() >= 14 { diff --git a/src/server/error.rs b/src/server/error.rs index ff8b831a7..b8b602266 100644 --- a/src/server/error.rs +++ b/src/server/error.rs @@ -1,6 +1,7 @@ use std::io; use futures::{Async, Poll}; +use http2; use super::{helpers, HttpHandlerTask, Writer}; use http::{StatusCode, Version}; @@ -19,6 +20,39 @@ pub enum AcceptorError { Timeout, } +#[derive(Fail, Debug)] +/// A set of errors that can occur during dispatching http requests +pub enum HttpDispatchError { + /// Application error + #[fail(display = "Application specific error")] + AppError, + + /// An `io::Error` that occurred while trying to read or write to a network + /// stream. + #[fail(display = "IO error: {}", _0)] + Io(io::Error), + + /// The first request did not complete within the specified timeout. + #[fail(display = "The first request did not complete within the specified timeout")] + SlowRequestTimeout, + + /// HTTP2 error + #[fail(display = "HTTP2 error: {}", _0)] + Http2(http2::Error), +} + +impl From for HttpDispatchError { + fn from(err: io::Error) -> Self { + HttpDispatchError::Io(err) + } +} + +impl From for HttpDispatchError { + fn from(err: http2::Error) -> Self { + HttpDispatchError::Http2(err) + } +} + pub(crate) struct ServerError(Version, StatusCode); impl ServerError { diff --git a/src/server/h1.rs b/src/server/h1.rs index 76c0d4b6e..b17981225 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -10,7 +10,7 @@ use error::{Error, PayloadError}; use http::{StatusCode, Version}; use payload::{Payload, PayloadStatus, PayloadWriter}; -use super::error::ServerError; +use super::error::{HttpDispatchError, ServerError}; use super::h1decoder::{DecoderError, H1Decoder, Message}; use super::h1writer::H1Writer; use super::input::PayloadType; @@ -172,7 +172,7 @@ where } #[inline] - pub fn poll(&mut self) -> Poll<(), ()> { + pub fn poll(&mut self) -> Poll<(), HttpDispatchError> { // check connection keep-alive if !self.poll_keep_alive() { return Ok(Async::Ready(())); @@ -190,7 +190,7 @@ where Ok(Async::Ready(_)) => return Ok(Async::Ready(())), Err(err) => { debug!("Error sending data: {}", err); - return Err(()); + return Err(err.into()); } } } @@ -303,7 +303,7 @@ where } } - pub fn poll_handler(&mut self) -> Poll { + pub fn poll_handler(&mut self) -> Poll { let retry = self.can_read(); // check in-flight messages @@ -321,7 +321,7 @@ where return Ok(Async::NotReady); } self.flags.insert(Flags::ERROR); - return Err(()); + return Err(HttpDispatchError::AppError); } match self.tasks[idx].pipe.poll_io(&mut self.stream) { @@ -404,7 +404,7 @@ where debug!("Error sending data: {}", err); self.read_disconnected(); self.write_disconnected(); - return Err(()); + return Err(err.into()); } Ok(Async::Ready(_)) => { // non consumed payload in that case close connection diff --git a/src/server/h2.rs b/src/server/h2.rs index d9ca2f64a..589e77c2d 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -19,7 +19,7 @@ use http::{StatusCode, Version}; use payload::{Payload, PayloadStatus, PayloadWriter}; use uri::Url; -use super::error::ServerError; +use super::error::{HttpDispatchError, ServerError}; use super::h2writer::H2Writer; use super::input::PayloadType; use super::settings::WorkerSettings; @@ -86,7 +86,7 @@ where 
&self.settings } - pub fn poll(&mut self) -> Poll<(), ()> { + pub fn poll(&mut self) -> Poll<(), HttpDispatchError> { // server if let State::Connection(ref mut conn) = self.state { // keep-alive timer @@ -244,9 +244,7 @@ where } } else { // keep-alive disable, drop connection - return conn.poll_close().map_err(|e| { - error!("Error during connection close: {}", e) - }); + return conn.poll_close().map_err(|e| e.into()); } } else { // keep-alive unset, rely on operating system @@ -267,9 +265,7 @@ where if not_ready { if self.tasks.is_empty() && self.flags.contains(Flags::DISCONNECTED) { - return conn - .poll_close() - .map_err(|e| error!("Error during connection close: {}", e)); + return conn.poll_close().map_err(|e| e.into()); } else { return Ok(Async::NotReady); } @@ -284,7 +280,7 @@ where Ok(Async::NotReady) => return Ok(Async::NotReady), Err(err) => { trace!("Error handling connection: {}", err); - return Err(()); + return Err(err.into()); } } } else { diff --git a/src/server/http.rs b/src/server/http.rs index 6344771b6..311c53cb2 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -18,7 +18,7 @@ use openssl::ssl::SslAcceptorBuilder; use rustls::ServerConfig; use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor}; -use super::builder::{DefaultPipelineFactory, HttpServiceBuilder, ServiceProvider}; +use super::builder::{HttpServiceBuilder, ServiceProvider}; use super::{IntoHttpHandler, KeepAlive}; struct Socket { @@ -131,7 +131,7 @@ where self } - /// Set server client timneout in milliseconds for first request. + /// Set server client timeout in milliseconds for first request. /// /// Defines a timeout for reading client request header. If a client does not transmit /// the entire set headers within this time, the request is terminated with @@ -218,11 +218,8 @@ where addr, scheme: "http", handler: Box::new( - HttpServiceBuilder::new( - self.factory.clone(), - DefaultAcceptor, - DefaultPipelineFactory::new(), - ).no_client_timer(), + HttpServiceBuilder::new(self.factory.clone(), DefaultAcceptor) + .no_client_timer(), ), }); @@ -231,7 +228,7 @@ where #[doc(hidden)] /// Use listener for accepting incoming connection requests - pub(crate) fn listen_with(mut self, lst: net::TcpListener, acceptor: A) -> Self + pub fn listen_with(mut self, lst: net::TcpListener, acceptor: A) -> Self where A: AcceptorServiceFactory, ::InitError: fmt::Debug, @@ -241,11 +238,7 @@ where lst, addr, scheme: "https", - handler: Box::new(HttpServiceBuilder::new( - self.factory.clone(), - acceptor, - DefaultPipelineFactory::new(), - )), + handler: Box::new(HttpServiceBuilder::new(self.factory.clone(), acceptor)), }); self @@ -339,7 +332,6 @@ where handler: Box::new(HttpServiceBuilder::new( self.factory.clone(), acceptor.clone(), - DefaultPipelineFactory::new(), )), }); } @@ -483,10 +475,15 @@ impl H + Send + Clone> HttpServer { let sockets = mem::replace(&mut self.sockets, Vec::new()); for socket in sockets { + let host = self + .host + .as_ref() + .map(|h| h.to_owned()) + .unwrap_or_else(|| format!("{}", socket.addr)); srv = socket.handler.register( srv, socket.lst, - self.host.clone(), + host, socket.addr, self.keep_alive.clone(), self.client_timeout, @@ -524,10 +521,15 @@ impl H + Send + Clone> HttpServer { /// Register current http server as actix-net's server service pub fn register(self, mut srv: Server) -> Server { for socket in self.sockets { + let host = self + .host + .as_ref() + .map(|h| h.to_owned()) + .unwrap_or_else(|| format!("{}", socket.addr)); srv = socket.handler.register( srv, 
socket.lst, - self.host.clone(), + host, socket.addr, self.keep_alive.clone(), self.client_timeout, diff --git a/src/server/incoming.rs b/src/server/incoming.rs index 7ab289d04..c77280084 100644 --- a/src/server/incoming.rs +++ b/src/server/incoming.rs @@ -2,7 +2,7 @@ use std::{io, net}; use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message}; -use futures::Stream; +use futures::{Future, Stream}; use tokio_io::{AsyncRead, AsyncWrite}; use super::channel::{HttpChannel, WrapperStream}; @@ -36,7 +36,7 @@ where apps, self.keep_alive, self.client_timeout as u64, - ServerSettings::new(Some(addr), &self.host, secure), + ServerSettings::new(addr, "127.0.0.1:8080", secure), ); // start server @@ -65,6 +65,8 @@ where type Result = (); fn handle(&mut self, msg: WrapperStream, _: &mut Context) -> Self::Result { - Arbiter::spawn(HttpChannel::new(self.settings.clone(), msg, None)); + Arbiter::spawn( + HttpChannel::new(self.settings.clone(), msg, None).map_err(|_| ()), + ); } } diff --git a/src/server/mod.rs b/src/server/mod.rs index 1e145571c..f9d2b585e 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -140,15 +140,15 @@ mod ssl; pub use self::handler::*; pub use self::http::HttpServer; pub use self::message::Request; -pub use self::settings::ServerSettings; pub use self::ssl::*; +pub use self::error::{AcceptorError, HttpDispatchError}; +pub use self::service::HttpService; +pub use self::settings::{ServerSettings, WorkerSettings}; + #[doc(hidden)] pub use self::helpers::write_content_length; -#[doc(hidden)] -pub use self::builder::HttpServiceBuilder; - use body::Binary; use extensions::Extensions; use header::ContentEncoding; diff --git a/src/server/service.rs b/src/server/service.rs index 042c86ed4..2988bc661 100644 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -5,11 +5,12 @@ use futures::future::{ok, FutureResult}; use futures::{Async, Poll}; use super::channel::HttpChannel; +use super::error::HttpDispatchError; use super::handler::HttpHandler; use super::settings::WorkerSettings; use super::IoStream; -pub(crate) struct HttpService +pub struct HttpService where H: HttpHandler, Io: IoStream, @@ -23,6 +24,7 @@ where H: HttpHandler, Io: IoStream, { + /// Create new `HttpService` instance. pub fn new(settings: WorkerSettings) -> Self { HttpService { settings, @@ -38,17 +40,17 @@ where { type Request = Io; type Response = (); - type Error = (); + type Error = HttpDispatchError; type InitError = (); type Service = HttpServiceHandler; - type Future = FutureResult; + type Future = FutureResult; fn new_service(&self) -> Self::Future { ok(HttpServiceHandler::new(self.settings.clone())) } } -pub(crate) struct HttpServiceHandler +pub struct HttpServiceHandler where H: HttpHandler, Io: IoStream, @@ -84,7 +86,7 @@ where { type Request = Io; type Response = (); - type Error = (); + type Error = HttpDispatchError; type Future = HttpChannel; fn poll_ready(&mut self) -> Poll<(), Self::Error> { diff --git a/src/server/settings.rs b/src/server/settings.rs index 5ca777290..fbe515f99 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -43,7 +43,7 @@ lazy_static! 
{ /// Various server settings pub struct ServerSettings { - addr: Option, + addr: net::SocketAddr, secure: bool, host: String, cpu_pool: LazyCell, @@ -65,7 +65,7 @@ impl Clone for ServerSettings { impl Default for ServerSettings { fn default() -> Self { ServerSettings { - addr: None, + addr: "127.0.0.1:8080".parse().unwrap(), secure: false, host: "localhost:8080".to_owned(), responses: HttpResponsePool::get_pool(), @@ -76,16 +76,8 @@ impl Default for ServerSettings { impl ServerSettings { /// Crate server settings instance - pub(crate) fn new( - addr: Option, host: &Option, secure: bool, - ) -> ServerSettings { - let host = if let Some(ref host) = *host { - host.clone() - } else if let Some(ref addr) = addr { - format!("{}", addr) - } else { - "localhost".to_owned() - }; + pub fn new(addr: net::SocketAddr, host: &str, secure: bool) -> ServerSettings { + let host = host.to_owned(); let cpu_pool = LazyCell::new(); let responses = HttpResponsePool::get_pool(); ServerSettings { @@ -98,7 +90,7 @@ impl ServerSettings { } /// Returns the socket address of the local half of this TCP connection - pub fn local_addr(&self) -> Option { + pub fn local_addr(&self) -> net::SocketAddr { self.addr } @@ -153,7 +145,7 @@ impl Clone for WorkerSettings { } impl WorkerSettings { - pub(crate) fn new( + pub fn new( handler: H, keep_alive: KeepAlive, client_timeout: u64, settings: ServerSettings, ) -> WorkerSettings { let (keep_alive, ka_enabled) = match keep_alive { @@ -188,11 +180,13 @@ impl WorkerSettings { } #[inline] + /// Keep alive duration if configured. pub fn keep_alive(&self) -> Option { self.0.keep_alive } #[inline] + /// Return state of connection keep-alive funcitonality pub fn keep_alive_enabled(&self) -> bool { self.0.ka_enabled } @@ -217,6 +211,7 @@ impl WorkerSettings { impl WorkerSettings { #[inline] + /// Client timeout for first request. pub fn client_timer(&self) -> Option { let delay = self.0.client_timeout; if delay != 0 { @@ -227,6 +222,7 @@ impl WorkerSettings { } #[inline] + /// Return keep-alive timer delay is configured. 
pub fn keep_alive_timer(&self) -> Option { if let Some(ka) = self.0.keep_alive { Some(Delay::new(self.now() + ka)) diff --git a/tests/test_server.rs b/tests/test_server.rs index c1dbf531d..66b96ecce 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -1,4 +1,5 @@ extern crate actix; +extern crate actix_net; extern crate actix_web; #[cfg(feature = "brotli")] extern crate brotli2; @@ -18,6 +19,7 @@ use std::io::{Read, Write}; use std::sync::Arc; use std::{thread, time}; +use actix_net::server::Server; #[cfg(feature = "brotli")] use brotli2::write::{BrotliDecoder, BrotliEncoder}; use bytes::{Bytes, BytesMut}; @@ -1010,3 +1012,38 @@ fn test_server_cookies() { assert_eq!(cookies[1], first_cookie); } } + +#[test] +fn test_custom_pipeline() { + use actix::System; + use actix_web::server::{HttpService, KeepAlive, ServerSettings, WorkerSettings}; + + let addr = test::TestServer::unused_addr(); + + thread::spawn(move || { + Server::new() + .bind("test", addr, move || { + let app = App::new() + .route("/", http::Method::GET, |_: HttpRequest| "OK") + .finish(); + let settings = WorkerSettings::new( + app, + KeepAlive::Disabled, + 10, + ServerSettings::new(addr, "localhost", false), + ); + + HttpService::new(settings) + }).unwrap() + .run(); + }); + + let mut sys = System::new("test"); + { + let req = client::ClientRequest::get(format!("http://{}/", addr).as_str()) + .finish() + .unwrap(); + let response = sys.block_on(req.send()).unwrap(); + assert!(response.status().is_success()); + } +} From 2217a152cb0fcbbc5a5485936ceeb684bb532e41 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 15:19:49 -0700 Subject: [PATCH 150/219] expose app error by http service --- src/client/connector.rs | 7 ++----- src/server/error.rs | 14 ++++++++++++-- src/server/h1.rs | 14 +++++++++++--- src/server/settings.rs | 1 + 4 files changed, 26 insertions(+), 10 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 3f4916afa..88d6dfd6b 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -285,12 +285,9 @@ impl Default for ClientConnector { Arc::new(config) } + #[cfg_attr(rustfmt, rustfmt_skip)] #[cfg(not(any( - feature = "alpn", - feature = "ssl", - feature = "tls", - feature = "rust-tls", - )))] + feature = "alpn", feature = "ssl", feature = "tls", feature = "rust-tls")))] { () } diff --git a/src/server/error.rs b/src/server/error.rs index b8b602266..4396e6a2a 100644 --- a/src/server/error.rs +++ b/src/server/error.rs @@ -24,8 +24,8 @@ pub enum AcceptorError { /// A set of errors that can occur during dispatching http requests pub enum HttpDispatchError { /// Application error - #[fail(display = "Application specific error")] - AppError, + #[fail(display = "Application specific error: {}", _0)] + App(Error), /// An `io::Error` that occurred while trying to read or write to a network /// stream. 
@@ -39,6 +39,16 @@ pub enum HttpDispatchError { /// HTTP2 error #[fail(display = "HTTP2 error: {}", _0)] Http2(http2::Error), + + /// Unknown error + #[fail(display = "Unknown error")] + Unknown, +} + +impl From for HttpDispatchError { + fn from(err: Error) -> Self { + HttpDispatchError::App(err) + } } impl From for HttpDispatchError { diff --git a/src/server/h1.rs b/src/server/h1.rs index b17981225..a1a6c0af4 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -49,6 +49,7 @@ pub(crate) struct Http1 { payload: Option, buf: BytesMut, tasks: VecDeque>, + error: Option, ka_enabled: bool, ka_expire: Instant, ka_timer: Option, @@ -113,6 +114,7 @@ where decoder: H1Decoder::new(), payload: None, tasks: VecDeque::new(), + error: None, addr, buf, settings, @@ -321,7 +323,11 @@ where return Ok(Async::NotReady); } self.flags.insert(Flags::ERROR); - return Err(HttpDispatchError::AppError); + return Err(self + .error + .take() + .map(|e| e.into()) + .unwrap_or(HttpDispatchError::Unknown)); } match self.tasks[idx].pipe.poll_io(&mut self.stream) { @@ -357,12 +363,13 @@ where io = true; } Err(err) => { + error!("Unhandled error1: {}", err); // it is not possible to recover from error // during pipe handling, so just drop connection self.read_disconnected(); self.write_disconnected(); self.tasks[idx].flags.insert(EntryFlags::ERROR); - error!("Unhandled error1: {}", err); + self.error = Some(err); continue; } } @@ -373,10 +380,11 @@ where self.tasks[idx].flags.insert(EntryFlags::FINISHED) } Err(err) => { + error!("Unhandled error: {}", err); self.read_disconnected(); self.write_disconnected(); self.tasks[idx].flags.insert(EntryFlags::ERROR); - error!("Unhandled error: {}", err); + self.error = Some(err); continue; } } diff --git a/src/server/settings.rs b/src/server/settings.rs index fbe515f99..fe9cd82a3 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -145,6 +145,7 @@ impl Clone for WorkerSettings { } impl WorkerSettings { + /// Create instance of `WorkerSettings` pub fn new( handler: H, keep_alive: KeepAlive, client_timeout: u64, settings: ServerSettings, ) -> WorkerSettings { From 91af3ca148e7be9b48cd1d9bcaa316b442e2457c Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 19:18:24 -0700 Subject: [PATCH 151/219] simplify h1 dispatcher --- src/lib.rs | 4 - src/server/error.rs | 12 ++ src/server/h1.rs | 425 ++++++++++++++++++---------------------- src/server/h1decoder.rs | 1 + src/server/handler.rs | 21 +- src/server/http.rs | 4 +- src/server/incoming.rs | 4 +- src/server/message.rs | 21 ++ 8 files changed, 249 insertions(+), 243 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 099b0b16c..df3c3817e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,10 +81,6 @@ specialization, // for impl ErrorResponse for std::error::Error extern_prelude, ))] -#![cfg_attr( - feature = "cargo-clippy", - allow(decimal_literal_representation, suspicious_arithmetic_impl) -)] #![warn(missing_docs)] #[macro_use] diff --git a/src/server/error.rs b/src/server/error.rs index 4396e6a2a..eb3e88478 100644 --- a/src/server/error.rs +++ b/src/server/error.rs @@ -36,10 +36,22 @@ pub enum HttpDispatchError { #[fail(display = "The first request did not complete within the specified timeout")] SlowRequestTimeout, + /// Shutdown timeout + #[fail(display = "Connection shutdown timeout")] + ShutdownTimeout, + /// HTTP2 error #[fail(display = "HTTP2 error: {}", _0)] Http2(http2::Error), + /// Malformed request + #[fail(display = "Malformed request")] + MalformedRequest, + + /// Internal error + 
#[fail(display = "Internal error")] + InternalError, + /// Unknown error #[fail(display = "Unknown error")] Unknown, diff --git a/src/server/h1.rs b/src/server/h1.rs index a1a6c0af4..f3c71e3c2 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -4,6 +4,7 @@ use std::time::Instant; use bytes::BytesMut; use futures::{Async, Future, Poll}; +use tokio_current_thread::spawn; use tokio_timer::Delay; use error::{Error, PayloadError}; @@ -13,17 +14,16 @@ use payload::{Payload, PayloadStatus, PayloadWriter}; use super::error::{HttpDispatchError, ServerError}; use super::h1decoder::{DecoderError, H1Decoder, Message}; use super::h1writer::H1Writer; +use super::handler::{HttpHandler, HttpHandlerTask, HttpHandlerTaskFut}; use super::input::PayloadType; use super::settings::WorkerSettings; -use super::Writer; -use super::{HttpHandler, HttpHandlerTask, IoStream}; +use super::{IoStream, Writer}; const MAX_PIPELINED_MESSAGES: usize = 16; bitflags! { pub struct Flags: u8 { const STARTED = 0b0000_0001; - const ERROR = 0b0000_0010; const KEEPALIVE = 0b0000_0100; const SHUTDOWN = 0b0000_1000; const READ_DISCONNECTED = 0b0001_0000; @@ -32,14 +32,6 @@ bitflags! { } } -bitflags! { - struct EntryFlags: u8 { - const EOF = 0b0000_0001; - const ERROR = 0b0000_0010; - const FINISHED = 0b0000_0100; - } -} - pub(crate) struct Http1 { flags: Flags, settings: WorkerSettings, @@ -49,39 +41,40 @@ pub(crate) struct Http1 { payload: Option, buf: BytesMut, tasks: VecDeque>, - error: Option, + error: Option, ka_enabled: bool, ka_expire: Instant, ka_timer: Option, } -struct Entry { - pipe: EntryPipe, - flags: EntryFlags, -} - -enum EntryPipe { +enum Entry { Task(H::Task), Error(Box), } -impl EntryPipe { +impl Entry { + fn into_task(self) -> H::Task { + match self { + Entry::Task(task) => task, + Entry::Error(_) => panic!(), + } + } fn disconnected(&mut self) { match *self { - EntryPipe::Task(ref mut task) => task.disconnected(), - EntryPipe::Error(ref mut task) => task.disconnected(), + Entry::Task(ref mut task) => task.disconnected(), + Entry::Error(ref mut task) => task.disconnected(), } } fn poll_io(&mut self, io: &mut Writer) -> Poll { match *self { - EntryPipe::Task(ref mut task) => task.poll_io(io), - EntryPipe::Error(ref mut task) => task.poll_io(io), + Entry::Task(ref mut task) => task.poll_io(io), + Entry::Error(ref mut task) => task.poll_io(io), } } fn poll_completed(&mut self) -> Poll<(), Error> { match *self { - EntryPipe::Task(ref mut task) => task.poll_completed(), - EntryPipe::Error(ref mut task) => task.poll_completed(), + Entry::Task(ref mut task) => task.poll_completed(), + Entry::Error(ref mut task) => task.poll_completed(), } } } @@ -136,10 +129,7 @@ where #[inline] fn can_read(&self) -> bool { - if self - .flags - .intersects(Flags::ERROR | Flags::READ_DISCONNECTED) - { + if self.flags.intersects(Flags::READ_DISCONNECTED) { return false; } @@ -150,41 +140,46 @@ where } } - fn write_disconnected(&mut self) { - self.flags.insert(Flags::WRITE_DISCONNECTED); - - // notify all tasks - self.stream.disconnected(); - for task in &mut self.tasks { - task.pipe.disconnected(); - } - } - - fn read_disconnected(&mut self) { - self.flags.insert( - Flags::READ_DISCONNECTED - // on parse error, stop reading stream but tasks need to be - // completed - | Flags::ERROR, - ); - + // if checked is set to true, delay disconnect until all tasks have finished. 
+ fn client_disconnected(&mut self, checked: bool) { + self.flags.insert(Flags::READ_DISCONNECTED); if let Some(mut payload) = self.payload.take() { payload.set_error(PayloadError::Incomplete); } + + if !checked || self.tasks.is_empty() { + self.flags.insert(Flags::WRITE_DISCONNECTED); + self.stream.disconnected(); + + // notify all tasks + for mut task in self.tasks.drain(..) { + task.disconnected(); + match task.poll_completed() { + Ok(Async::NotReady) => { + // spawn not completed task, it does not require access to io + // at this point + spawn(HttpHandlerTaskFut::new(task.into_task())); + } + Ok(Async::Ready(_)) => (), + Err(err) => { + error!("Unhandled application error: {}", err); + } + } + } + } } #[inline] pub fn poll(&mut self) -> Poll<(), HttpDispatchError> { // check connection keep-alive - if !self.poll_keep_alive() { - return Ok(Async::Ready(())); - } + self.poll_keep_alive()?; // shutdown if self.flags.contains(Flags::SHUTDOWN) { - if self.flags.intersects( - Flags::ERROR | Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED, - ) { + if self + .flags + .intersects(Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED) + { return Ok(Async::Ready(())); } match self.stream.poll_completed(true) { @@ -197,44 +192,46 @@ where } } - self.poll_io(); + self.poll_io()?; - loop { + if !self.flags.contains(Flags::WRITE_DISCONNECTED) { match self.poll_handler()? { - Async::Ready(true) => { - self.poll_io(); - } + Async::Ready(true) => self.poll(), Async::Ready(false) => { self.flags.insert(Flags::SHUTDOWN); - return self.poll(); + self.poll() } Async::NotReady => { // deal with keep-alive and steam eof (client-side write shutdown) if self.tasks.is_empty() { // handle stream eof - if self.flags.contains(Flags::READ_DISCONNECTED) { - self.flags.insert(Flags::SHUTDOWN); - return self.poll(); + if self.flags.intersects( + Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED, + ) { + return Ok(Async::Ready(())); } // no keep-alive - if self.flags.contains(Flags::ERROR) - || (!self.flags.contains(Flags::KEEPALIVE) - || !self.ka_enabled) - && self.flags.contains(Flags::STARTED) + if self.flags.contains(Flags::STARTED) + && (!self.ka_enabled + || !self.flags.contains(Flags::KEEPALIVE)) { self.flags.insert(Flags::SHUTDOWN); return self.poll(); } } - return Ok(Async::NotReady); + Ok(Async::NotReady) } } + } else if let Some(err) = self.error.take() { + Err(err) + } else { + Ok(Async::Ready(())) } } /// keep-alive timer. 
returns `true` is keep-alive, otherwise drop - fn poll_keep_alive(&mut self) -> bool { - let timer = if let Some(ref mut timer) = self.ka_timer { + fn poll_keep_alive(&mut self) -> Result<(), HttpDispatchError> { + if let Some(ref mut timer) = self.ka_timer { match timer.poll() { Ok(Async::Ready(_)) => { if timer.deadline() >= self.ka_expire { @@ -242,43 +239,39 @@ where if self.tasks.is_empty() { // if we get timer during shutdown, just drop connection if self.flags.contains(Flags::SHUTDOWN) { - return false; + return Err(HttpDispatchError::ShutdownTimeout); } else { trace!("Keep-alive timeout, close connection"); self.flags.insert(Flags::SHUTDOWN); - None + // TODO: start shutdown timer + return Ok(()); } - } else { - self.settings.keep_alive_timer() + } else if let Some(deadline) = self.settings.keep_alive_expire() + { + timer.reset(deadline) } } else { - Some(Delay::new(self.ka_expire)) + timer.reset(self.ka_expire) } } - Ok(Async::NotReady) => None, + Ok(Async::NotReady) => (), Err(e) => { error!("Timer error {:?}", e); - return false; + return Err(HttpDispatchError::Unknown); } } - } else { - None - }; - - if let Some(mut timer) = timer { - let _ = timer.poll(); - self.ka_timer = Some(timer); } - true + + Ok(()) } #[inline] /// read data from stream - pub fn poll_io(&mut self) { + pub fn poll_io(&mut self) -> Result<(), HttpDispatchError> { if !self.flags.contains(Flags::POLLED) { - self.parse(); + self.parse()?; self.flags.insert(Flags::POLLED); - return; + return Ok(()); } // read io from socket @@ -286,136 +279,118 @@ where match self.stream.get_mut().read_available(&mut self.buf) { Ok(Async::Ready((read_some, disconnected))) => { if read_some { - self.parse(); + self.parse()?; } if disconnected { - self.read_disconnected(); - // delay disconnect until all tasks have finished. 
- if self.tasks.is_empty() { - self.write_disconnected(); - } + self.client_disconnected(true); } } Ok(Async::NotReady) => (), - Err(_) => { - self.read_disconnected(); - self.write_disconnected(); + Err(err) => { + self.client_disconnected(false); + return Err(err.into()); } } } + Ok(()) } pub fn poll_handler(&mut self) -> Poll { let retry = self.can_read(); - // check in-flight messages - let mut io = false; - let mut idx = 0; - while idx < self.tasks.len() { - // only one task can do io operation in http/1 - if !io - && !self.tasks[idx].flags.contains(EntryFlags::EOF) - && !self.flags.contains(Flags::WRITE_DISCONNECTED) - { - // io is corrupted, send buffer - if self.tasks[idx].flags.contains(EntryFlags::ERROR) { - if let Ok(Async::NotReady) = self.stream.poll_completed(true) { - return Ok(Async::NotReady); - } - self.flags.insert(Flags::ERROR); - return Err(self - .error - .take() - .map(|e| e.into()) - .unwrap_or(HttpDispatchError::Unknown)); - } - - match self.tasks[idx].pipe.poll_io(&mut self.stream) { - Ok(Async::Ready(ready)) => { - // override keep-alive state - if self.stream.keepalive() { - self.flags.insert(Flags::KEEPALIVE); - } else { - self.flags.remove(Flags::KEEPALIVE); - } - // prepare stream for next response - self.stream.reset(); - - if ready { - self.tasks[idx] - .flags - .insert(EntryFlags::EOF | EntryFlags::FINISHED); - } else { - self.tasks[idx].flags.insert(EntryFlags::EOF); - } - } - // no more IO for this iteration - Ok(Async::NotReady) => { - // check if we need timer - if self.ka_timer.is_some() && self.stream.upgrade() { - self.ka_timer.take(); - } - - // check if previously read backpressure was enabled - if self.can_read() && !retry { - return Ok(Async::Ready(true)); - } - io = true; - } - Err(err) => { - error!("Unhandled error1: {}", err); - // it is not possible to recover from error - // during pipe handling, so just drop connection - self.read_disconnected(); - self.write_disconnected(); - self.tasks[idx].flags.insert(EntryFlags::ERROR); - self.error = Some(err); - continue; - } - } - } else if !self.tasks[idx].flags.contains(EntryFlags::FINISHED) { - match self.tasks[idx].pipe.poll_completed() { - Ok(Async::NotReady) => (), - Ok(Async::Ready(_)) => { - self.tasks[idx].flags.insert(EntryFlags::FINISHED) - } - Err(err) => { - error!("Unhandled error: {}", err); - self.read_disconnected(); - self.write_disconnected(); - self.tasks[idx].flags.insert(EntryFlags::ERROR); - self.error = Some(err); - continue; - } - } - } - idx += 1; - } - - // cleanup finished tasks + // process first pipelined response, only one task can do io operation in http/1 while !self.tasks.is_empty() { - if self.tasks[0] - .flags - .contains(EntryFlags::EOF | EntryFlags::FINISHED) - { - self.tasks.pop_front(); - } else { - break; + match self.tasks[0].poll_io(&mut self.stream) { + Ok(Async::Ready(ready)) => { + // override keep-alive state + if self.stream.keepalive() { + self.flags.insert(Flags::KEEPALIVE); + } else { + self.flags.remove(Flags::KEEPALIVE); + } + // prepare stream for next response + self.stream.reset(); + + let task = self.tasks.pop_front().unwrap(); + if !ready { + // task is done with io operations but still needs to do more work + spawn(HttpHandlerTaskFut::new(task.into_task())); + } + } + Ok(Async::NotReady) => { + // check if we need timer + if self.ka_timer.is_some() && self.stream.upgrade() { + self.ka_timer.take(); + } + + // if read-backpressure is enabled and we consumed some data. 
+ // we may read more data + if !retry && self.can_read() { + return Ok(Async::Ready(true)); + } + break; + } + Err(err) => { + error!("Unhandled error1: {}", err); + // it is not possible to recover from error + // during pipe handling, so just drop connection + self.client_disconnected(false); + return Err(err.into()); + } } } - // check stream state + // check in-flight messages. all tasks must be alive, + // they need to produce response. if app returned error + // and we can not continue processing incoming requests. + let mut idx = 1; + while idx < self.tasks.len() { + let stop = match self.tasks[idx].poll_completed() { + Ok(Async::NotReady) => false, + Ok(Async::Ready(_)) => true, + Err(err) => { + self.error = Some(err.into()); + true + } + }; + if stop { + // error in task handling or task is completed, + // so no response for this task which means we can not read more requests + // because pipeline sequence is broken. + // but we can safely complete existing tasks + self.flags.insert(Flags::READ_DISCONNECTED); + + for mut task in self.tasks.drain(idx..) { + task.disconnected(); + match task.poll_completed() { + Ok(Async::NotReady) => { + // spawn not completed task, it does not require access to io + // at this point + spawn(HttpHandlerTaskFut::new(task.into_task())); + } + Ok(Async::Ready(_)) => (), + Err(err) => { + error!("Unhandled application error: {}", err); + } + } + } + break; + } else { + idx += 1; + } + } + + // flush stream if self.flags.contains(Flags::STARTED) { match self.stream.poll_completed(false) { Ok(Async::NotReady) => return Ok(Async::NotReady), Err(err) => { debug!("Error sending data: {}", err); - self.read_disconnected(); - self.write_disconnected(); + self.client_disconnected(false); return Err(err.into()); } Ok(Async::Ready(_)) => { - // non consumed payload in that case close connection + // if payload is not consumed we can not use connection if self.payload.is_some() && self.tasks.is_empty() { return Ok(Async::Ready(false)); } @@ -427,13 +402,11 @@ where } fn push_response_entry(&mut self, status: StatusCode) { - self.tasks.push_back(Entry { - pipe: EntryPipe::Error(ServerError::err(Version::HTTP_11, status)), - flags: EntryFlags::empty(), - }); + self.tasks + .push_back(Entry::Error(ServerError::err(Version::HTTP_11, status))); } - pub fn parse(&mut self) { + pub fn parse(&mut self) -> Result<(), HttpDispatchError> { let mut updated = false; 'outer: loop { @@ -457,9 +430,9 @@ where // search handler for request match self.settings.handler().handle(msg) { - Ok(mut pipe) => { + Ok(mut task) => { if self.tasks.is_empty() { - match pipe.poll_io(&mut self.stream) { + match task.poll_io(&mut self.stream) { Ok(Async::Ready(ready)) => { // override keep-alive state if self.stream.keepalive() { @@ -471,42 +444,28 @@ where self.stream.reset(); if !ready { - let item = Entry { - pipe: EntryPipe::Task(pipe), - flags: EntryFlags::EOF, - }; - self.tasks.push_back(item); + // task is done with io operations + // but still needs to do more work + spawn(HttpHandlerTaskFut::new(task)); } continue 'outer; } Ok(Async::NotReady) => (), Err(err) => { error!("Unhandled error: {}", err); - self.flags.insert(Flags::ERROR); - return; + self.client_disconnected(false); + return Err(err.into()); } } } - self.tasks.push_back(Entry { - pipe: EntryPipe::Task(pipe), - flags: EntryFlags::empty(), - }); + self.tasks.push_back(Entry::Task(task)); continue 'outer; } Err(_) => { // handler is not found - self.tasks.push_back(Entry { - pipe: EntryPipe::Error(ServerError::err( - 
Version::HTTP_11, - StatusCode::NOT_FOUND, - )), - flags: EntryFlags::empty(), - }); + self.push_response_entry(StatusCode::NOT_FOUND); } } - - // handler is not found - self.push_response_entry(StatusCode::NOT_FOUND); } Ok(Some(Message::Chunk(chunk))) => { updated = true; @@ -514,8 +473,9 @@ where payload.feed_data(chunk); } else { error!("Internal server error: unexpected payload chunk"); - self.flags.insert(Flags::ERROR); + self.flags.insert(Flags::READ_DISCONNECTED); self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR); + self.error = Some(HttpDispatchError::InternalError); break; } } @@ -525,23 +485,19 @@ where payload.feed_eof(); } else { error!("Internal server error: unexpected eof"); - self.flags.insert(Flags::ERROR); + self.flags.insert(Flags::READ_DISCONNECTED); self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR); + self.error = Some(HttpDispatchError::InternalError); break; } } Ok(None) => { if self.flags.contains(Flags::READ_DISCONNECTED) { - self.read_disconnected(); - if self.tasks.is_empty() { - self.write_disconnected(); - } + self.client_disconnected(true); } break; } Err(e) => { - updated = false; - self.flags.insert(Flags::ERROR); if let Some(mut payload) = self.payload.take() { let e = match e { DecoderError::Io(e) => PayloadError::Io(e), @@ -550,8 +506,10 @@ where payload.set_error(e); } - //Malformed requests should be responded with 400 + // Malformed requests should be responded with 400 self.push_response_entry(StatusCode::BAD_REQUEST); + self.flags.insert(Flags::READ_DISCONNECTED); + self.error = Some(HttpDispatchError::MalformedRequest); break; } } @@ -562,6 +520,7 @@ where self.ka_expire = expire; } } + Ok(()) } } @@ -708,15 +667,15 @@ mod tests { #[test] fn test_req_parse_err() { let mut sys = System::new("test"); - sys.block_on(future::lazy(|| { + let _ = sys.block_on(future::lazy(|| { let buf = Buffer::new("GET /test HTTP/1\r\n\r\n"); let readbuf = BytesMut::new(); let settings = wrk_settings(); let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, false, None); - h1.poll_io(); - h1.poll_io(); - assert!(h1.flags.contains(Flags::ERROR)); + assert!(h1.poll_io().is_ok()); + assert!(h1.poll_io().is_ok()); + assert!(h1.flags.contains(Flags::READ_DISCONNECTED)); assert_eq!(h1.tasks.len(), 1); future::ok::<_, ()>(()) })); diff --git a/src/server/h1decoder.rs b/src/server/h1decoder.rs index 084ae8b2f..a7531bbbd 100644 --- a/src/server/h1decoder.rs +++ b/src/server/h1decoder.rs @@ -18,6 +18,7 @@ pub(crate) struct H1Decoder { decoder: Option, } +#[derive(Debug)] pub(crate) enum Message { Message { msg: Request, payload: bool }, Chunk(Bytes), diff --git a/src/server/handler.rs b/src/server/handler.rs index 0700e1961..33e50ac34 100644 --- a/src/server/handler.rs +++ b/src/server/handler.rs @@ -1,4 +1,4 @@ -use futures::{Async, Poll}; +use futures::{Async, Future, Poll}; use super::message::Request; use super::Writer; @@ -42,6 +42,25 @@ impl HttpHandlerTask for Box { } } +pub(super) struct HttpHandlerTaskFut { + task: T, +} + +impl HttpHandlerTaskFut { + pub(crate) fn new(task: T) -> Self { + Self { task } + } +} + +impl Future for HttpHandlerTaskFut { + type Item = (); + type Error = (); + + fn poll(&mut self) -> Poll<(), ()> { + self.task.poll_completed().map_err(|_| ()) + } +} + /// Conversion helper trait pub trait IntoHttpHandler { /// The associated type which is result of conversion. 
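The `HttpHandlerTaskFut` wrapper introduced above turns a poll-style `HttpHandlerTask` into a plain `Future`, which is what lets the simplified dispatcher hand an unfinished task to `tokio_current_thread::spawn` once the connection no longer needs its writer. A minimal standalone sketch of the same adapter pattern in futures 0.1 style, with a hypothetical `MyTask` standing in for the real task type:

```rust
// Sketch only: `MyTask` is a hypothetical stand-in for an `HttpHandlerTask`,
// not a type from the patch; the adapter mirrors `HttpHandlerTaskFut`.
extern crate futures;

use futures::{Async, Future, Poll};

struct MyTask {
    remaining: u32,
}

impl MyTask {
    // Mirrors `HttpHandlerTask::poll_completed`: drive the task until done.
    fn poll_completed(&mut self) -> Poll<(), ()> {
        if self.remaining == 0 {
            Ok(Async::Ready(()))
        } else {
            self.remaining -= 1;
            // In futures 0.1 a task returning `NotReady` must arrange its own
            // wake-up; the real dispatcher is re-polled by socket readiness.
            futures::task::current().notify();
            Ok(Async::NotReady)
        }
    }
}

// Wrapper that exposes the poll-style task as a `Future`, so it can be
// spawned on the current-thread executor after I/O is finished.
struct TaskFut {
    task: MyTask,
}

impl Future for TaskFut {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        self.task.poll_completed()
    }
}

fn main() {
    // Drive the wrapped task to completion on the calling thread.
    let fut = TaskFut {
        task: MyTask { remaining: 3 },
    };
    fut.wait().unwrap();
}
```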
diff --git a/src/server/http.rs b/src/server/http.rs index 311c53cb2..511b1832e 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -485,7 +485,7 @@ impl H + Send + Clone> HttpServer { socket.lst, host, socket.addr, - self.keep_alive.clone(), + self.keep_alive, self.client_timeout, ); } @@ -531,7 +531,7 @@ impl H + Send + Clone> HttpServer { socket.lst, host, socket.addr, - self.keep_alive.clone(), + self.keep_alive, self.client_timeout, ); } diff --git a/src/server/incoming.rs b/src/server/incoming.rs index c77280084..a56ccb80f 100644 --- a/src/server/incoming.rs +++ b/src/server/incoming.rs @@ -41,9 +41,7 @@ where // start server HttpIncoming::create(move |ctx| { - ctx.add_message_stream( - stream.map_err(|_| ()).map(move |t| WrapperStream::new(t)), - ); + ctx.add_message_stream(stream.map_err(|_| ()).map(WrapperStream::new)); HttpIncoming { settings } }); } diff --git a/src/server/message.rs b/src/server/message.rs index 43f7e1425..9c4bc1ec4 100644 --- a/src/server/message.rs +++ b/src/server/message.rs @@ -1,5 +1,6 @@ use std::cell::{Cell, Ref, RefCell, RefMut}; use std::collections::VecDeque; +use std::fmt; use std::net::SocketAddr; use std::rc::Rc; @@ -220,6 +221,26 @@ impl Request { } } +impl fmt::Debug for Request { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!( + f, + "\nRequest {:?} {}:{}", + self.version(), + self.method(), + self.path() + )?; + if let Some(q) = self.uri().query().as_ref() { + writeln!(f, " query: ?{:?}", q)?; + } + writeln!(f, " headers:")?; + for (key, val) in self.headers().iter() { + writeln!(f, " {:?}: {:?}", key, val)?; + } + Ok(()) + } +} + pub(crate) struct RequestPool( RefCell>>, RefCell, From 16945a554abd5ddc9b3aaec4f102f9eeaae5e1a8 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 20:04:16 -0700 Subject: [PATCH 152/219] add client shutdown timeout --- CHANGES.md | 7 ++ src/server/acceptor.rs | 8 +-- src/server/builder.rs | 26 +++----- src/server/h1.rs | 12 +++- src/server/http.rs | 31 ++++++++- src/server/incoming.rs | 3 +- src/server/mod.rs | 2 +- src/server/settings.rs | 142 ++++++++++++++++++++++++++++++++++++++++- tests/test_server.rs | 15 +++-- 9 files changed, 208 insertions(+), 38 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 517f8cbe5..32d2bea7b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,6 +2,13 @@ ## [0.7.9] - 2018-09-x +### Added + +* Added client shutdown timeout setting + +* Added slow request timeout setting + + ### Fixed * HTTP1 decoding errors are reported to the client. #512 diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs index bad8847dc..15d66112a 100644 --- a/src/server/acceptor.rs +++ b/src/server/acceptor.rs @@ -176,11 +176,11 @@ where /// Applies timeout to request prcoessing. 
pub(crate) struct AcceptorTimeout { inner: T, - timeout: usize, + timeout: u64, } impl AcceptorTimeout { - pub(crate) fn new(timeout: usize, inner: T) -> Self { + pub(crate) fn new(timeout: u64, inner: T) -> Self { Self { inner, timeout } } } @@ -204,7 +204,7 @@ impl NewService for AcceptorTimeout { #[doc(hidden)] pub(crate) struct AcceptorTimeoutFut { fut: T::Future, - timeout: usize, + timeout: u64, } impl Future for AcceptorTimeoutFut { @@ -215,7 +215,7 @@ impl Future for AcceptorTimeoutFut { let inner = try_ready!(self.fut.poll()); Ok(Async::Ready(AcceptorTimeoutService { inner, - timeout: self.timeout as u64, + timeout: self.timeout, })) } } diff --git a/src/server/builder.rs b/src/server/builder.rs index 8e7f82f80..9e9323537 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -16,12 +16,13 @@ use super::KeepAlive; pub(crate) trait ServiceProvider { fn register( &self, server: Server, lst: net::TcpListener, host: String, - addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, + addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: u64, + client_shutdown: u64, ) -> Server; } /// Utility type that builds complete http pipeline -pub struct HttpServiceBuilder +pub(crate) struct HttpServiceBuilder where F: Fn() -> H + Send + Clone, { @@ -51,22 +52,9 @@ where self } - /// Use different acceptor factory - pub fn acceptor(self, acceptor: A1) -> HttpServiceBuilder - where - A1: AcceptorServiceFactory, - ::InitError: fmt::Debug, - { - HttpServiceBuilder { - acceptor, - factory: self.factory.clone(), - no_client_timer: self.no_client_timer, - } - } - fn finish( &self, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, - client_timeout: usize, + client_timeout: u64, client_shutdown: u64, ) -> impl ServiceFactory { let timeout = if self.no_client_timer { 0 @@ -81,6 +69,7 @@ where app, keep_alive, timeout as u64, + client_shutdown, ServerSettings::new(addr, &host, false), ); @@ -137,12 +126,13 @@ where { fn register( &self, server: Server, lst: net::TcpListener, host: String, - addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: usize, + addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: u64, + client_shutdown: u64, ) -> Server { server.listen2( "actix-web", lst, - self.finish(host, addr, keep_alive, client_timeout), + self.finish(host, addr, keep_alive, client_timeout, client_shutdown), ) } } diff --git a/src/server/h1.rs b/src/server/h1.rs index f3c71e3c2..f5e2bf2f5 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -243,8 +243,15 @@ where } else { trace!("Keep-alive timeout, close connection"); self.flags.insert(Flags::SHUTDOWN); - // TODO: start shutdown timer - return Ok(()); + + // start shutdown timer + if let Some(deadline) = + self.settings.client_shutdown_timer() + { + timer.reset(deadline) + } else { + return Ok(()); + } } } else if let Some(deadline) = self.settings.keep_alive_expire() { @@ -548,6 +555,7 @@ mod tests { App::new().into_handler(), KeepAlive::Os, 5000, + 2000, ServerSettings::default(), ) } diff --git a/src/server/http.rs b/src/server/http.rs index 511b1832e..5e1d33512 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -41,7 +41,8 @@ where pub(super) factory: F, pub(super) host: Option, pub(super) keep_alive: KeepAlive, - pub(super) client_timeout: usize, + pub(super) client_timeout: u64, + pub(super) client_shutdown: u64, backlog: i32, threads: usize, exit: bool, @@ -73,6 +74,7 @@ where maxconn: 25_600, maxconnrate: 256, client_timeout: 5000, + client_shutdown: 5000, sockets: Vec::new(), } } 
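Together with the builder change above, the new setting surfaces on `HttpServer` as `client_shutdown` (milliseconds), alongside the existing `client_timeout`; as the following hunk shows, the shutdown timeout is only applied to TLS sockets at registration time. A minimal usage sketch, assuming the actix-web 0.7 `server::new` helper and the default `App` setup used elsewhere in this series:

```rust
extern crate actix_web;

use actix_web::{server, App, HttpResponse};

fn main() {
    server::new(|| App::new().resource("/", |r| r.f(|_| HttpResponse::Ok())))
        // terminate a connection with 408 if the request head is not read
        // within 5 seconds
        .client_timeout(5_000)
        // give a (TLS) connection at most 1 second to shut down cleanly
        .client_shutdown(1_000)
        .bind("127.0.0.1:8080")
        .unwrap()
        .run();
}
```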
@@ -140,11 +142,24 @@ where /// To disable timeout set value to 0. /// /// By default client timeout is set to 5000 milliseconds. - pub fn client_timeout(mut self, val: usize) -> Self { + pub fn client_timeout(mut self, val: u64) -> Self { self.client_timeout = val; self } + /// Set server connection shutdown timeout in milliseconds. + /// + /// Defines a timeout for shutdown connection. If a shutdown procedure does not complete + /// within this time, the request is dropped. + /// + /// To disable timeout set value to 0. + /// + /// By default client timeout is set to 5000 milliseconds. + pub fn client_shutdown(mut self, val: u64) -> Self { + self.client_shutdown = val; + self + } + /// Set server host name. /// /// Host name is used by application router aa a hostname for url @@ -480,6 +495,11 @@ impl H + Send + Clone> HttpServer { .as_ref() .map(|h| h.to_owned()) .unwrap_or_else(|| format!("{}", socket.addr)); + let client_shutdown = if socket.scheme == "https" { + self.client_shutdown + } else { + 0 + }; srv = socket.handler.register( srv, socket.lst, @@ -487,6 +507,7 @@ impl H + Send + Clone> HttpServer { socket.addr, self.keep_alive, self.client_timeout, + client_shutdown, ); } srv.start() @@ -526,6 +547,11 @@ impl H + Send + Clone> HttpServer { .as_ref() .map(|h| h.to_owned()) .unwrap_or_else(|| format!("{}", socket.addr)); + let client_shutdown = if socket.scheme == "https" { + self.client_shutdown + } else { + 0 + }; srv = socket.handler.register( srv, socket.lst, @@ -533,6 +559,7 @@ impl H + Send + Clone> HttpServer { socket.addr, self.keep_alive, self.client_timeout, + client_shutdown, ); } srv diff --git a/src/server/incoming.rs b/src/server/incoming.rs index a56ccb80f..c4e984b9d 100644 --- a/src/server/incoming.rs +++ b/src/server/incoming.rs @@ -35,7 +35,8 @@ where let settings = WorkerSettings::new( apps, self.keep_alive, - self.client_timeout as u64, + self.client_timeout, + self.client_shutdown, ServerSettings::new(addr, "127.0.0.1:8080", secure), ); diff --git a/src/server/mod.rs b/src/server/mod.rs index f9d2b585e..b72410516 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -144,7 +144,7 @@ pub use self::ssl::*; pub use self::error::{AcceptorError, HttpDispatchError}; pub use self::service::HttpService; -pub use self::settings::{ServerSettings, WorkerSettings}; +pub use self::settings::{ServerSettings, WorkerSettings, WorkerSettingsBuilder}; #[doc(hidden)] pub use self::helpers::write_content_length; diff --git a/src/server/settings.rs b/src/server/settings.rs index fe9cd82a3..ac79e4a46 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -76,7 +76,9 @@ impl Default for ServerSettings { impl ServerSettings { /// Crate server settings instance - pub fn new(addr: net::SocketAddr, host: &str, secure: bool) -> ServerSettings { + pub(crate) fn new( + addr: net::SocketAddr, host: &str, secure: bool, + ) -> ServerSettings { let host = host.to_owned(); let cpu_pool = LazyCell::new(); let responses = HttpResponsePool::get_pool(); @@ -131,6 +133,7 @@ struct Inner { handler: H, keep_alive: Option, client_timeout: u64, + client_shutdown: u64, ka_enabled: bool, bytes: Rc, messages: &'static RequestPool, @@ -146,8 +149,9 @@ impl Clone for WorkerSettings { impl WorkerSettings { /// Create instance of `WorkerSettings` - pub fn new( - handler: H, keep_alive: KeepAlive, client_timeout: u64, settings: ServerSettings, + pub(crate) fn new( + handler: H, keep_alive: KeepAlive, client_timeout: u64, client_shutdown: u64, + settings: ServerSettings, ) -> WorkerSettings { 
let (keep_alive, ka_enabled) = match keep_alive { KeepAlive::Timeout(val) => (val as u64, true), @@ -165,6 +169,7 @@ impl WorkerSettings { keep_alive, ka_enabled, client_timeout, + client_shutdown, bytes: Rc::new(SharedBytesPool::new()), messages: RequestPool::pool(settings), node: RefCell::new(Node::head()), @@ -172,6 +177,11 @@ impl WorkerSettings { })) } + /// Create worker settings builder. + pub fn build(handler: H) -> WorkerSettingsBuilder { + WorkerSettingsBuilder::new(handler) + } + pub(crate) fn head(&self) -> RefMut> { self.0.node.borrow_mut() } @@ -222,6 +232,16 @@ impl WorkerSettings { } } + /// Client shutdown timer + pub fn client_shutdown_timer(&self) -> Option { + let delay = self.0.client_shutdown; + if delay != 0 { + Some(self.now() + Duration::from_millis(delay)) + } else { + None + } + } + #[inline] /// Return keep-alive timer delay is configured. pub fn keep_alive_timer(&self) -> Option { @@ -289,6 +309,121 @@ impl WorkerSettings { } } +/// An worker settings builder +/// +/// This type can be used to construct an instance of `WorkerSettings` through a +/// builder-like pattern. +pub struct WorkerSettingsBuilder { + handler: H, + keep_alive: KeepAlive, + client_timeout: u64, + client_shutdown: u64, + host: String, + addr: net::SocketAddr, + secure: bool, +} + +impl WorkerSettingsBuilder { + /// Create instance of `WorkerSettingsBuilder` + pub fn new(handler: H) -> WorkerSettingsBuilder { + WorkerSettingsBuilder { + handler, + keep_alive: KeepAlive::Timeout(5), + client_timeout: 5000, + client_shutdown: 5000, + secure: false, + host: "localhost".to_owned(), + addr: "127.0.0.1:8080".parse().unwrap(), + } + } + + /// Enable secure flag for current server. + /// + /// By default this flag is set to false. + pub fn secure(mut self) -> Self { + self.secure = true; + self + } + + /// Set server keep-alive setting. + /// + /// By default keep alive is set to a 5 seconds. + pub fn keep_alive>(mut self, val: T) -> Self { + self.keep_alive = val.into(); + self + } + + /// Set server client timeout in milliseconds for first request. + /// + /// Defines a timeout for reading client request header. If a client does not transmit + /// the entire set headers within this time, the request is terminated with + /// the 408 (Request Time-out) error. + /// + /// To disable timeout set value to 0. + /// + /// By default client timeout is set to 5000 milliseconds. + pub fn client_timeout(mut self, val: u64) -> Self { + self.client_timeout = val; + self + } + + /// Set server connection shutdown timeout in milliseconds. + /// + /// Defines a timeout for shutdown connection. If a shutdown procedure does not complete + /// within this time, the request is dropped. This timeout affects only secure connections. + /// + /// To disable timeout set value to 0. + /// + /// By default client timeout is set to 5000 milliseconds. + pub fn client_shutdown(mut self, val: u64) -> Self { + self.client_shutdown = val; + self + } + + /// Set server host name. + /// + /// Host name is used by application router aa a hostname for url + /// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo. + /// html#method.host) documentation for more information. + /// + /// By default host name is set to a "localhost" value. + pub fn server_hostname(mut self, val: &str) -> Self { + self.host = val.to_owned(); + self + } + + /// Set server ip address. + /// + /// Host name is used by application router aa a hostname for url + /// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo. 
+ /// html#method.host) documentation for more information. + /// + /// By default server address is set to a "127.0.0.1:8080" + pub fn server_address(mut self, addr: S) -> Self { + match addr.to_socket_addrs() { + Err(err) => error!("Can not convert to SocketAddr: {}", err), + Ok(mut addrs) => if let Some(addr) = addrs.next() { + self.addr = addr; + }, + } + self + } + + /// Finish worker settings configuration and create `WorkerSettings` object. + pub fn finish(self) -> WorkerSettings { + let settings = ServerSettings::new(self.addr, &self.host, self.secure); + let client_shutdown = if self.secure { self.client_shutdown } else { 0 }; + + WorkerSettings::new( + self.handler, + self.keep_alive, + self.client_timeout, + client_shutdown, + settings, + ) + } +} + struct Date { current: Instant, bytes: [u8; DATE_VALUE_LENGTH], @@ -366,6 +501,7 @@ mod tests { (), KeepAlive::Os, 0, + 0, ServerSettings::default(), ); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); diff --git a/tests/test_server.rs b/tests/test_server.rs index 66b96ecce..f8fabef6d 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -1016,7 +1016,7 @@ fn test_server_cookies() { #[test] fn test_custom_pipeline() { use actix::System; - use actix_web::server::{HttpService, KeepAlive, ServerSettings, WorkerSettings}; + use actix_web::server::{HttpService, KeepAlive, WorkerSettings}; let addr = test::TestServer::unused_addr(); @@ -1026,12 +1026,13 @@ fn test_custom_pipeline() { let app = App::new() .route("/", http::Method::GET, |_: HttpRequest| "OK") .finish(); - let settings = WorkerSettings::new( - app, - KeepAlive::Disabled, - 10, - ServerSettings::new(addr, "localhost", false), - ); + let settings = WorkerSettings::build(app) + .keep_alive(KeepAlive::Disabled) + .client_timeout(1000) + .client_shutdown(1000) + .server_hostname("localhost") + .server_address(addr) + .finish(); HttpService::new(settings) }).unwrap() From 1bac65de4c11409ba09ff8b5b040ca1d07f72d30 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 20:15:26 -0700 Subject: [PATCH 153/219] add websocket stopped test --- tests/test_ws.rs | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/tests/test_ws.rs b/tests/test_ws.rs index 49118fc7f..cf928349d 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -5,6 +5,10 @@ extern crate futures; extern crate http; extern crate rand; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use std::{thread, time}; + use bytes::Bytes; use futures::Stream; use rand::distributions::Alphanumeric; @@ -351,3 +355,44 @@ fn test_ws_server_rust_tls() { assert_eq!(item, data); } } + +struct WsStopped(Arc); + +impl Actor for WsStopped { + type Context = ws::WebsocketContext; + + fn stopped(&mut self, ctx: &mut Self::Context) { + self.0.fetch_add(1, Ordering::Relaxed); + } +} + +impl StreamHandler for WsStopped { + fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) { + match msg { + ws::Message::Text(text) => ctx.text(text), + _ => (), + } + } +} + +#[test] +fn test_ws_stopped() { + let num = Arc::new(AtomicUsize::new(0)); + let num2 = num.clone(); + + let _ = thread::spawn(move || { + let num3 = num2.clone(); + let mut srv = test::TestServer::new(move |app| { + let num4 = num3.clone(); + app.handler(move |req| ws::start(req, WsStopped(num4.clone()))) + }); + let (reader, mut writer) = srv.ws().unwrap(); + + writer.text("text"); + let (item, reader) = srv.execute(reader.into_future()).unwrap(); + assert_eq!(item, 
Some(ws::Message::Text("text".to_owned()))); + }); + + thread::sleep(time::Duration::from_secs(1)); + assert_eq!(num.load(Ordering::Relaxed), 1); +} From e4686f6c8d9519186061f4944cb6f0e3be0eb8e7 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 20:53:22 -0700 Subject: [PATCH 154/219] set socket linger to 0 on timeout --- src/server/h1.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index f5e2bf2f5..433a916b0 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -1,6 +1,6 @@ use std::collections::VecDeque; -use std::net::SocketAddr; -use std::time::Instant; +use std::net::{Shutdown, SocketAddr}; +use std::time::{Duration, Instant}; use bytes::BytesMut; use futures::{Async, Future, Poll}; @@ -239,6 +239,12 @@ where if self.tasks.is_empty() { // if we get timer during shutdown, just drop connection if self.flags.contains(Flags::SHUTDOWN) { + let io = self.stream.get_mut(); + let _ = IoStream::set_linger( + io, + Some(Duration::from_secs(0)), + ); + let _ = IoStream::shutdown(io, Shutdown::Both); return Err(HttpDispatchError::ShutdownTimeout); } else { trace!("Keep-alive timeout, close connection"); From 127af925411604c57a42b96318ca83d7ca07db99 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 21:16:56 -0700 Subject: [PATCH 155/219] clippy warnings --- src/application.rs | 2 +- src/client/writer.rs | 5 ++++- src/extensions.rs | 1 + src/handler.rs | 3 +-- src/httpresponse.rs | 2 +- src/info.rs | 5 ++++- src/lib.rs | 1 + src/middleware/defaultheaders.rs | 2 +- src/route.rs | 7 +++---- src/scope.rs | 5 ++++- src/server/h2writer.rs | 5 ++++- src/server/helpers.rs | 4 ++-- src/server/http.rs | 5 ++++- src/server/output.rs | 16 +++++++--------- src/server/settings.rs | 2 +- src/with.rs | 19 ------------------- src/ws/frame.rs | 2 +- src/ws/mask.rs | 9 ++++++--- 18 files changed, 46 insertions(+), 49 deletions(-) diff --git a/src/application.rs b/src/application.rs index 407268322..d8a6cbe7b 100644 --- a/src/application.rs +++ b/src/application.rs @@ -135,7 +135,7 @@ where /// instance for each thread, thus application state must be constructed /// multiple times. If you want to share state between different /// threads, a shared object should be used, e.g. `Arc`. Application - /// state does not need to be `Send` and `Sync`. + /// state does not need to be `Send` or `Sync`. pub fn with_state(state: S) -> App { App { parts: Some(ApplicationParts { diff --git a/src/client/writer.rs b/src/client/writer.rs index 45abfb773..e74f22332 100644 --- a/src/client/writer.rs +++ b/src/client/writer.rs @@ -1,4 +1,7 @@ -#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))] +#![cfg_attr( + feature = "cargo-clippy", + allow(clippy::redundant_field_names) +)] use std::cell::RefCell; use std::fmt::Write as FmtWrite; diff --git a/src/extensions.rs b/src/extensions.rs index 3e3f24a24..430b87bda 100644 --- a/src/extensions.rs +++ b/src/extensions.rs @@ -31,6 +31,7 @@ impl Hasher for IdHasher { type AnyMap = HashMap, BuildHasherDefault>; +#[derive(Default)] /// A type map of request extensions. 
pub struct Extensions { map: AnyMap, diff --git a/src/handler.rs b/src/handler.rs index 2b6cc6604..399fd6ba3 100644 --- a/src/handler.rs +++ b/src/handler.rs @@ -530,8 +530,7 @@ where /// } /// /// /// extract path info using serde -/// fn index(data: (State, Path)) -> String { -/// let (state, path) = data; +/// fn index(state: State, path: Path)) -> String { /// format!("{} {}!", state.msg, path.username) /// } /// diff --git a/src/httpresponse.rs b/src/httpresponse.rs index f02570188..59815c58c 100644 --- a/src/httpresponse.rs +++ b/src/httpresponse.rs @@ -694,7 +694,7 @@ impl HttpResponseBuilder { } #[inline] -#[cfg_attr(feature = "cargo-clippy", allow(borrowed_box))] +#[cfg_attr(feature = "cargo-clippy", allow(clippy::borrowed_box))] fn parts<'a>( parts: &'a mut Option>, err: &Option, ) -> Option<&'a mut Box> { diff --git a/src/info.rs b/src/info.rs index aeffc5ba2..5a2f21805 100644 --- a/src/info.rs +++ b/src/info.rs @@ -16,7 +16,10 @@ pub struct ConnectionInfo { impl ConnectionInfo { /// Create *ConnectionInfo* instance for a request. - #[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))] + #[cfg_attr( + feature = "cargo-clippy", + allow(clippy::cyclomatic_complexity) + )] pub fn update(&mut self, req: &Request) { let mut host = None; let mut scheme = None; diff --git a/src/lib.rs b/src/lib.rs index df3c3817e..1ed408099 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -80,6 +80,7 @@ #![cfg_attr(actix_nightly, feature( specialization, // for impl ErrorResponse for std::error::Error extern_prelude, + tool_lints, ))] #![warn(missing_docs)] diff --git a/src/middleware/defaultheaders.rs b/src/middleware/defaultheaders.rs index a33fa6a33..d980a2503 100644 --- a/src/middleware/defaultheaders.rs +++ b/src/middleware/defaultheaders.rs @@ -48,7 +48,7 @@ impl DefaultHeaders { /// Set a header. #[inline] - #[cfg_attr(feature = "cargo-clippy", allow(match_wild_err_arm))] + #[cfg_attr(feature = "cargo-clippy", allow(clippy::match_wild_err_arm))] pub fn header(mut self, key: K, value: V) -> Self where HeaderName: HttpTryFrom, diff --git a/src/route.rs b/src/route.rs index e2635aa65..e4a7a9572 100644 --- a/src/route.rs +++ b/src/route.rs @@ -134,8 +134,7 @@ impl Route { /// } /// ``` /// - /// It is possible to use tuples for specifing multiple extractors for one - /// handler function. + /// It is possible to use multiple extractors for one handler function. 
/// /// ```rust /// # extern crate bytes; @@ -152,9 +151,9 @@ impl Route { /// /// /// extract path info using serde /// fn index( - /// info: (Path, Query>, Json), + /// path: Path, query: Query>, body: Json, /// ) -> Result { - /// Ok(format!("Welcome {}!", info.0.username)) + /// Ok(format!("Welcome {}!", path.username)) /// } /// /// fn main() { diff --git a/src/scope.rs b/src/scope.rs index bd3daf163..43789d427 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -59,7 +59,10 @@ pub struct Scope { middlewares: Rc>>>, } -#[cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))] +#[cfg_attr( + feature = "cargo-clippy", + allow(clippy::new_without_default_derive) +)] impl Scope { /// Create a new scope pub fn new(path: &str) -> Scope { diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index 4bfc1b7c1..0893b5b62 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -1,4 +1,7 @@ -#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))] +#![cfg_attr( + feature = "cargo-clippy", + allow(clippy::redundant_field_names) +)] use std::{cmp, io}; diff --git a/src/server/helpers.rs b/src/server/helpers.rs index 9c0b7f40c..e4ccd8aef 100644 --- a/src/server/helpers.rs +++ b/src/server/helpers.rs @@ -78,7 +78,7 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) { let d1 = n << 1; unsafe { ptr::copy_nonoverlapping( - DEC_DIGITS_LUT.as_ptr().offset(d1 as isize), + DEC_DIGITS_LUT.as_ptr().add(d1), buf.as_mut_ptr().offset(18), 2, ); @@ -94,7 +94,7 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) { n /= 100; unsafe { ptr::copy_nonoverlapping( - DEC_DIGITS_LUT.as_ptr().offset(d1 as isize), + DEC_DIGITS_LUT.as_ptr().add(d1), buf.as_mut_ptr().offset(19), 2, ) diff --git a/src/server/http.rs b/src/server/http.rs index 5e1d33512..5a7200868 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -329,7 +329,10 @@ where /// Start listening for incoming connections with supplied acceptor. 
#[doc(hidden)] - #[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] + #[cfg_attr( + feature = "cargo-clippy", + allow(clippy::needless_pass_by_value) + )] pub fn bind_with(mut self, addr: S, acceptor: A) -> io::Result where S: net::ToSocketAddrs, diff --git a/src/server/output.rs b/src/server/output.rs index 74b083388..46b03c9dc 100644 --- a/src/server/output.rs +++ b/src/server/output.rs @@ -151,10 +151,9 @@ impl Output { let version = resp.version().unwrap_or_else(|| req.version); let mut len = 0; - #[cfg_attr(feature = "cargo-clippy", allow(match_ref_pats))] let has_body = match resp.body() { - &Body::Empty => false, - &Body::Binary(ref bin) => { + Body::Empty => false, + Body::Binary(ref bin) => { len = bin.len(); !(response_encoding == ContentEncoding::Auto && len < 96) } @@ -190,16 +189,15 @@ impl Output { #[cfg(not(any(feature = "brotli", feature = "flate2")))] let mut encoding = ContentEncoding::Identity; - #[cfg_attr(feature = "cargo-clippy", allow(match_ref_pats))] let transfer = match resp.body() { - &Body::Empty => { + Body::Empty => { if !info.head { info.length = ResponseLength::Zero; } *self = Output::Empty(buf); return; } - &Body::Binary(_) => { + Body::Binary(_) => { #[cfg(any(feature = "brotli", feature = "flate2"))] { if !(encoding == ContentEncoding::Identity @@ -244,7 +242,7 @@ impl Output { } return; } - &Body::Streaming(_) | &Body::Actor(_) => { + Body::Streaming(_) | Body::Actor(_) => { if resp.upgrade() { if version == Version::HTTP_2 { error!("Connection upgrade is forbidden for HTTP/2"); @@ -441,7 +439,7 @@ impl ContentEncoder { } } - #[cfg_attr(feature = "cargo-clippy", allow(inline_always))] + #[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))] #[inline(always)] pub fn write_eof(&mut self) -> Result { let encoder = @@ -483,7 +481,7 @@ impl ContentEncoder { } } - #[cfg_attr(feature = "cargo-clippy", allow(inline_always))] + #[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))] #[inline(always)] pub fn write(&mut self, data: &[u8]) -> Result<(), io::Error> { match *self { diff --git a/src/server/settings.rs b/src/server/settings.rs index ac79e4a46..a50a07069 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -216,7 +216,7 @@ impl WorkerSettings { fn update_date(&self) { // Unsafe: WorkerSetting is !Sync and !Send - unsafe { (&mut *self.0.date.get()).0 = false }; + unsafe { (*self.0.date.get()).0 = false }; } } diff --git a/src/with.rs b/src/with.rs index 5e2c01414..c6d54dee8 100644 --- a/src/with.rs +++ b/src/with.rs @@ -12,7 +12,6 @@ trait FnWith: 'static { } impl R + 'static> FnWith for F { - #[cfg_attr(feature = "cargo-clippy", allow(boxed_local))] fn call_with(self: &Self, arg: T) -> R { (*self)(arg) } @@ -42,24 +41,6 @@ where fn create_with_config(self, T::Config) -> WithAsync; } -// impl WithFactory<(T1, T2, T3), S, R> for F -// where F: Fn(T1, T2, T3) -> R + 'static, -// T1: FromRequest + 'static, -// T2: FromRequest + 'static, -// T3: FromRequest + 'static, -// R: Responder + 'static, -// S: 'static, -// { -// fn create(self) -> With<(T1, T2, T3), S, R> { -// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), ( -// T1::Config::default(), T2::Config::default(), T3::Config::default())) -// } - -// fn create_with_config(self, cfg: (T1::Config, T2::Config, T3::Config,)) -> With<(T1, T2, T3), S, R> { -// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), cfg) -// } -// } - #[doc(hidden)] pub struct With where diff --git a/src/ws/frame.rs b/src/ws/frame.rs index 5e4fd8290..d5fa98272 100644 --- 
a/src/ws/frame.rs +++ b/src/ws/frame.rs @@ -46,7 +46,7 @@ impl Frame { Frame::message(payload, OpCode::Close, true, genmask) } - #[cfg_attr(feature = "cargo-clippy", allow(type_complexity))] + #[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))] fn read_copy_md( pl: &mut PayloadBuffer, server: bool, max_size: usize, ) -> Poll)>, ProtocolError> diff --git a/src/ws/mask.rs b/src/ws/mask.rs index e9bfb3d56..a88c21afb 100644 --- a/src/ws/mask.rs +++ b/src/ws/mask.rs @@ -1,5 +1,5 @@ //! This is code from [Tungstenite project](https://github.com/snapview/tungstenite-rs) -#![cfg_attr(feature = "cargo-clippy", allow(cast_ptr_alignment))] +#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] use std::ptr::copy_nonoverlapping; use std::slice; @@ -19,7 +19,7 @@ impl<'a> ShortSlice<'a> { /// Faster version of `apply_mask()` which operates on 8-byte blocks. #[inline] -#[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))] +#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))] pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) { // Extend the mask to 64 bits let mut mask_u64 = ((mask_u32 as u64) << 32) | (mask_u32 as u64); @@ -50,7 +50,10 @@ pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) { // TODO: copy_nonoverlapping here compiles to call memcpy. While it is not so // inefficient, it could be done better. The compiler does not understand that // a `ShortSlice` must be smaller than a u64. -#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] +#[cfg_attr( + feature = "cargo-clippy", + allow(clippy::needless_pass_by_value) +)] fn xor_short(buf: ShortSlice, mask: u64) { // Unsafe: we know that a `ShortSlice` fits in a u64 unsafe { From 84edc57fd9d9a2075e5d3aaff5257eecc0c206b8 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 21:19:27 -0700 Subject: [PATCH 156/219] increase sleep time --- tests/test_ws.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_ws.rs b/tests/test_ws.rs index cf928349d..67c4c5913 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -393,6 +393,6 @@ fn test_ws_stopped() { assert_eq!(item, Some(ws::Message::Text("text".to_owned()))); }); - thread::sleep(time::Duration::from_secs(1)); + thread::sleep(time::Duration::from_secs(3)); assert_eq!(num.load(Ordering::Relaxed), 1); } From 7c78797d9b9acc1653d6cc8338ef4ef71a756422 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 21:30:00 -0700 Subject: [PATCH 157/219] proper stop for test_ws_stopped test --- tests/test_ws.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/test_ws.rs b/tests/test_ws.rs index 67c4c5913..f67314e8a 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -361,7 +361,7 @@ struct WsStopped(Arc); impl Actor for WsStopped { type Context = ws::WebsocketContext; - fn stopped(&mut self, ctx: &mut Self::Context) { + fn stopped(&mut self, _: &mut Self::Context) { self.0.fetch_add(1, Ordering::Relaxed); } } @@ -387,12 +387,10 @@ fn test_ws_stopped() { app.handler(move |req| ws::start(req, WsStopped(num4.clone()))) }); let (reader, mut writer) = srv.ws().unwrap(); - writer.text("text"); - let (item, reader) = srv.execute(reader.into_future()).unwrap(); + let (item, _) = srv.execute(reader.into_future()).unwrap(); assert_eq!(item, Some(ws::Message::Text("text".to_owned()))); - }); + }).join(); - thread::sleep(time::Duration::from_secs(3)); assert_eq!(num.load(Ordering::Relaxed), 1); } From c674ea912691d86379d610a4794a50cef4b2feac Mon Sep 17 00:00:00 
2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 22:23:02 -0700 Subject: [PATCH 158/219] add StreamConfiguration service --- src/client/connector.rs | 5 ++ src/server/channel.rs | 8 +++- src/server/h1.rs | 37 ++++++++------- src/server/mod.rs | 16 ++++++- src/server/service.rs | 93 +++++++++++++++++++++++++++++++++---- src/server/ssl/nativetls.rs | 5 ++ src/server/ssl/openssl.rs | 5 ++ src/server/ssl/rustls.rs | 10 ++++ tests/test_server.rs | 9 +++- tests/test_ws.rs | 2 +- 10 files changed, 160 insertions(+), 30 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 88d6dfd6b..88be77f9b 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -1283,6 +1283,11 @@ impl IoStream for Connection { fn set_linger(&mut self, dur: Option) -> io::Result<()> { IoStream::set_linger(&mut *self.stream, dur) } + + #[inline] + fn set_keepalive(&mut self, dur: Option) -> io::Result<()> { + IoStream::set_keepalive(&mut *self.stream, dur) + } } impl io::Read for Connection { diff --git a/src/server/channel.rs b/src/server/channel.rs index 3cea291fd..d8cad9707 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -13,7 +13,7 @@ use super::{h1, h2, HttpHandler, IoStream}; const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0"; enum HttpProtocol { - H1(h1::Http1), + H1(h1::Http1Dispatcher), H2(h2::Http2), Unknown(WorkerSettings, Option, T, BytesMut), } @@ -167,7 +167,7 @@ where if let Some(HttpProtocol::Unknown(settings, addr, io, buf)) = self.proto.take() { match kind { ProtocolKind::Http1 => { - self.proto = Some(HttpProtocol::H1(h1::Http1::new( + self.proto = Some(HttpProtocol::H1(h1::Http1Dispatcher::new( settings, io, addr, @@ -311,6 +311,10 @@ where fn set_linger(&mut self, _: Option) -> io::Result<()> { Ok(()) } + #[inline] + fn set_keepalive(&mut self, _: Option) -> io::Result<()> { + Ok(()) + } } impl io::Read for WrapperStream diff --git a/src/server/h1.rs b/src/server/h1.rs index 433a916b0..6875972ee 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -24,15 +24,18 @@ const MAX_PIPELINED_MESSAGES: usize = 16; bitflags! 
{ pub struct Flags: u8 { const STARTED = 0b0000_0001; + const KEEPALIVE_ENABLED = 0b0000_0010; const KEEPALIVE = 0b0000_0100; const SHUTDOWN = 0b0000_1000; const READ_DISCONNECTED = 0b0001_0000; const WRITE_DISCONNECTED = 0b0010_0000; const POLLED = 0b0100_0000; + } } -pub(crate) struct Http1 { +/// Dispatcher for HTTP/1.1 protocol +pub struct Http1Dispatcher { flags: Flags, settings: WorkerSettings, addr: Option, @@ -42,7 +45,6 @@ pub(crate) struct Http1 { buf: BytesMut, tasks: VecDeque>, error: Option, - ka_enabled: bool, ka_expire: Instant, ka_timer: Option, } @@ -79,7 +81,7 @@ impl Entry { } } -impl Http1 +impl Http1Dispatcher where T: IoStream, H: HttpHandler + 'static, @@ -88,7 +90,6 @@ where settings: WorkerSettings, stream: T, addr: Option, buf: BytesMut, is_eof: bool, keepalive_timer: Option, ) -> Self { - let ka_enabled = settings.keep_alive_enabled(); let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer { (delay.deadline(), Some(delay)) } else if let Some(delay) = settings.keep_alive_timer() { @@ -97,12 +98,16 @@ where (settings.now(), None) }; - Http1 { - flags: if is_eof { - Flags::READ_DISCONNECTED - } else { - Flags::KEEPALIVE - }, + let mut flags = if is_eof { + Flags::READ_DISCONNECTED + } else if settings.keep_alive_enabled() { + Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED + } else { + Flags::empty() + }; + + Http1Dispatcher { + flags, stream: H1Writer::new(stream, settings.clone()), decoder: H1Decoder::new(), payload: None, @@ -113,7 +118,6 @@ where settings, ka_timer, ka_expire, - ka_enabled, } } @@ -212,7 +216,7 @@ where } // no keep-alive if self.flags.contains(Flags::STARTED) - && (!self.ka_enabled + && (!self.flags.contains(Flags::KEEPALIVE_ENABLED) || !self.flags.contains(Flags::KEEPALIVE)) { self.flags.insert(Flags::SHUTDOWN); @@ -280,7 +284,7 @@ where #[inline] /// read data from stream - pub fn poll_io(&mut self) -> Result<(), HttpDispatchError> { + pub(self) fn poll_io(&mut self) -> Result<(), HttpDispatchError> { if !self.flags.contains(Flags::POLLED) { self.parse()?; self.flags.insert(Flags::POLLED); @@ -308,7 +312,7 @@ where Ok(()) } - pub fn poll_handler(&mut self) -> Poll { + pub(self) fn poll_handler(&mut self) -> Poll { let retry = self.can_read(); // process first pipelined response, only one task can do io operation in http/1 @@ -419,7 +423,7 @@ where .push_back(Entry::Error(ServerError::err(Version::HTTP_11, status))); } - pub fn parse(&mut self) -> Result<(), HttpDispatchError> { + pub(self) fn parse(&mut self) -> Result<(), HttpDispatchError> { let mut updated = false; 'outer: loop { @@ -686,7 +690,8 @@ mod tests { let readbuf = BytesMut::new(); let settings = wrk_settings(); - let mut h1 = Http1::new(settings.clone(), buf, None, readbuf, false, None); + let mut h1 = + Http1Dispatcher::new(settings.clone(), buf, None, readbuf, false, None); assert!(h1.poll_io().is_ok()); assert!(h1.poll_io().is_ok()); assert!(h1.flags.contains(Flags::READ_DISCONNECTED)); diff --git a/src/server/mod.rs b/src/server/mod.rs index b72410516..456b46183 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -143,9 +143,11 @@ pub use self::message::Request; pub use self::ssl::*; pub use self::error::{AcceptorError, HttpDispatchError}; -pub use self::service::HttpService; pub use self::settings::{ServerSettings, WorkerSettings, WorkerSettingsBuilder}; +#[doc(hidden)] +pub use self::service::{HttpService, StreamConfiguration}; + #[doc(hidden)] pub use self::helpers::write_content_length; @@ -268,6 +270,8 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static 
{ fn set_linger(&mut self, dur: Option) -> io::Result<()>; + fn set_keepalive(&mut self, dur: Option) -> io::Result<()>; + fn read_available(&mut self, buf: &mut BytesMut) -> Poll<(bool, bool), io::Error> { let mut read_some = false; loop { @@ -324,6 +328,11 @@ impl IoStream for ::tokio_uds::UnixStream { fn set_linger(&mut self, _dur: Option) -> io::Result<()> { Ok(()) } + + #[inline] + fn set_keepalive(&mut self, _nodelay: bool) -> io::Result<()> { + Ok(()) + } } impl IoStream for TcpStream { @@ -341,4 +350,9 @@ impl IoStream for TcpStream { fn set_linger(&mut self, dur: Option) -> io::Result<()> { TcpStream::set_linger(self, dur) } + + #[inline] + fn set_keepalive(&mut self, dur: Option) -> io::Result<()> { + TcpStream::set_keepalive(self, dur) + } } diff --git a/src/server/service.rs b/src/server/service.rs index 2988bc661..89a58af75 100644 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -1,4 +1,5 @@ use std::marker::PhantomData; +use std::time::Duration; use actix_net::service::{NewService, Service}; use futures::future::{ok, FutureResult}; @@ -10,6 +11,7 @@ use super::handler::HttpHandler; use super::settings::WorkerSettings; use super::IoStream; +/// `NewService` implementation for HTTP1/HTTP2 transports pub struct HttpService where H: HttpHandler, @@ -56,7 +58,6 @@ where Io: IoStream, { settings: WorkerSettings, - // tcp_ka: Option, _t: PhantomData, } @@ -66,12 +67,6 @@ where Io: IoStream, { fn new(settings: WorkerSettings) -> HttpServiceHandler { - // let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive { - // Some(Duration::new(val as u64, 0)) - // } else { - // None - // }; - HttpServiceHandler { settings, _t: PhantomData, @@ -94,7 +89,89 @@ where } fn call(&mut self, mut req: Self::Request) -> Self::Future { - let _ = req.set_nodelay(true); HttpChannel::new(self.settings.clone(), req, None) } } + +/// `NewService` implementation for stream configuration service +pub struct StreamConfiguration { + no_delay: Option, + tcp_ka: Option>, + _t: PhantomData<(T, E)>, +} + +impl StreamConfiguration { + /// Create new `StreamConfigurationService` instance. + pub fn new() -> Self { + Self { + no_delay: None, + tcp_ka: None, + _t: PhantomData, + } + } + + /// Sets the value of the `TCP_NODELAY` option on this socket. + pub fn nodelay(mut self, nodelay: bool) -> Self { + self.no_delay = Some(nodelay); + self + } + + /// Sets whether keepalive messages are enabled to be sent on this socket. 
+ pub fn tcp_keepalive(mut self, keepalive: Option) -> Self { + self.tcp_ka = Some(keepalive); + self + } +} + +impl NewService for StreamConfiguration { + type Request = T; + type Response = T; + type Error = E; + type InitError = (); + type Service = StreamConfigurationService; + type Future = FutureResult; + + fn new_service(&self) -> Self::Future { + ok(StreamConfigurationService { + no_delay: self.no_delay.clone(), + tcp_ka: self.tcp_ka.clone(), + _t: PhantomData, + }) + } +} + +/// Stream configuration service +pub struct StreamConfigurationService { + no_delay: Option, + tcp_ka: Option>, + _t: PhantomData<(T, E)>, +} + +impl Service for StreamConfigurationService +where + T: IoStream, +{ + type Request = T; + type Response = T; + type Error = E; + type Future = FutureResult; + + fn poll_ready(&mut self) -> Poll<(), Self::Error> { + Ok(Async::Ready(())) + } + + fn call(&mut self, mut req: Self::Request) -> Self::Future { + if let Some(no_delay) = self.no_delay { + if req.set_nodelay(no_delay).is_err() { + error!("Can not set socket no-delay option"); + } + } + if let Some(keepalive) = self.tcp_ka { + if req.set_keepalive(keepalive).is_err() { + error!("Can not set socket keep-alive option"); + } + } + + ok(req) + } +} diff --git a/src/server/ssl/nativetls.rs b/src/server/ssl/nativetls.rs index d59948c79..e56b4521b 100644 --- a/src/server/ssl/nativetls.rs +++ b/src/server/ssl/nativetls.rs @@ -21,4 +21,9 @@ impl IoStream for TlsStream { fn set_linger(&mut self, dur: Option) -> io::Result<()> { self.get_mut().get_mut().set_linger(dur) } + + #[inline] + fn set_keepalive(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().get_mut().set_keepalive(dur) + } } diff --git a/src/server/ssl/openssl.rs b/src/server/ssl/openssl.rs index 590dc0bbb..99ca40e03 100644 --- a/src/server/ssl/openssl.rs +++ b/src/server/ssl/openssl.rs @@ -74,4 +74,9 @@ impl IoStream for SslStream { fn set_linger(&mut self, dur: Option) -> io::Result<()> { self.get_mut().get_mut().set_linger(dur) } + + #[inline] + fn set_keepalive(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().get_mut().set_keepalive(dur) + } } diff --git a/src/server/ssl/rustls.rs b/src/server/ssl/rustls.rs index c74b62ea4..df78d1dc6 100644 --- a/src/server/ssl/rustls.rs +++ b/src/server/ssl/rustls.rs @@ -51,6 +51,11 @@ impl IoStream for TlsStream { fn set_linger(&mut self, dur: Option) -> io::Result<()> { self.get_mut().0.set_linger(dur) } + + #[inline] + fn set_keepalive(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().0.set_keepalive(dur) + } } impl IoStream for TlsStream { @@ -69,4 +74,9 @@ impl IoStream for TlsStream { fn set_linger(&mut self, dur: Option) -> io::Result<()> { self.get_mut().0.set_linger(dur) } + + #[inline] + fn set_keepalive(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().0.set_keepalive(dur) + } } diff --git a/tests/test_server.rs b/tests/test_server.rs index f8fabef6d..a74cb809a 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -1016,7 +1016,10 @@ fn test_server_cookies() { #[test] fn test_custom_pipeline() { use actix::System; - use actix_web::server::{HttpService, KeepAlive, WorkerSettings}; + use actix_net::service::NewServiceExt; + use actix_web::server::{ + HttpService, KeepAlive, StreamConfiguration, WorkerSettings, + }; let addr = test::TestServer::unused_addr(); @@ -1034,7 +1037,9 @@ fn test_custom_pipeline() { .server_address(addr) .finish(); - HttpService::new(settings) + StreamConfiguration::new() + .nodelay(true) + .and_then(HttpService::new(settings)) 
}).unwrap() .run(); }); diff --git a/tests/test_ws.rs b/tests/test_ws.rs index f67314e8a..3baa48eb7 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -7,7 +7,7 @@ extern crate rand; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; -use std::{thread, time}; +use std::thread; use bytes::Bytes; use futures::Stream; From 368f73513a733f360c4edc63f6191510989ed8ac Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 22:25:53 -0700 Subject: [PATCH 159/219] set tcp-keepalive for test as well --- tests/test_server.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_server.rs b/tests/test_server.rs index a74cb809a..a85c5c329 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -1039,6 +1039,7 @@ fn test_custom_pipeline() { StreamConfiguration::new() .nodelay(true) + .tcp_keepalive(Some(time::Duration::from_secs(10))) .and_then(HttpService::new(settings)) }).unwrap() .run(); From fdfadb52e1846e6dec09f205ddbbe830927ae949 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 22:29:30 -0700 Subject: [PATCH 160/219] fix doc test for State --- src/handler.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/handler.rs b/src/handler.rs index 399fd6ba3..88210fbc0 100644 --- a/src/handler.rs +++ b/src/handler.rs @@ -530,7 +530,7 @@ where /// } /// /// /// extract path info using serde -/// fn index(state: State, path: Path)) -> String { +/// fn index(state: State, path: Path) -> String { /// format!("{} {}!", state.msg, path.username) /// } /// From f007860a1650e89deae7aae9c5835632a15db16b Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 1 Oct 2018 22:48:11 -0700 Subject: [PATCH 161/219] cleanup warnings --- src/client/connector.rs | 5 +++++ src/server/h1.rs | 5 ++++- src/server/service.rs | 8 +++++++- src/server/ssl/openssl.rs | 2 +- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 88be77f9b..90a2e1c88 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -1335,4 +1335,9 @@ impl IoStream for TlsStream { fn set_linger(&mut self, dur: Option) -> io::Result<()> { self.get_mut().get_mut().set_linger(dur) } + + #[inline] + fn set_keepalive(&mut self, dur: Option) -> io::Result<()> { + self.get_mut().get_mut().set_keepalive(dur) + } } diff --git a/src/server/h1.rs b/src/server/h1.rs index 6875972ee..fe8f976b7 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -98,7 +98,7 @@ where (settings.now(), None) }; - let mut flags = if is_eof { + let flags = if is_eof { Flags::READ_DISCONNECTED } else if settings.keep_alive_enabled() { Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED @@ -664,6 +664,9 @@ mod tests { fn set_linger(&mut self, _: Option) -> io::Result<()> { Ok(()) } + fn set_keepalive(&mut self, _: Option) -> io::Result<()> { + Ok(()) + } } impl io::Write for Buffer { fn write(&mut self, buf: &[u8]) -> io::Result { diff --git a/src/server/service.rs b/src/server/service.rs index 89a58af75..231ac599e 100644 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -88,12 +88,15 @@ where Ok(Async::Ready(())) } - fn call(&mut self, mut req: Self::Request) -> Self::Future { + fn call(&mut self, req: Self::Request) -> Self::Future { HttpChannel::new(self.settings.clone(), req, None) } } /// `NewService` implementation for stream configuration service +/// +/// Stream configuration service allows to change some socket level +/// parameters. for example `tcp nodelay` or `tcp keep-alive`. 
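Since the doc comment above is terse, a condensed usage sketch of the new service follows. It assumes `settings` is the `WorkerSettings` value built exactly as in `test_custom_pipeline` earlier in this series; only the service composition is shown:

```rust
use std::time::Duration;

use actix_net::service::NewServiceExt;
use actix_web::server::{HttpService, StreamConfiguration};

// `settings` is assumed to come from the WorkerSettings builder shown in the test.
let pipeline = StreamConfiguration::new()
    .nodelay(true)                                // set TCP_NODELAY on every accepted socket
    .tcp_keepalive(Some(Duration::from_secs(10))) // enable keep-alive probes
    .and_then(HttpService::new(settings));        // then hand the stream to HTTP/1 or HTTP/2
```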
pub struct StreamConfiguration { no_delay: Option, tcp_ka: Option>, @@ -141,6 +144,9 @@ impl NewService for StreamConfiguration { } /// Stream configuration service +/// +/// Stream configuration service allows to change some socket level +/// parameters. for example `tcp nodelay` or `tcp keep-alive`. pub struct StreamConfigurationService { no_delay: Option, tcp_ka: Option>, diff --git a/src/server/ssl/openssl.rs b/src/server/ssl/openssl.rs index 99ca40e03..f9e0e1774 100644 --- a/src/server/ssl/openssl.rs +++ b/src/server/ssl/openssl.rs @@ -23,7 +23,7 @@ impl OpensslAcceptor { /// Create `OpensslAcceptor` with custom server flags. pub fn with_flags( - mut builder: SslAcceptorBuilder, flags: ServerFlags, + builder: SslAcceptorBuilder, flags: ServerFlags, ) -> io::Result> { let acceptor = openssl_acceptor_with_flags(builder, flags)?; From f3ce6574e4d7e6ec2308bbd2a0235a7b25b8caf4 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 00:19:28 -0700 Subject: [PATCH 162/219] fix client timer and add slow request tests --- CHANGES.md | 2 ++ Cargo.toml | 3 ++- src/server/builder.rs | 55 ++++++++++++++++++------------------------ src/server/channel.rs | 30 ++++++++++++++--------- src/server/h1.rs | 36 ++++++++++++++++++++++++++- src/server/http.rs | 22 +++++++++-------- src/server/settings.rs | 10 ++++++++ tests/test_server.rs | 40 ++++++++++++++++++++++++++++++ 8 files changed, 144 insertions(+), 54 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 32d2bea7b..145caec1d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,6 +8,8 @@ * Added slow request timeout setting +* Respond with 408 response on slow request timeout #523 + ### Fixed diff --git a/Cargo.toml b/Cargo.toml index 205e178b9..8997fa5ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,6 +86,7 @@ language-tags = "0.2" lazy_static = "1.0" lazycell = "1.0.0" parking_lot = "0.6" +serde_urlencoded = "^0.5.3" url = { version="1.7", features=["query_encoding"] } cookie = { version="0.11", features=["percent-encode"] } brotli2 = { version="^0.3.2", optional = true } @@ -125,7 +126,7 @@ webpki-roots = { version = "0.15", optional = true } # unix sockets tokio-uds = { version="0.2", optional = true } -serde_urlencoded = "^0.5.3" +backtrace="*" [dev-dependencies] env_logger = "0.5" diff --git a/src/server/builder.rs b/src/server/builder.rs index 9e9323537..6bafb4607 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -16,7 +16,7 @@ use super::KeepAlive; pub(crate) trait ServiceProvider { fn register( &self, server: Server, lst: net::TcpListener, host: String, - addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: u64, + addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64, client_shutdown: u64, ) -> Server; } @@ -28,7 +28,6 @@ where { factory: F, acceptor: A, - no_client_timer: bool, } impl HttpServiceBuilder @@ -40,27 +39,13 @@ where { /// Create http service builder pub fn new(factory: F, acceptor: A) -> Self { - Self { - factory, - acceptor, - no_client_timer: false, - } - } - - pub(crate) fn no_client_timer(mut self) -> Self { - self.no_client_timer = true; - self + Self { factory, acceptor } } fn finish( - &self, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, + &self, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64, client_shutdown: u64, ) -> impl ServiceFactory { - let timeout = if self.no_client_timer { - 0 - } else { - client_timeout - }; let factory = self.factory.clone(); let acceptor = self.acceptor.clone(); move || { 
@@ -68,12 +53,12 @@ where let settings = WorkerSettings::new( app, keep_alive, - timeout as u64, + client_timeout, client_shutdown, ServerSettings::new(addr, &host, false), ); - if timeout == 0 { + if secure { Either::A(ServerMessageAcceptor::new( settings.clone(), TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service)) @@ -88,14 +73,16 @@ where } else { Either::B(ServerMessageAcceptor::new( settings.clone(), - TcpAcceptor::new(AcceptorTimeout::new(timeout, acceptor.create())) - .map_err(|_| ()) - .map_init_err(|_| ()) - .and_then( - HttpService::new(settings) - .map_init_err(|_| ()) - .map_err(|_| ()), - ), + TcpAcceptor::new(AcceptorTimeout::new( + client_timeout, + acceptor.create(), + )).map_err(|_| ()) + .map_init_err(|_| ()) + .and_then( + HttpService::new(settings) + .map_init_err(|_| ()) + .map_err(|_| ()), + ), )) } } @@ -112,7 +99,6 @@ where HttpServiceBuilder { factory: self.factory.clone(), acceptor: self.acceptor.clone(), - no_client_timer: self.no_client_timer, } } } @@ -126,13 +112,20 @@ where { fn register( &self, server: Server, lst: net::TcpListener, host: String, - addr: net::SocketAddr, keep_alive: KeepAlive, client_timeout: u64, + addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64, client_shutdown: u64, ) -> Server { server.listen2( "actix-web", lst, - self.finish(host, addr, keep_alive, client_timeout, client_shutdown), + self.finish( + host, + addr, + keep_alive, + secure, + client_timeout, + client_shutdown, + ), ) } } diff --git a/src/server/channel.rs b/src/server/channel.rs index d8cad9707..f57806209 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -9,6 +9,7 @@ use tokio_timer::Delay; use super::error::HttpDispatchError; use super::settings::WorkerSettings; use super::{h1, h2, HttpHandler, IoStream}; +use http::StatusCode; const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0"; @@ -42,11 +43,9 @@ where pub(crate) fn new( settings: WorkerSettings, io: T, peer: Option, ) -> HttpChannel { - let ka_timeout = settings.client_timer(); - HttpChannel { - ka_timeout, node: None, + ka_timeout: settings.client_timer(), proto: Some(HttpProtocol::Unknown( settings, peer, @@ -91,10 +90,23 @@ where fn poll(&mut self) -> Poll { // keep-alive timer - if let Some(ref mut timer) = self.ka_timeout { - match timer.poll() { + if self.ka_timeout.is_some() { + match self.ka_timeout.as_mut().unwrap().poll() { Ok(Async::Ready(_)) => { trace!("Slow request timed out, close connection"); + if let Some(HttpProtocol::Unknown(settings, _, io, buf)) = + self.proto.take() + { + self.proto = + Some(HttpProtocol::H1(h1::Http1Dispatcher::for_error( + settings, + io, + StatusCode::REQUEST_TIMEOUT, + self.ka_timeout.take(), + buf, + ))); + return self.poll(); + } return Ok(Async::Ready(())); } Ok(Async::NotReady) => (), @@ -121,12 +133,8 @@ where let mut is_eof = false; let kind = match self.proto { - Some(HttpProtocol::H1(ref mut h1)) => { - return h1.poll(); - } - Some(HttpProtocol::H2(ref mut h2)) => { - return h2.poll(); - } + Some(HttpProtocol::H1(ref mut h1)) => return h1.poll(), + Some(HttpProtocol::H2(ref mut h2)) => return h2.poll(), Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => { let mut err = None; let mut disconnect = false; diff --git a/src/server/h1.rs b/src/server/h1.rs index fe8f976b7..205be9494 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -121,6 +121,31 @@ where } } + pub(crate) fn for_error( + settings: WorkerSettings, stream: T, status: StatusCode, + mut keepalive_timer: Option, buf: BytesMut, + ) -> 
Self { + if let Some(deadline) = settings.client_timer_expire() { + let _ = keepalive_timer.as_mut().map(|delay| delay.reset(deadline)); + } + + let mut disp = Http1Dispatcher { + flags: Flags::STARTED | Flags::READ_DISCONNECTED, + stream: H1Writer::new(stream, settings.clone()), + decoder: H1Decoder::new(), + payload: None, + tasks: VecDeque::new(), + error: None, + addr: None, + ka_timer: keepalive_timer, + ka_expire: settings.now(), + buf, + settings, + }; + disp.push_response_entry(status); + disp + } + #[inline] pub fn settings(&self) -> &WorkerSettings { &self.settings @@ -133,7 +158,7 @@ where #[inline] fn can_read(&self) -> bool { - if self.flags.intersects(Flags::READ_DISCONNECTED) { + if self.flags.contains(Flags::READ_DISCONNECTED) { return false; } @@ -250,6 +275,15 @@ where ); let _ = IoStream::shutdown(io, Shutdown::Both); return Err(HttpDispatchError::ShutdownTimeout); + } else if !self.flags.contains(Flags::STARTED) { + // timeout on first request (slow request) return 408 + trace!("Slow request timeout"); + self.flags + .insert(Flags::STARTED | Flags::READ_DISCONNECTED); + self.tasks.push_back(Entry::Error(ServerError::err( + Version::HTTP_11, + StatusCode::REQUEST_TIMEOUT, + ))); } else { trace!("Keep-alive timeout, close connection"); self.flags.insert(Flags::SHUTDOWN); diff --git a/src/server/http.rs b/src/server/http.rs index 5a7200868..91f5d73e0 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -232,10 +232,10 @@ where lst, addr, scheme: "http", - handler: Box::new( - HttpServiceBuilder::new(self.factory.clone(), DefaultAcceptor) - .no_client_timer(), - ), + handler: Box::new(HttpServiceBuilder::new( + self.factory.clone(), + DefaultAcceptor, + )), }); self @@ -498,10 +498,10 @@ impl H + Send + Clone> HttpServer { .as_ref() .map(|h| h.to_owned()) .unwrap_or_else(|| format!("{}", socket.addr)); - let client_shutdown = if socket.scheme == "https" { - self.client_shutdown + let (secure, client_shutdown) = if socket.scheme == "https" { + (true, self.client_shutdown) } else { - 0 + (false, 0) }; srv = socket.handler.register( srv, @@ -509,6 +509,7 @@ impl H + Send + Clone> HttpServer { host, socket.addr, self.keep_alive, + secure, self.client_timeout, client_shutdown, ); @@ -550,10 +551,10 @@ impl H + Send + Clone> HttpServer { .as_ref() .map(|h| h.to_owned()) .unwrap_or_else(|| format!("{}", socket.addr)); - let client_shutdown = if socket.scheme == "https" { - self.client_shutdown + let (secure, client_shutdown) = if socket.scheme == "https" { + (true, self.client_shutdown) } else { - 0 + (false, 0) }; srv = socket.handler.register( srv, @@ -561,6 +562,7 @@ impl H + Send + Clone> HttpServer { host, socket.addr, self.keep_alive, + secure, self.client_timeout, client_shutdown, ); diff --git a/src/server/settings.rs b/src/server/settings.rs index a50a07069..2f306073c 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -232,6 +232,16 @@ impl WorkerSettings { } } + /// Client timeout for first request. 
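For context, this timer backs the user-facing `client_timeout` setting: with a non-zero value, a connection that does not deliver its first request head in time is now answered with `408 Request Timeout` instead of being dropped silently, and `0` disables the timer. A minimal sketch of enabling it (address and timeout are illustrative):

```rust
use actix_web::{server, App, HttpResponse};

fn main() {
    server::new(|| App::new().resource("/", |r| r.f(|_| HttpResponse::Ok())))
        .client_timeout(200) // milliseconds allowed for the first request head to arrive
        .bind("127.0.0.1:8080")
        .unwrap()
        .run();
}
```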
+ pub fn client_timer_expire(&self) -> Option { + let delay = self.0.client_timeout; + if delay != 0 { + Some(self.now() + Duration::from_millis(delay)) + } else { + None + } + } + /// Client shutdown timer pub fn client_shutdown_timer(&self) -> Option { let delay = self.0.client_shutdown; diff --git a/tests/test_server.rs b/tests/test_server.rs index a85c5c329..269a1cd7d 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -1054,3 +1054,43 @@ fn test_custom_pipeline() { assert!(response.status().is_success()); } } + +#[test] +fn test_slow_request() { + use actix::System; + use std::net; + use std::sync::mpsc; + let (tx, rx) = mpsc::channel(); + + let addr = test::TestServer::unused_addr(); + + thread::spawn(move || { + System::run(move || { + let srv = server::new(|| { + vec![App::new().resource("/", |r| { + r.method(http::Method::GET).f(|_| HttpResponse::Ok()) + })] + }); + + let srv = srv.bind(addr).unwrap(); + srv.client_timeout(200).start(); + let _ = tx.send(System::current()); + }); + }); + let sys = rx.recv().unwrap(); + + thread::sleep(time::Duration::from_millis(200)); + + let mut stream = net::TcpStream::connect(addr).unwrap(); + let mut data = String::new(); + let _ = stream.read_to_string(&mut data); + assert!(data.starts_with("HTTP/1.1 408 Request Timeou")); + + let mut stream = net::TcpStream::connect(addr).unwrap(); + let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n"); + let mut data = String::new(); + let _ = stream.read_to_string(&mut data); + assert!(data.starts_with("HTTP/1.1 408 Request Timeou")); + + sys.stop(); +} From eed377e77356f2c89b4cf9cda9ab4e76c0dbe146 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 00:20:27 -0700 Subject: [PATCH 163/219] uneeded dep --- Cargo.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 8997fa5ee..cedb38da3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -126,8 +126,6 @@ webpki-roots = { version = "0.15", optional = true } # unix sockets tokio-uds = { version="0.2", optional = true } -backtrace="*" - [dev-dependencies] env_logger = "0.5" serde_derive = "1.0" From c8505bb53f6d93d4f4091c4a491e4077a5df370d Mon Sep 17 00:00:00 2001 From: Danil Berestov Date: Wed, 3 Oct 2018 00:15:48 +0800 Subject: [PATCH 164/219] content-length bug fix (#525) * content-length bug fix * changes.md is updated * typo --- CHANGES.md | 2 ++ src/server/output.rs | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 145caec1d..375f2882f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -19,6 +19,8 @@ * Websocket server finished() isn't called if client disconnects #511 +* Responses with the following codes: 100, 101, 102, 204 -- are sent without Content-Length header. 
#521 + ## [0.7.8] - 2018-09-17 diff --git a/src/server/output.rs b/src/server/output.rs index 46b03c9dc..70c24facc 100644 --- a/src/server/output.rs +++ b/src/server/output.rs @@ -11,7 +11,7 @@ use flate2::write::{GzEncoder, ZlibEncoder}; #[cfg(feature = "flate2")] use flate2::Compression; use http::header::{ACCEPT_ENCODING, CONTENT_LENGTH}; -use http::Version; +use http::{StatusCode, Version}; use super::message::InnerRequest; use body::{Binary, Body}; @@ -192,7 +192,13 @@ impl Output { let transfer = match resp.body() { Body::Empty => { if !info.head { - info.length = ResponseLength::Zero; + info.length = match resp.status() { + StatusCode::NO_CONTENT + | StatusCode::CONTINUE + | StatusCode::SWITCHING_PROTOCOLS + | StatusCode::PROCESSING => ResponseLength::None, + _ => ResponseLength::Zero, + }; } *self = Output::Empty(buf); return; From f8b176de9ec17bd338229f96a1adbdaaadda0abb Mon Sep 17 00:00:00 2001 From: Douman Date: Tue, 2 Oct 2018 20:09:31 +0300 Subject: [PATCH 165/219] Fix no_http2 flag in HttpServer (#526) --- CHANGES.md | 1 + src/server/http.rs | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 375f2882f..a55ef7ec2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -21,6 +21,7 @@ * Responses with the following codes: 100, 101, 102, 204 -- are sent without Content-Length header. #521 +* Correct usage of `no_http2` flag in `bind_*` methods. #519 ## [0.7.8] - 2018-09-17 diff --git a/src/server/http.rs b/src/server/http.rs index 91f5d73e0..6a7790c13 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -414,7 +414,7 @@ where use actix_net::service::NewServiceExt; // alpn support - let flags = if !self.no_http2 { + let flags = if self.no_http2 { ServerFlags::HTTP1 } else { ServerFlags::HTTP1 | ServerFlags::HTTP2 @@ -437,7 +437,7 @@ where use actix_net::service::NewServiceExt; // alpn support - let flags = if !self.no_http2 { + let flags = if self.no_http2 { ServerFlags::HTTP1 } else { ServerFlags::HTTP1 | ServerFlags::HTTP2 From 61c7534e0362953159f302416611ad9fa020ac80 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 10:43:23 -0700 Subject: [PATCH 166/219] fix stream flushing --- src/server/error.rs | 4 + src/server/h1.rs | 162 +++++++++++++++++++++-------------------- src/server/h1writer.rs | 4 + tests/test_server.rs | 32 ++++++++ tests/test_ws.rs | 14 ++-- 5 files changed, 131 insertions(+), 85 deletions(-) diff --git a/src/server/error.rs b/src/server/error.rs index eb3e88478..70f100998 100644 --- a/src/server/error.rs +++ b/src/server/error.rs @@ -44,6 +44,10 @@ pub enum HttpDispatchError { #[fail(display = "HTTP2 error: {}", _0)] Http2(http2::Error), + /// Payload is not consumed + #[fail(display = "Task is completed but request's payload is not consumed")] + PayloadIsNotConsumed, + /// Malformed request #[fail(display = "Malformed request")] MalformedRequest, diff --git a/src/server/h1.rs b/src/server/h1.rs index 205be9494..cd9134275 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -30,7 +30,7 @@ bitflags! 
{ const READ_DISCONNECTED = 0b0001_0000; const WRITE_DISCONNECTED = 0b0010_0000; const POLLED = 0b0100_0000; - + const FLUSHED = 0b1000_0000; } } @@ -99,9 +99,9 @@ where }; let flags = if is_eof { - Flags::READ_DISCONNECTED + Flags::READ_DISCONNECTED | Flags::FLUSHED } else if settings.keep_alive_enabled() { - Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED + Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED | Flags::FLUSHED } else { Flags::empty() }; @@ -130,7 +130,7 @@ where } let mut disp = Http1Dispatcher { - flags: Flags::STARTED | Flags::READ_DISCONNECTED, + flags: Flags::STARTED | Flags::READ_DISCONNECTED | Flags::FLUSHED, stream: H1Writer::new(stream, settings.clone()), decoder: H1Decoder::new(), payload: None, @@ -177,7 +177,8 @@ where } if !checked || self.tasks.is_empty() { - self.flags.insert(Flags::WRITE_DISCONNECTED); + self.flags + .insert(Flags::WRITE_DISCONNECTED | Flags::FLUSHED); self.stream.disconnected(); // notify all tasks @@ -205,54 +206,70 @@ where // shutdown if self.flags.contains(Flags::SHUTDOWN) { - if self - .flags - .intersects(Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED) - { + if self.flags.intersects(Flags::WRITE_DISCONNECTED) { return Ok(Async::Ready(())); } - match self.stream.poll_completed(true) { - Ok(Async::NotReady) => return Ok(Async::NotReady), - Ok(Async::Ready(_)) => return Ok(Async::Ready(())), - Err(err) => { - debug!("Error sending data: {}", err); - return Err(err.into()); - } - } + return self.poll_flush(true); } - self.poll_io()?; - + // process incoming requests if !self.flags.contains(Flags::WRITE_DISCONNECTED) { - match self.poll_handler()? { - Async::Ready(true) => self.poll(), - Async::Ready(false) => { - self.flags.insert(Flags::SHUTDOWN); - self.poll() + self.poll_handler()?; + + // flush stream + self.poll_flush(false)?; + + // deal with keep-alive and stream eof (client-side write shutdown) + if self.tasks.is_empty() && self.flags.intersects(Flags::FLUSHED) { + // handle stream eof + if self + .flags + .intersects(Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED) + { + return Ok(Async::Ready(())); } - Async::NotReady => { - // deal with keep-alive and steam eof (client-side write shutdown) - if self.tasks.is_empty() { - // handle stream eof - if self.flags.intersects( - Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED, - ) { - return Ok(Async::Ready(())); - } - // no keep-alive - if self.flags.contains(Flags::STARTED) - && (!self.flags.contains(Flags::KEEPALIVE_ENABLED) - || !self.flags.contains(Flags::KEEPALIVE)) - { - self.flags.insert(Flags::SHUTDOWN); - return self.poll(); - } + // no keep-alive + if self.flags.contains(Flags::STARTED) + && (!self.flags.contains(Flags::KEEPALIVE_ENABLED) + || !self.flags.contains(Flags::KEEPALIVE)) + { + self.flags.insert(Flags::SHUTDOWN); + return self.poll(); + } + } + Ok(Async::NotReady) + } else if let Some(err) = self.error.take() { + Err(err) + } else { + Ok(Async::Ready(())) + } + } + + /// Flush stream + fn poll_flush(&mut self, shutdown: bool) -> Poll<(), HttpDispatchError> { + if shutdown || self.flags.contains(Flags::STARTED) { + match self.stream.poll_completed(shutdown) { + Ok(Async::NotReady) => { + // mark stream + if !self.stream.flushed() { + self.flags.remove(Flags::FLUSHED); } Ok(Async::NotReady) } + Err(err) => { + debug!("Error sending data: {}", err); + self.client_disconnected(false); + return Err(err.into()); + } + Ok(Async::Ready(_)) => { + // if payload is not consumed we can not use connection + if self.payload.is_some() && self.tasks.is_empty() { + return 
Err(HttpDispatchError::PayloadIsNotConsumed); + } + self.flags.insert(Flags::FLUSHED); + Ok(Async::Ready(())) + } } - } else if let Some(err) = self.error.take() { - Err(err) } else { Ok(Async::Ready(())) } @@ -317,20 +334,23 @@ where } #[inline] - /// read data from stream - pub(self) fn poll_io(&mut self) -> Result<(), HttpDispatchError> { + /// read data from the stream + pub(self) fn poll_io(&mut self) -> Result { if !self.flags.contains(Flags::POLLED) { - self.parse()?; + let updated = self.parse()?; self.flags.insert(Flags::POLLED); - return Ok(()); + return Ok(updated); } // read io from socket + let mut updated = false; if self.can_read() && self.tasks.len() < MAX_PIPELINED_MESSAGES { match self.stream.get_mut().read_available(&mut self.buf) { Ok(Async::Ready((read_some, disconnected))) => { if read_some { - self.parse()?; + if self.parse()? { + updated = true; + } } if disconnected { self.client_disconnected(true); @@ -343,13 +363,14 @@ where } } } - Ok(()) + Ok(updated) } - pub(self) fn poll_handler(&mut self) -> Poll { - let retry = self.can_read(); + pub(self) fn poll_handler(&mut self) -> Result<(), HttpDispatchError> { + self.poll_io()?; + let mut retry = self.can_read(); - // process first pipelined response, only one task can do io operation in http/1 + // process first pipelined response, only first task can do io operation in http/1 while !self.tasks.is_empty() { match self.tasks[0].poll_io(&mut self.stream) { Ok(Async::Ready(ready)) => { @@ -375,9 +396,12 @@ where } // if read-backpressure is enabled and we consumed some data. - // we may read more data + // we may read more dataand retry if !retry && self.can_read() { - return Ok(Async::Ready(true)); + if self.poll_io()? { + retry = self.can_read(); + continue; + } } break; } @@ -431,25 +455,7 @@ where } } - // flush stream - if self.flags.contains(Flags::STARTED) { - match self.stream.poll_completed(false) { - Ok(Async::NotReady) => return Ok(Async::NotReady), - Err(err) => { - debug!("Error sending data: {}", err); - self.client_disconnected(false); - return Err(err.into()); - } - Ok(Async::Ready(_)) => { - // if payload is not consumed we can not use connection - if self.payload.is_some() && self.tasks.is_empty() { - return Ok(Async::Ready(false)); - } - } - } - } - - Ok(Async::NotReady) + Ok(()) } fn push_response_entry(&mut self, status: StatusCode) { @@ -457,7 +463,7 @@ where .push_back(Entry::Error(ServerError::err(Version::HTTP_11, status))); } - pub(self) fn parse(&mut self) -> Result<(), HttpDispatchError> { + pub(self) fn parse(&mut self) -> Result { let mut updated = false; 'outer: loop { @@ -524,7 +530,7 @@ where payload.feed_data(chunk); } else { error!("Internal server error: unexpected payload chunk"); - self.flags.insert(Flags::READ_DISCONNECTED); + self.flags.insert(Flags::READ_DISCONNECTED | Flags::STARTED); self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR); self.error = Some(HttpDispatchError::InternalError); break; @@ -536,7 +542,7 @@ where payload.feed_eof(); } else { error!("Internal server error: unexpected eof"); - self.flags.insert(Flags::READ_DISCONNECTED); + self.flags.insert(Flags::READ_DISCONNECTED | Flags::STARTED); self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR); self.error = Some(HttpDispatchError::InternalError); break; @@ -559,7 +565,7 @@ where // Malformed requests should be responded with 400 self.push_response_entry(StatusCode::BAD_REQUEST); - self.flags.insert(Flags::READ_DISCONNECTED); + self.flags.insert(Flags::READ_DISCONNECTED | Flags::STARTED); self.error 
= Some(HttpDispatchError::MalformedRequest); break; } @@ -571,7 +577,7 @@ where self.ka_expire = expire; } } - Ok(()) + Ok(updated) } } diff --git a/src/server/h1writer.rs b/src/server/h1writer.rs index 3036aa089..5c32de3aa 100644 --- a/src/server/h1writer.rs +++ b/src/server/h1writer.rs @@ -62,6 +62,10 @@ impl H1Writer { self.flags = Flags::KEEPALIVE; } + pub fn flushed(&mut self) -> bool { + self.buffer.is_empty() + } + pub fn disconnected(&mut self) { self.flags.insert(Flags::DISCONNECTED); } diff --git a/tests/test_server.rs b/tests/test_server.rs index 269a1cd7d..03a89642e 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -1094,3 +1094,35 @@ fn test_slow_request() { sys.stop(); } + +#[test] +fn test_malformed_request() { + use actix::System; + use std::net; + use std::sync::mpsc; + let (tx, rx) = mpsc::channel(); + + let addr = test::TestServer::unused_addr(); + thread::spawn(move || { + System::run(move || { + let srv = server::new(|| { + vec![App::new().resource("/", |r| { + r.method(http::Method::GET).f(|_| HttpResponse::Ok()) + })] + }); + + let _ = srv.bind(addr).unwrap().start(); + let _ = tx.send(System::current()); + }); + }); + let sys = rx.recv().unwrap(); + thread::sleep(time::Duration::from_millis(200)); + + let mut stream = net::TcpStream::connect(addr).unwrap(); + let _ = stream.write_all(b"GET /test/tests/test HTTP1.1\r\n"); + let mut data = String::new(); + let _ = stream.read_to_string(&mut data); + assert!(data.starts_with("HTTP/1.1 400 Bad Request")); + + sys.stop(); +} diff --git a/tests/test_ws.rs b/tests/test_ws.rs index 3baa48eb7..522832e00 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -7,7 +7,7 @@ extern crate rand; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; -use std::thread; +use std::{thread, time}; use bytes::Bytes; use futures::Stream; @@ -380,17 +380,17 @@ fn test_ws_stopped() { let num = Arc::new(AtomicUsize::new(0)); let num2 = num.clone(); - let _ = thread::spawn(move || { + let mut srv = test::TestServer::new(move |app| { let num3 = num2.clone(); - let mut srv = test::TestServer::new(move |app| { - let num4 = num3.clone(); - app.handler(move |req| ws::start(req, WsStopped(num4.clone()))) - }); + app.handler(move |req| ws::start(req, WsStopped(num3.clone()))) + }); + { let (reader, mut writer) = srv.ws().unwrap(); writer.text("text"); let (item, _) = srv.execute(reader.into_future()).unwrap(); assert_eq!(item, Some(ws::Message::Text("text".to_owned()))); - }).join(); + } + thread::sleep(time::Duration::from_millis(1000)); assert_eq!(num.load(Ordering::Relaxed), 1); } From 724668910b5817e6e9a5f9efab92da871d2b6941 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 11:18:59 -0700 Subject: [PATCH 167/219] fix ssh handshake timeout --- src/server/acceptor.rs | 13 +++++--- src/server/builder.rs | 38 +++++++--------------- tests/test_server.rs | 72 +++++++++++++++++++++++++++++++++++++++--- tests/test_ws.rs | 4 +-- 4 files changed, 90 insertions(+), 37 deletions(-) diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs index 15d66112a..3dcd8ac88 100644 --- a/src/server/acceptor.rs +++ b/src/server/acceptor.rs @@ -176,12 +176,15 @@ where /// Applies timeout to request prcoessing. 
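The `AcceptorTimeout` types that follow wrap the TLS handshake in a deadline. As a reference for the pattern only (not the actual implementation, which reports the timeout through its own error type), a self-contained futures-0.1 sketch that races an inner future against a `tokio_timer` delay:

```rust
use std::time::Duration;

use futures::{Async, Future, Poll};
use tokio_timer::{sleep, Delay};

/// Illustrative only: `None` in the error position means "timed out".
struct WithTimeout<F> {
    fut: F,
    sleep: Delay,
}

fn with_timeout<F: Future>(fut: F, dur: Duration) -> WithTimeout<F> {
    WithTimeout { fut, sleep: sleep(dur) }
}

impl<F: Future> Future for WithTimeout<F> {
    type Item = F::Item;
    type Error = Option<F::Error>;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        // First give the wrapped future a chance to finish.
        match self.fut.poll() {
            Ok(Async::Ready(item)) => return Ok(Async::Ready(item)),
            Ok(Async::NotReady) => (),
            Err(e) => return Err(Some(e)),
        }
        // Then check the deadline; anything other than NotReady ends the wait.
        match self.sleep.poll() {
            Ok(Async::NotReady) => Ok(Async::NotReady),
            _ => Err(None),
        }
    }
}
```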
pub(crate) struct AcceptorTimeout { inner: T, - timeout: u64, + timeout: Duration, } impl AcceptorTimeout { pub(crate) fn new(timeout: u64, inner: T) -> Self { - Self { inner, timeout } + Self { + inner, + timeout: Duration::from_millis(timeout), + } } } @@ -204,7 +207,7 @@ impl NewService for AcceptorTimeout { #[doc(hidden)] pub(crate) struct AcceptorTimeoutFut { fut: T::Future, - timeout: u64, + timeout: Duration, } impl Future for AcceptorTimeoutFut { @@ -225,7 +228,7 @@ impl Future for AcceptorTimeoutFut { /// Applies timeout to request prcoessing. pub(crate) struct AcceptorTimeoutService { inner: T, - timeout: u64, + timeout: Duration, } impl Service for AcceptorTimeoutService { @@ -241,7 +244,7 @@ impl Service for AcceptorTimeoutService { fn call(&mut self, req: Self::Request) -> Self::Future { AcceptorTimeoutResponse { fut: self.inner.call(req), - sleep: sleep(Duration::from_millis(self.timeout)), + sleep: sleep(self.timeout), } } } diff --git a/src/server/builder.rs b/src/server/builder.rs index 6bafb4607..8a979752e 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -59,18 +59,6 @@ where ); if secure { - Either::A(ServerMessageAcceptor::new( - settings.clone(), - TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service)) - .map_err(|_| ()) - .map_init_err(|_| ()) - .and_then( - HttpService::new(settings) - .map_init_err(|_| ()) - .map_err(|_| ()), - ), - )) - } else { Either::B(ServerMessageAcceptor::new( settings.clone(), TcpAcceptor::new(AcceptorTimeout::new( @@ -84,25 +72,23 @@ where .map_err(|_| ()), ), )) + } else { + Either::A(ServerMessageAcceptor::new( + settings.clone(), + TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service)) + .map_err(|_| ()) + .map_init_err(|_| ()) + .and_then( + HttpService::new(settings) + .map_init_err(|_| ()) + .map_err(|_| ()), + ), + )) } } } } -impl Clone for HttpServiceBuilder -where - F: Fn() -> H + Send + Clone, - H: IntoHttpHandler, - A: AcceptorServiceFactory, -{ - fn clone(&self) -> Self { - HttpServiceBuilder { - factory: self.factory.clone(), - acceptor: self.acceptor.clone(), - } - } -} - impl ServiceProvider for HttpServiceBuilder where F: Fn() -> H + Send + Clone + 'static, diff --git a/tests/test_server.rs b/tests/test_server.rs index 03a89642e..4f33e3137 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -15,6 +15,9 @@ extern crate tokio_current_thread as current_thread; extern crate tokio_reactor; extern crate tokio_tcp; +#[cfg(feature = "ssl")] +extern crate openssl; + use std::io::{Read, Write}; use std::sync::Arc; use std::{thread, time}; @@ -1084,13 +1087,13 @@ fn test_slow_request() { let mut stream = net::TcpStream::connect(addr).unwrap(); let mut data = String::new(); let _ = stream.read_to_string(&mut data); - assert!(data.starts_with("HTTP/1.1 408 Request Timeou")); + assert!(data.starts_with("HTTP/1.1 408 Request Timeout")); let mut stream = net::TcpStream::connect(addr).unwrap(); let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n"); let mut data = String::new(); let _ = stream.read_to_string(&mut data); - assert!(data.starts_with("HTTP/1.1 408 Request Timeou")); + assert!(data.starts_with("HTTP/1.1 408 Request Timeout")); sys.stop(); } @@ -1106,9 +1109,9 @@ fn test_malformed_request() { thread::spawn(move || { System::run(move || { let srv = server::new(|| { - vec![App::new().resource("/", |r| { + App::new().resource("/", |r| { r.method(http::Method::GET).f(|_| HttpResponse::Ok()) - })] + }) }); let _ = srv.bind(addr).unwrap().start(); @@ -1126,3 +1129,64 @@ fn 
test_malformed_request() { sys.stop(); } + +#[test] +fn test_app_404() { + let mut srv = test::TestServer::with_factory(|| { + App::new().prefix("/prefix").resource("/", |r| { + r.method(http::Method::GET).f(|_| HttpResponse::Ok()) + }) + }); + + let request = srv.client(http::Method::GET, "/prefix/").finish().unwrap(); + let response = srv.execute(request.send()).unwrap(); + assert!(response.status().is_success()); + + let request = srv.client(http::Method::GET, "/").finish().unwrap(); + let response = srv.execute(request.send()).unwrap(); + assert_eq!(response.status(), http::StatusCode::NOT_FOUND); +} + +#[test] +#[cfg(feature = "ssl")] +fn test_ssl_handshake_timeout() { + use actix::System; + use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod}; + use std::net; + use std::sync::mpsc; + + let (tx, rx) = mpsc::channel(); + let addr = test::TestServer::unused_addr(); + + // load ssl keys + let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); + builder + .set_private_key_file("tests/key.pem", SslFiletype::PEM) + .unwrap(); + builder + .set_certificate_chain_file("tests/cert.pem") + .unwrap(); + + thread::spawn(move || { + System::run(move || { + let srv = server::new(|| { + App::new().resource("/", |r| { + r.method(http::Method::GET).f(|_| HttpResponse::Ok()) + }) + }); + + srv.bind_ssl(addr, builder) + .unwrap() + .workers(1) + .client_timeout(200) + .start(); + let _ = tx.send(System::current()); + }); + }); + let sys = rx.recv().unwrap(); + + let mut stream = net::TcpStream::connect(addr).unwrap(); + let mut data = String::new(); + let _ = stream.read_to_string(&mut data); + assert!(data.is_empty()) +} diff --git a/tests/test_ws.rs b/tests/test_ws.rs index 522832e00..ebb5ff297 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -14,7 +14,7 @@ use futures::Stream; use rand::distributions::Alphanumeric; use rand::Rng; -#[cfg(feature = "alpn")] +#[cfg(feature = "ssl")] extern crate openssl; #[cfg(feature = "rust-tls")] extern crate rustls; @@ -282,7 +282,7 @@ fn test_server_send_bin() { } #[test] -#[cfg(feature = "alpn")] +#[cfg(feature = "ssl")] fn test_ws_server_ssl() { extern crate openssl; use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod}; From b59712c439c438ddc73efaf0df17c72b2fd5e9d9 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 11:32:43 -0700 Subject: [PATCH 168/219] add ssl handshake timeout tests --- src/server/h1.rs | 4 +- tests/test_server.rs | 94 ++++++++++++++++++++++++++++++++++++++++++++ tests/test_ws.rs | 2 - 3 files changed, 96 insertions(+), 4 deletions(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index cd9134275..af7e65297 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -206,7 +206,7 @@ where // shutdown if self.flags.contains(Flags::SHUTDOWN) { - if self.flags.intersects(Flags::WRITE_DISCONNECTED) { + if self.flags.contains(Flags::WRITE_DISCONNECTED) { return Ok(Async::Ready(())); } return self.poll_flush(true); @@ -220,7 +220,7 @@ where self.poll_flush(false)?; // deal with keep-alive and stream eof (client-side write shutdown) - if self.tasks.is_empty() && self.flags.intersects(Flags::FLUSHED) { + if self.tasks.is_empty() && self.flags.contains(Flags::FLUSHED) { // handle stream eof if self .flags diff --git a/tests/test_server.rs b/tests/test_server.rs index 4f33e3137..9c17fd665 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -890,6 +890,100 @@ fn test_brotli_encoding_large() { assert_eq!(bytes, Bytes::from(data)); } +#[cfg(all(feature = "brotli", future = "ssl"))] +#[test] +fn 
test_ssl_brotli_encoding_large() { + use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod}; + // load ssl keys + let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); + builder + .set_private_key_file("tests/key.pem", SslFiletype::PEM) + .unwrap(); + builder + .set_certificate_chain_file("tests/cert.pem") + .unwrap(); + + let data = STR.repeat(10); + let mut srv = test::TestServer::build().ssl(builder).start(|app| { + app.handler(|req: &HttpRequest| { + req.body() + .and_then(|bytes: Bytes| { + Ok(HttpResponse::Ok() + .content_encoding(http::ContentEncoding::Identity) + .body(bytes)) + }).responder() + }) + }); + + let mut e = BrotliEncoder::new(Vec::new(), 5); + e.write_all(data.as_ref()).unwrap(); + let enc = e.finish().unwrap(); + + // client request + let request = srv + .post() + .header(http::header::CONTENT_ENCODING, "br") + .body(enc) + .unwrap(); + let response = srv.execute(request.send()).unwrap(); + assert!(response.status().is_success()); + + // read response + let bytes = srv.execute(response.body()).unwrap(); + assert_eq!(bytes, Bytes::from(data)); +} + +#[cfg(future = "rust-ssl")] +#[test] +fn test_reading_deflate_encoding_large_random_ssl() { + use rustls::internal::pemfile::{certs, rsa_private_keys}; + use rustls::{NoClientAuth, ServerConfig}; + use std::fs::File; + use std::io::BufReader; + + // load ssl keys + let mut config = ServerConfig::new(NoClientAuth::new()); + let cert_file = &mut BufReader::new(File::open("tests/cert.pem").unwrap()); + let key_file = &mut BufReader::new(File::open("tests/key.pem").unwrap()); + let cert_chain = certs(cert_file).unwrap(); + let mut keys = rsa_private_keys(key_file).unwrap(); + config.set_single_cert(cert_chain, keys.remove(0)).unwrap(); + + let data = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(160_000) + .collect::(); + + let mut srv = test::TestServer::build().rustls(config).start(|app| { + app.handler(|req: &HttpRequest| { + req.body() + .and_then(|bytes: Bytes| { + Ok(HttpResponse::Ok() + .content_encoding(http::ContentEncoding::Identity) + .body(bytes)) + }).responder() + }) + }); + + let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); + e.write_all(data.as_ref()).unwrap(); + let enc = e.finish().unwrap(); + + // client request + let request = srv + .post() + .header(http::header::CONTENT_ENCODING, "deflate") + .body(enc) + .unwrap(); + let response = srv.execute(request.send()).unwrap(); + assert!(response.status().is_success()); + + // read response + let bytes = srv.execute(response.body()).unwrap(); + assert_eq!(bytes.len(), data.len()); + assert_eq!(bytes, Bytes::from(data)); +} + #[test] fn test_h2() { let srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR))); diff --git a/tests/test_ws.rs b/tests/test_ws.rs index ebb5ff297..5a0ce204f 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -284,7 +284,6 @@ fn test_server_send_bin() { #[test] #[cfg(feature = "ssl")] fn test_ws_server_ssl() { - extern crate openssl; use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod}; // load ssl keys @@ -320,7 +319,6 @@ fn test_ws_server_ssl() { #[test] #[cfg(feature = "rust-tls")] fn test_ws_server_rust_tls() { - extern crate rustls; use rustls::internal::pemfile::{certs, rsa_private_keys}; use rustls::{NoClientAuth, ServerConfig}; use std::fs::File; From d7379bd10b19ac0aa8778b89c9d41a2538d5f5d3 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 13:41:33 -0700 Subject: [PATCH 169/219] update server ssl tests; upgrade rustls --- 
CHANGES.md | 1 + Cargo.toml | 4 +- tests/identity.pfx | Bin 0 -> 5549 bytes tests/test_server.rs | 146 ++++++++++++++++++++++++++++++++++++++----- 4 files changed, 133 insertions(+), 18 deletions(-) create mode 100644 tests/identity.pfx diff --git a/CHANGES.md b/CHANGES.md index a55ef7ec2..3c55c3f64 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -23,6 +23,7 @@ * Correct usage of `no_http2` flag in `bind_*` methods. #519 + ## [0.7.8] - 2018-09-17 ### Added diff --git a/Cargo.toml b/Cargo.toml index cedb38da3..46719d709 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,8 +118,8 @@ openssl = { version="0.10", optional = true } tokio-openssl = { version="0.2", optional = true } #rustls -rustls = { version = "^0.13.1", optional = true } -tokio-rustls = { version = "^0.7.2", optional = true } +rustls = { version = "0.14", optional = true } +tokio-rustls = { version = "0.8", optional = true } webpki = { version = "0.18", optional = true } webpki-roots = { version = "0.15", optional = true } diff --git a/tests/identity.pfx b/tests/identity.pfx new file mode 100644 index 0000000000000000000000000000000000000000..946e3b8b8ae10e19a11e7ac6eead66b12fff0014 GIT binary patch literal 5549 zcmY+GRZtv&vTbn*Fa-DD5S(Fv;O=h0gF_fx!{8nycyM=jcXvr}cXtMNxH(n#z4P8j zS68jQyT2EE0A2|kEIfMvo;?yO<4>8N_ZYCqu-O54MhF3T`v0&tdjMMWe%lL7KUFAFV5{u;owkU`~uKqm}O*fBSo5RVYIG` zPAKF6ePO1*(FhW)-QpFAvtO?cVWGD0hs0WO4>qy>9T48E19Q`LuD0r#V<$Vj_c2yp z)4}gHs(AF<^Bo{R_`YId+=%k!c;why`_I0GXxv)FEmD)RG7Vs?g`z@xC^k*0+`Ps8 zZQH$QkvVVeOE(P8=g*0kvj|2!A! zCaUuJ^XO0NPlE$=oQRK9ti`Ic7JJnq;osnZ4OA`G7m{`HKP{cH&`YLS z&7cXaq1TKaOG&uTJqqY25!-%6M82yrFSuJv{`JhJUOE4ZMT1J%h8u3pSuQ$qle0}C z2}0Lk)3OpsFm1yeJ1|XW9gt0oX1PIiJ+HG_ccQbIH}C6hb|aPpRO(@Rt|u~DJIX%l zC+^x;>&_>s!^v1hwDp-biu2rspT|oc!3;MUJ>ui2i(Lamzq&B9OCcN`j&N`^wTcd1 z`DFH$T83h&1Ws&{N(khR##{-N3RSG@_ZPyM3SQ_TAXRRqyV#LUkE0&kEvE8Y61{fKJ+Y= z$eQ*1oOaeF^Z9A#-r@E7LmI)arBxsyT>V+(Ruq)&tBqzOi1sjBwNjPe_jraq(=2r^ zB|%@2bxrAnZr&d!&MNXZn&!iHPAs)aINufHy*7>&8Ap}7vY+Ji590{9Gq+KBpjkN0 zeCWDYHbCrNc%062ljm(+!6eM>ku}@T8$$TYjIT%~Y3z>YH2FxhEJ6lUL_#W3!=?ks zFo969*bgk}a!GRqXMSjjgd&?6Y%hBVuhL~74jyLlXNnJzvi?lWEo_HD7ZkXXB+vYf z**da2rqVV>3tDz^j`grt{G5Ye8`Q@KgQelPxGF%zLkZ=m7N>eor9pQmK6B$F&s9TaFeP0w?b5Q2p`A;be~m;x!mwj;I4ye%g!Op2Z^tb4S}i z#o}zOP0;F#mw3YEF_5DmxuBJQ{MZN!`^i6PwkgfA(n$bs9PQx6aZB5p$2&d$+`VjO zsH)tO=SExUR825&T#?}~JqRDb1{y6YsTMn zYINH#Ru0n1kxJc|=rVjd@`uh*nzIv3n@lt8^$P6GC5xIW1?7XLu5&GtX$Ho%rp5@a z<;*{8D{Rd@ocNX}STxWU(2moCw*4dI^{&HccPIwaXF^%V^P@gTFF*ow(`z_5$lNR7B+?<1E> zULlb}pDx~mAAb5{pg@(IH|PW09Pz6r7!}<9>(fAn8=3nRL|EV^f5o-!1D@`uY_E0= zXWciaPcKF60&?K+MAnU0gz}SuZb>wVAeRY>Lz9(&Zu_rB_JiKSV8HOqO3;vSD41G~ zIYpO4JcJ5K0+;O6x0VlNvNY%;v*XCt2V>-|%pxixP0jfH+xE^8^D3ZXs zS4AQ;$|oz)t4F{7(j7F7l{R}lJhwq)ekCh$|7`g4CXbIghAm6OSZ1L&K5-Y1w$mQa z_MBdP-vu45dbf@y=ls^2$D{4YS(f+0N}W49&$hLSRWl_WZB;uN`i7GQSU2{> z%tc5FX16D4Hc~M^2Q-}r|Th>@2!HT<`AbzLpW6XWl;-{}i)bscjNo9~Z zt>K*BFR$SlC#sBVi?$v)Y?HUyl)E2s`8W6ZXD9n+OhlM3@csT_+o|7G5+{-kChNBE zGoihxT`ld2`0D_n8Kx;B37>D}D7${tb4|NGV4*rXJY< zrP!d-L?ATkL~#^D#VB&5mZjsJsQ(E>4kqY74x%)7DdB0tEho2OB+t;4_6jhwgA|cO znS?z?-;5Y{h$UXY8kALQOM~}sKX{KB4C#6j4aK&Qk9w{6xr6GoWSrQBZ-V^FUe|IuEu|!Ho zP~_-_Q3K`aH^E3h7qd#rjmRUmA*ZPMaLSD!4zIf>se1cE%=z*bSCvx#B3|QUR$0#m9GAZ3L&y-$F1DIC& zT8gc<4SckFy#eY%&9U^Kz5>cD`L{z)t6x+lxMS@K!T}DzX*QtVExm3-I%(wjkMP#> z1(Cx)JgRslsAW|p`13(`X}#T<>eipuNW4PA4R2-@N+Vc&(y87pk_BSeS+d6)P$^TP z`?$x%WPFPWh9eb*vW#gb#gty{p~gAkXo)>T5Uf}c#r*~bw?jGhA9M7i8I5Xoby}f^ z^p4Qzi_ghn7*)HR6CmO5t+f(DYnCA3GX_!BFzYWFViu9Jn~Rh{W4lS>Ien~yruGbu z>(;O4jayyFSMEP1yQ9A{eQ^Hh(X@0+auu!ON=}-efn0d?U5LMh$zI#vo9nqvQ$058 
z0%`dSn#OpIe|FctVo1fv^b^C(=MqC(usOYfc9zoBNAJEY_F!S;aKK8MEaXZ2?J0*q z%9PBkwW53|md4e|UjFDs!BwW$KTztoZy9^)QA!(U)itkV!h$GWP*eYJRt+x@x5V%i z1J7`#SEWSE_@>C3i32lz2g3U@e}8X+mAz+L`pS5%*YB7Dxr&GE4t*>KB6I zE-@2=x9^2U&Ux`Z5<+-#U+{a@#CzMU?Leqv9AhmP6iaWp>Oq!NsxHbS=EW1!zJLz{ zMUGYEB7n6q3Er#o<|Z`u0MwrUN4&EGP-_taP%Ho8(tlHkg!X?l`~xi9ztHXDeK*m&V~nDC|pLVujbH(b=fk1EfnQhXPjX1C48B*(7^BO>cFO~ zAH*SPpf#PD###N!E9oD!h87(U5h5>lHJLd?LymUDW$S z^HqD#EbaAcgn)aq3MVH(f*KmTMjHO5dW|_+katLspc=S!v^H3wli*n6ojf^qf3K=y zKr%Yay%^*Xvh0;}J@-TW@bcRzax<}Q{(@FjI+hfy@)|L*hscS(TttTvz_%Nv+(z8GGr?Ic>si}%oD zjS`B@EE+;f!(VuH>B7DBvpXijW%<{YjhQlo9fhwvuO{nH%@)BNiq<1`YY&2FCcm1v z)V&H!Kcj^3Bt0y%<I%;IH=P zJ41oky-lS{I9?XOY~Id^5UX<7RGwt!+GLyLENxHkj-snrrD9wg(faIi6I{jBh1go{ zM*ViN9ui7wip-=5;2)#r3!%>8z?BacE<1_@ALhkFNVIX%-ABNFWSr=jiLQ6%-{KRS zWwq<$wd1t!5~>t;g)EsTUt{vq;Fq-VE-~ml6ZXxgTNfUz_@@T7 zjsLv05k4~6I(7sOoX;Me9Mx*!2n`*&G62*4lL#eRXMj)}# zKr96@p5g@T(q@%oV#NRfPt?05WUo1wQk&aO{Pmwj7rdpbgcv6`Z78Sujv^Cw!}t9r`^Lc8t$WvLza7iBfkmc zDDY9Tit~6VzqS0L=Ayqpm%buj8Ag(=8<}4__o+jk*7sRDS~t>ecLxvu9W`08b;!5* zVykt!s+BvMXq)=q=y6Z4d=}r*-a;}j-*Qyy?4M4cOLy-!>N4}f4H%^$+ju=$d;iJ7 zPw8!Js)`{-TTglxv%Sm^K45gm#!5u2ni5W%dv9_+kEJG*<)*>LnTIO3 zP6HOD&&FrUjtF+byLCKHD-ACb%_vi{l0xPj|E3=MNnc)wn90{jb#I)vL$jsxtHlPu zCN(WOd)y*If;(Pc2D_T*H8TGC6#FjyA&L=1eB$U3^IK?5o$tR7-J`4e7hlpb7w~C+Agyessj{qzudSzQHf)p zE3bG;_o5JE@|R;^XOzYSMRqL`ey& z7cxd^&=J@~sxSdNJ~4`WKG3Pf5(-q%1&@Bp@h^T2p}7IVJL}{Ir#}ZYrY(`qOdx+V z!4-keXG9fPwZ72Z>0InsQ!U51)YwT!#5(o7Q73y)O7Vj^xant+Mw#45BH$9?B1vV- zK-S5Bmx_T9S^3JTc)fx@Q&R72RSU{u74roB{&3hlzknbj{dy%m?DxD5GH*40Tk$q; zDxcEj{-~XXePdPnzs!=S2lkV$IweaW8lBM@z7iZlAu5N%pYus=+D*DQyFs=GYfXl#{yYa^Ur z^wdzws}mB6Y~FeN1FIrbx)Pe=LH~}x^wgbl7Mnn)HlS|~MKWejJ*=#vF+_%p7!)FC z*>GliQit=X=~)<^UO-kl>$hKAhy@PbN%T6E8AFDRIPqP?3nKbu9SC1*x-_q%oa>un z)jigLjSg;Ie=Sf4&{X0(O6asc*}f}KP)&dmbcEmjIu<#h|51e3-$TXpM$ACpy4wx^ z7~&b%^6mSNlP@DXnZzWuf4O5+N;qfkl$8#UJw?Z-lL=nl=k*Qj5K!X_jNe5*ceN|1 zJO(G;HL??jza#IvW7CL&O%M}9wD`Q<7M?twO`H?+^WCB~JG8Y5`NHaesh58pW(2{G0QEPc8si#*pBZg zbmb#%Lx#-6A2aE}_`5AxI8xhZ#FL@NXPYMwK!#-*abrgDL>W=V)NBx_S5!i=ATh~B zoT!rujGkse$2dK9!A$E8ILE})=TNqjus$Y5VZgwNlvHx9Q@B&7X378MKMHcNc9XmD z{1dA1N8T7+{i%3V@mO!?q$Lgg=zTVwG9vk)dN9s4cwe literal 0 HcmV?d00001 diff --git a/tests/test_server.rs b/tests/test_server.rs index 9c17fd665..240a5ddc0 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -15,8 +15,12 @@ extern crate tokio_current_thread as current_thread; extern crate tokio_reactor; extern crate tokio_tcp; +#[cfg(feature = "tls")] +extern crate native_tls; #[cfg(feature = "ssl")] extern crate openssl; +#[cfg(feature = "rust-tls")] +extern crate rustls; use std::io::{Read, Write}; use std::sync::Arc; @@ -890,10 +894,13 @@ fn test_brotli_encoding_large() { assert_eq!(bytes, Bytes::from(data)); } -#[cfg(all(feature = "brotli", future = "ssl"))] +#[cfg(all(feature = "brotli", feature = "ssl"))] #[test] -fn test_ssl_brotli_encoding_large() { - use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod}; +fn test_brotli_encoding_large_ssl() { + use actix::{Actor, System}; + use openssl::ssl::{ + SslAcceptor, SslConnector, SslFiletype, SslMethod, SslVerifyMode, + }; // load ssl keys let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); builder @@ -904,7 +911,7 @@ fn test_ssl_brotli_encoding_large() { .unwrap(); let data = STR.repeat(10); - let mut srv = test::TestServer::build().ssl(builder).start(|app| { + let srv = test::TestServer::build().ssl(builder).start(|app| { app.handler(|req: &HttpRequest| { req.body() .and_then(|bytes: Bytes| { @@ -914,28 +921,39 @@ fn 
test_ssl_brotli_encoding_large() { }).responder() }) }); + let mut rt = System::new("test"); + // client connector + let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); + builder.set_verify(SslVerifyMode::NONE); + let conn = client::ClientConnector::with_connector(builder.build()).start(); + + // body let mut e = BrotliEncoder::new(Vec::new(), 5); e.write_all(data.as_ref()).unwrap(); let enc = e.finish().unwrap(); // client request - let request = srv - .post() + let request = client::ClientRequest::build() + .uri(srv.url("/")) + .method(http::Method::POST) .header(http::header::CONTENT_ENCODING, "br") + .with_connector(conn) .body(enc) .unwrap(); - let response = srv.execute(request.send()).unwrap(); + let response = rt.block_on(request.send()).unwrap(); assert!(response.status().is_success()); // read response - let bytes = srv.execute(response.body()).unwrap(); + let bytes = rt.block_on(response.body()).unwrap(); assert_eq!(bytes, Bytes::from(data)); } -#[cfg(future = "rust-ssl")] +#[cfg(all(feature = "rust-tls", feature = "ssl"))] #[test] fn test_reading_deflate_encoding_large_random_ssl() { + use actix::{Actor, System}; + use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode}; use rustls::internal::pemfile::{certs, rsa_private_keys}; use rustls::{NoClientAuth, ServerConfig}; use std::fs::File; @@ -954,7 +972,7 @@ fn test_reading_deflate_encoding_large_random_ssl() { .take(160_000) .collect::(); - let mut srv = test::TestServer::build().rustls(config).start(|app| { + let srv = test::TestServer::build().rustls(config).start(|app| { app.handler(|req: &HttpRequest| { req.body() .and_then(|bytes: Bytes| { @@ -965,25 +983,120 @@ fn test_reading_deflate_encoding_large_random_ssl() { }) }); + let mut rt = System::new("test"); + + // client connector + let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); + builder.set_verify(SslVerifyMode::NONE); + let conn = client::ClientConnector::with_connector(builder.build()).start(); + + // encode data let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(data.as_ref()).unwrap(); let enc = e.finish().unwrap(); // client request - let request = srv - .post() + let request = client::ClientRequest::build() + .uri(srv.url("/")) + .method(http::Method::POST) .header(http::header::CONTENT_ENCODING, "deflate") + .with_connector(conn) .body(enc) .unwrap(); - let response = srv.execute(request.send()).unwrap(); + let response = rt.block_on(request.send()).unwrap(); assert!(response.status().is_success()); // read response - let bytes = srv.execute(response.body()).unwrap(); + let bytes = rt.block_on(response.body()).unwrap(); assert_eq!(bytes.len(), data.len()); assert_eq!(bytes, Bytes::from(data)); } +#[cfg(all(feature = "tls", feature = "ssl"))] +#[test] +fn test_reading_deflate_encoding_large_random_tls() { + use native_tls::{Identity, TlsAcceptor}; + use openssl::ssl::{ + SslAcceptor, SslConnector, SslFiletype, SslMethod, SslVerifyMode, + }; + use std::fs::File; + use std::sync::mpsc; + + use actix::{Actor, System}; + let (tx, rx) = mpsc::channel(); + + // load ssl keys + let mut file = File::open("tests/identity.pfx").unwrap(); + let mut identity = vec![]; + file.read_to_end(&mut identity).unwrap(); + let identity = Identity::from_pkcs12(&identity, "1").unwrap(); + let acceptor = TlsAcceptor::new(identity).unwrap(); + + // load ssl keys + let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); + builder + .set_private_key_file("tests/key.pem", SslFiletype::PEM) + .unwrap(); + 
builder + .set_certificate_chain_file("tests/cert.pem") + .unwrap(); + + let data = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(160_000) + .collect::(); + + let addr = test::TestServer::unused_addr(); + thread::spawn(move || { + System::run(move || { + server::new(|| { + App::new().handler("/", |req: &HttpRequest| { + req.body() + .and_then(|bytes: Bytes| { + Ok(HttpResponse::Ok() + .content_encoding(http::ContentEncoding::Identity) + .body(bytes)) + }).responder() + }) + }).bind_tls(addr, acceptor) + .unwrap() + .start(); + let _ = tx.send(System::current()); + }); + }); + let sys = rx.recv().unwrap(); + + let mut rt = System::new("test"); + + // client connector + let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); + builder.set_verify(SslVerifyMode::NONE); + let conn = client::ClientConnector::with_connector(builder.build()).start(); + + // encode data + let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); + e.write_all(data.as_ref()).unwrap(); + let enc = e.finish().unwrap(); + + // client request + let request = client::ClientRequest::build() + .uri(format!("https://{}/", addr)) + .method(http::Method::POST) + .header(http::header::CONTENT_ENCODING, "deflate") + .with_connector(conn) + .body(enc) + .unwrap(); + let response = rt.block_on(request.send()).unwrap(); + assert!(response.status().is_success()); + + // read response + let bytes = rt.block_on(response.body()).unwrap(); + assert_eq!(bytes.len(), data.len()); + assert_eq!(bytes, Bytes::from(data)); + + let _ = sys.stop(); +} + #[test] fn test_h2() { let srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR))); @@ -1160,7 +1273,6 @@ fn test_slow_request() { let (tx, rx) = mpsc::channel(); let addr = test::TestServer::unused_addr(); - thread::spawn(move || { System::run(move || { let srv = server::new(|| { @@ -1282,5 +1394,7 @@ fn test_ssl_handshake_timeout() { let mut stream = net::TcpStream::connect(addr).unwrap(); let mut data = String::new(); let _ = stream.read_to_string(&mut data); - assert!(data.is_empty()) + assert!(data.is_empty()); + + let _ = sys.stop(); } From ae5c4dfb7812caaa95b550f379fa3312dd6fcd01 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 15:25:32 -0700 Subject: [PATCH 170/219] refactor http channels list; rename WorkerSettings --- src/client/connector.rs | 1 + src/server/acceptor.rs | 17 +++-- src/server/builder.rs | 4 +- src/server/channel.rs | 148 ++++++++++++++++++++-------------------- src/server/h1.rs | 32 ++++----- src/server/h1decoder.rs | 6 +- src/server/h1writer.rs | 6 +- src/server/h2.rs | 10 +-- src/server/h2writer.rs | 8 +-- src/server/incoming.rs | 6 +- src/server/mod.rs | 2 +- src/server/service.rs | 20 ++++-- src/server/settings.rs | 43 ++++++------ tests/test_server.rs | 4 +- 14 files changed, 157 insertions(+), 150 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 90a2e1c88..07c7b646d 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -293,6 +293,7 @@ impl Default for ClientConnector { } }; + #[cfg_attr(feature = "cargo-clippy", allow(clippy::let_unit_value))] ClientConnector::with_connector_impl(connector) } } diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs index 3dcd8ac88..79d133d2d 100644 --- a/src/server/acceptor.rs +++ b/src/server/acceptor.rs @@ -9,9 +9,10 @@ use tokio_reactor::Handle; use tokio_tcp::TcpStream; use tokio_timer::{sleep, Delay}; +use super::channel::HttpProtocol; use super::error::AcceptorError; use 
super::handler::HttpHandler; -use super::settings::WorkerSettings; +use super::settings::ServiceConfig; use super::IoStream; /// This trait indicates types that can create acceptor service for http server. @@ -271,7 +272,7 @@ impl Future for AcceptorTimeoutResponse { pub(crate) struct ServerMessageAcceptor { inner: T, - settings: WorkerSettings, + settings: ServiceConfig, } impl ServerMessageAcceptor @@ -279,7 +280,7 @@ where H: HttpHandler, T: NewService, { - pub(crate) fn new(settings: WorkerSettings, inner: T) -> Self { + pub(crate) fn new(settings: ServiceConfig, inner: T) -> Self { ServerMessageAcceptor { inner, settings } } } @@ -310,7 +311,7 @@ where T: NewService, { fut: T::Future, - settings: WorkerSettings, + settings: ServiceConfig, } impl Future for ServerMessageAcceptorResponse @@ -334,7 +335,7 @@ where pub(crate) struct ServerMessageAcceptorService { inner: T, - settings: WorkerSettings, + settings: ServiceConfig, } impl Service for ServerMessageAcceptorService @@ -359,9 +360,11 @@ where fut: self.inner.call(stream), }) } - ServerMessage::Shutdown(timeout) => Either::B(ok(())), + ServerMessage::Shutdown(_) => Either::B(ok(())), ServerMessage::ForceShutdown => { - // self.settings.head().traverse::(); + self.settings + .head() + .traverse(|proto: &mut HttpProtocol| proto.shutdown()); Either::B(ok(())) } } diff --git a/src/server/builder.rs b/src/server/builder.rs index 8a979752e..ec6ce9923 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -10,7 +10,7 @@ use super::acceptor::{ use super::error::AcceptorError; use super::handler::IntoHttpHandler; use super::service::HttpService; -use super::settings::{ServerSettings, WorkerSettings}; +use super::settings::{ServerSettings, ServiceConfig}; use super::KeepAlive; pub(crate) trait ServiceProvider { @@ -50,7 +50,7 @@ where let acceptor = self.acceptor.clone(); move || { let app = (factory)().into_handler(); - let settings = WorkerSettings::new( + let settings = ServiceConfig::new( app, keep_alive, client_timeout, diff --git a/src/server/channel.rs b/src/server/channel.rs index f57806209..513601ac9 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -1,5 +1,5 @@ use std::net::{Shutdown, SocketAddr}; -use std::{io, ptr, time}; +use std::{io, mem, time}; use bytes::{Buf, BufMut, BytesMut}; use futures::{Async, Future, Poll}; @@ -7,16 +7,35 @@ use tokio_io::{AsyncRead, AsyncWrite}; use tokio_timer::Delay; use super::error::HttpDispatchError; -use super::settings::WorkerSettings; +use super::settings::ServiceConfig; use super::{h1, h2, HttpHandler, IoStream}; use http::StatusCode; const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0"; -enum HttpProtocol { +pub(crate) enum HttpProtocol { H1(h1::Http1Dispatcher), H2(h2::Http2), - Unknown(WorkerSettings, Option, T, BytesMut), + Unknown(ServiceConfig, Option, T, BytesMut), + None, +} + +impl HttpProtocol { + pub(crate) fn shutdown(&mut self) { + match self { + HttpProtocol::H1(ref mut h1) => { + let io = h1.io(); + let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0))); + let _ = IoStream::shutdown(io, Shutdown::Both); + } + HttpProtocol::H2(ref mut h2) => h2.shutdown(), + HttpProtocol::Unknown(_, _, io, _) => { + let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0))); + let _ = IoStream::shutdown(io, Shutdown::Both); + } + HttpProtocol::None => (), + } + } } enum ProtocolKind { @@ -30,8 +49,8 @@ where T: IoStream, H: HttpHandler + 'static, { - proto: Option>, - node: Option>>, + node: Node>, + node_reg: bool, ka_timeout: Option, } @@ -41,12 
+60,14 @@ where H: HttpHandler + 'static, { pub(crate) fn new( - settings: WorkerSettings, io: T, peer: Option, + settings: ServiceConfig, io: T, peer: Option, ) -> HttpChannel { + let ka_timeout = settings.client_timer(); + HttpChannel { - node: None, - ka_timeout: settings.client_timer(), - proto: Some(HttpProtocol::Unknown( + ka_timeout, + node_reg: false, + node: Node::new(HttpProtocol::Unknown( settings, peer, io, @@ -54,18 +75,6 @@ where )), } } - - pub(crate) fn shutdown(&mut self) { - match self.proto { - Some(HttpProtocol::H1(ref mut h1)) => { - let io = h1.io(); - let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0))); - let _ = IoStream::shutdown(io, Shutdown::Both); - } - Some(HttpProtocol::H2(ref mut h2)) => h2.shutdown(), - _ => (), - } - } } impl Drop for HttpChannel @@ -74,9 +83,7 @@ where H: HttpHandler + 'static, { fn drop(&mut self) { - if let Some(mut node) = self.node.take() { - node.remove() - } + self.node.remove(); } } @@ -94,17 +101,16 @@ where match self.ka_timeout.as_mut().unwrap().poll() { Ok(Async::Ready(_)) => { trace!("Slow request timed out, close connection"); - if let Some(HttpProtocol::Unknown(settings, _, io, buf)) = - self.proto.take() - { - self.proto = - Some(HttpProtocol::H1(h1::Http1Dispatcher::for_error( + let proto = mem::replace(self.node.get_mut(), HttpProtocol::None); + if let HttpProtocol::Unknown(settings, _, io, buf) = proto { + *self.node.get_mut() = + HttpProtocol::H1(h1::Http1Dispatcher::for_error( settings, io, StatusCode::REQUEST_TIMEOUT, self.ka_timeout.take(), buf, - ))); + )); return self.poll(); } return Ok(Async::Ready(())); @@ -114,28 +120,22 @@ where } } - if self.node.is_none() { - let el = self as *mut _; - self.node = Some(Node::new(el)); - let _ = match self.proto { - Some(HttpProtocol::H1(ref mut h1)) => { - self.node.as_mut().map(|n| h1.settings().head().insert(n)) - } - Some(HttpProtocol::H2(ref mut h2)) => { - self.node.as_mut().map(|n| h2.settings().head().insert(n)) - } - Some(HttpProtocol::Unknown(ref mut settings, _, _, _)) => { - self.node.as_mut().map(|n| settings.head().insert(n)) - } - None => unreachable!(), + if !self.node_reg { + self.node_reg = true; + let settings = match self.node.get_mut() { + HttpProtocol::H1(ref mut h1) => h1.settings().clone(), + HttpProtocol::H2(ref mut h2) => h2.settings().clone(), + HttpProtocol::Unknown(ref mut settings, _, _, _) => settings.clone(), + HttpProtocol::None => unreachable!(), }; + settings.head().insert(&mut self.node); } let mut is_eof = false; - let kind = match self.proto { - Some(HttpProtocol::H1(ref mut h1)) => return h1.poll(), - Some(HttpProtocol::H2(ref mut h2)) => return h2.poll(), - Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => { + let kind = match self.node.get_mut() { + HttpProtocol::H1(ref mut h1) => return h1.poll(), + HttpProtocol::H2(ref mut h2) => return h2.poll(), + HttpProtocol::Unknown(_, _, ref mut io, ref mut buf) => { let mut err = None; let mut disconnect = false; match io.read_available(buf) { @@ -168,31 +168,32 @@ where return Ok(Async::NotReady); } } - None => unreachable!(), + HttpProtocol::None => unreachable!(), }; // upgrade to specific http protocol - if let Some(HttpProtocol::Unknown(settings, addr, io, buf)) = self.proto.take() { + let proto = mem::replace(self.node.get_mut(), HttpProtocol::None); + if let HttpProtocol::Unknown(settings, addr, io, buf) = proto { match kind { ProtocolKind::Http1 => { - self.proto = Some(HttpProtocol::H1(h1::Http1Dispatcher::new( + *self.node.get_mut() = 
HttpProtocol::H1(h1::Http1Dispatcher::new( settings, io, addr, buf, is_eof, self.ka_timeout.take(), - ))); + )); return self.poll(); } ProtocolKind::Http2 => { - self.proto = Some(HttpProtocol::H2(h2::Http2::new( + *self.node.get_mut() = HttpProtocol::H2(h2::Http2::new( settings, io, addr, buf.freeze(), self.ka_timeout.take(), - ))); + )); return self.poll(); } } @@ -204,18 +205,22 @@ where pub(crate) struct Node { next: Option<*mut Node>, prev: Option<*mut Node>, - element: *mut T, + element: T, } impl Node { - fn new(el: *mut T) -> Self { + fn new(element: T) -> Self { Node { + element, next: None, prev: None, - element: el, } } + fn get_mut(&mut self) -> &mut T { + &mut self.element + } + fn insert(&mut self, next_el: &mut Node) { let next: *mut Node = next_el as *const _ as *mut _; @@ -235,7 +240,6 @@ impl Node { } fn remove(&mut self) { - self.element = ptr::null_mut(); let next = self.next.take(); let prev = self.prev.take(); @@ -257,30 +261,28 @@ impl Node<()> { Node { next: None, prev: None, - element: ptr::null_mut(), + element: (), } } - pub(crate) fn traverse)>(&self, f: F) + pub(crate) fn traverse)>(&self, f: F) where T: IoStream, H: HttpHandler + 'static, { - let mut next = self.next.as_ref(); - loop { - if let Some(n) = next { - unsafe { - let n: &Node<()> = &*(n.as_ref().unwrap() as *const _); - next = n.next.as_ref(); + if let Some(n) = self.next.as_ref() { + unsafe { + let mut next: &mut Node> = + &mut *(n.as_ref().unwrap() as *const _ as *mut _); + loop { + f(&mut next.element); - if !n.element.is_null() { - let ch: &mut HttpChannel = - &mut *(&mut *(n.element as *mut _) as *mut () as *mut _); - f(ch); + next = if let Some(n) = next.next.as_ref() { + &mut **n + } else { + return; } } - } else { - return; } } } diff --git a/src/server/h1.rs b/src/server/h1.rs index af7e65297..53c4e2cf5 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -16,7 +16,7 @@ use super::h1decoder::{DecoderError, H1Decoder, Message}; use super::h1writer::H1Writer; use super::handler::{HttpHandler, HttpHandlerTask, HttpHandlerTaskFut}; use super::input::PayloadType; -use super::settings::WorkerSettings; +use super::settings::ServiceConfig; use super::{IoStream, Writer}; const MAX_PIPELINED_MESSAGES: usize = 16; @@ -37,7 +37,7 @@ bitflags! 
{ /// Dispatcher for HTTP/1.1 protocol pub struct Http1Dispatcher { flags: Flags, - settings: WorkerSettings, + settings: ServiceConfig, addr: Option, stream: H1Writer, decoder: H1Decoder, @@ -87,7 +87,7 @@ where H: HttpHandler + 'static, { pub fn new( - settings: WorkerSettings, stream: T, addr: Option, buf: BytesMut, + settings: ServiceConfig, stream: T, addr: Option, buf: BytesMut, is_eof: bool, keepalive_timer: Option, ) -> Self { let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer { @@ -122,7 +122,7 @@ where } pub(crate) fn for_error( - settings: WorkerSettings, stream: T, status: StatusCode, + settings: ServiceConfig, stream: T, status: StatusCode, mut keepalive_timer: Option, buf: BytesMut, ) -> Self { if let Some(deadline) = settings.client_timer_expire() { @@ -147,7 +147,7 @@ where } #[inline] - pub fn settings(&self) -> &WorkerSettings { + pub fn settings(&self) -> &ServiceConfig { &self.settings } @@ -259,7 +259,7 @@ where Err(err) => { debug!("Error sending data: {}", err); self.client_disconnected(false); - return Err(err.into()); + Err(err.into()) } Ok(Async::Ready(_)) => { // if payload is not consumed we can not use connection @@ -347,10 +347,8 @@ where if self.can_read() && self.tasks.len() < MAX_PIPELINED_MESSAGES { match self.stream.get_mut().read_available(&mut self.buf) { Ok(Async::Ready((read_some, disconnected))) => { - if read_some { - if self.parse()? { - updated = true; - } + if read_some && self.parse()? { + updated = true; } if disconnected { self.client_disconnected(true); @@ -397,11 +395,9 @@ where // if read-backpressure is enabled and we consumed some data. // we may read more dataand retry - if !retry && self.can_read() { - if self.poll_io()? { - retry = self.can_read(); - continue; - } + if !retry && self.can_read() && self.poll_io()? 
{ + retry = self.can_read(); + continue; } break; } @@ -597,11 +593,11 @@ mod tests { use httpmessage::HttpMessage; use server::h1decoder::Message; use server::handler::IntoHttpHandler; - use server::settings::{ServerSettings, WorkerSettings}; + use server::settings::{ServerSettings, ServiceConfig}; use server::{KeepAlive, Request}; - fn wrk_settings() -> WorkerSettings { - WorkerSettings::::new( + fn wrk_settings() -> ServiceConfig { + ServiceConfig::::new( App::new().into_handler(), KeepAlive::Os, 5000, diff --git a/src/server/h1decoder.rs b/src/server/h1decoder.rs index a7531bbbd..434dc42df 100644 --- a/src/server/h1decoder.rs +++ b/src/server/h1decoder.rs @@ -5,7 +5,7 @@ use futures::{Async, Poll}; use httparse; use super::message::{MessageFlags, Request}; -use super::settings::WorkerSettings; +use super::settings::ServiceConfig; use error::ParseError; use http::header::{HeaderName, HeaderValue}; use http::{header, HttpTryFrom, Method, Uri, Version}; @@ -43,7 +43,7 @@ impl H1Decoder { } pub fn decode( - &mut self, src: &mut BytesMut, settings: &WorkerSettings, + &mut self, src: &mut BytesMut, settings: &ServiceConfig, ) -> Result, DecoderError> { // read payload if self.decoder.is_some() { @@ -80,7 +80,7 @@ impl H1Decoder { } fn parse_message( - &self, buf: &mut BytesMut, settings: &WorkerSettings, + &self, buf: &mut BytesMut, settings: &ServiceConfig, ) -> Poll<(Request, Option), ParseError> { // Parse http message let mut has_upgrade = false; diff --git a/src/server/h1writer.rs b/src/server/h1writer.rs index 5c32de3aa..c27a4c44a 100644 --- a/src/server/h1writer.rs +++ b/src/server/h1writer.rs @@ -8,7 +8,7 @@ use tokio_io::AsyncWrite; use super::helpers; use super::output::{Output, ResponseInfo, ResponseLength}; -use super::settings::WorkerSettings; +use super::settings::ServiceConfig; use super::Request; use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE}; use body::{Binary, Body}; @@ -37,11 +37,11 @@ pub(crate) struct H1Writer { headers_size: u32, buffer: Output, buffer_capacity: usize, - settings: WorkerSettings, + settings: ServiceConfig, } impl H1Writer { - pub fn new(stream: T, settings: WorkerSettings) -> H1Writer { + pub fn new(stream: T, settings: ServiceConfig) -> H1Writer { H1Writer { flags: Flags::KEEPALIVE, written: 0, diff --git a/src/server/h2.rs b/src/server/h2.rs index 589e77c2d..312b51df9 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -22,7 +22,7 @@ use uri::Url; use super::error::{HttpDispatchError, ServerError}; use super::h2writer::H2Writer; use super::input::PayloadType; -use super::settings::WorkerSettings; +use super::settings::ServiceConfig; use super::{HttpHandler, HttpHandlerTask, IoStream, Writer}; bitflags! 
{ @@ -38,7 +38,7 @@ where H: HttpHandler + 'static, { flags: Flags, - settings: WorkerSettings, + settings: ServiceConfig, addr: Option, state: State>, tasks: VecDeque>, @@ -58,7 +58,7 @@ where H: HttpHandler + 'static, { pub fn new( - settings: WorkerSettings, io: T, addr: Option, buf: Bytes, + settings: ServiceConfig, io: T, addr: Option, buf: Bytes, keepalive_timer: Option, ) -> Self { let extensions = io.extensions(); @@ -82,7 +82,7 @@ where self.keepalive_timer.take(); } - pub fn settings(&self) -> &WorkerSettings { + pub fn settings(&self) -> &ServiceConfig { &self.settings } @@ -338,7 +338,7 @@ struct Entry { impl Entry { fn new( parts: Parts, recv: RecvStream, resp: SendResponse, - addr: Option, settings: WorkerSettings, + addr: Option, settings: ServiceConfig, extensions: Option>, ) -> Entry where diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index 0893b5b62..51d4dce6f 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -14,7 +14,7 @@ use modhttp::Response; use super::helpers; use super::message::Request; use super::output::{Output, ResponseInfo, ResponseLength}; -use super::settings::WorkerSettings; +use super::settings::ServiceConfig; use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE}; use body::{Binary, Body}; use header::ContentEncoding; @@ -42,13 +42,11 @@ pub(crate) struct H2Writer { written: u64, buffer: Output, buffer_capacity: usize, - settings: WorkerSettings, + settings: ServiceConfig, } impl H2Writer { - pub fn new( - respond: SendResponse, settings: WorkerSettings, - ) -> H2Writer { + pub fn new(respond: SendResponse, settings: ServiceConfig) -> H2Writer { H2Writer { stream: None, flags: Flags::empty(), diff --git a/src/server/incoming.rs b/src/server/incoming.rs index c4e984b9d..f2bc1d8f5 100644 --- a/src/server/incoming.rs +++ b/src/server/incoming.rs @@ -8,7 +8,7 @@ use tokio_io::{AsyncRead, AsyncWrite}; use super::channel::{HttpChannel, WrapperStream}; use super::handler::{HttpHandler, IntoHttpHandler}; use super::http::HttpServer; -use super::settings::{ServerSettings, WorkerSettings}; +use super::settings::{ServerSettings, ServiceConfig}; impl Message for WrapperStream { type Result = (); @@ -32,7 +32,7 @@ where // set server settings let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); let apps = (self.factory)().into_handler(); - let settings = WorkerSettings::new( + let settings = ServiceConfig::new( apps, self.keep_alive, self.client_timeout, @@ -49,7 +49,7 @@ where } struct HttpIncoming { - settings: WorkerSettings, + settings: ServiceConfig, } impl Actor for HttpIncoming { diff --git a/src/server/mod.rs b/src/server/mod.rs index 456b46183..d6e9f26b1 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -143,7 +143,7 @@ pub use self::message::Request; pub use self::ssl::*; pub use self::error::{AcceptorError, HttpDispatchError}; -pub use self::settings::{ServerSettings, WorkerSettings, WorkerSettingsBuilder}; +pub use self::settings::{ServerSettings, ServiceConfig, ServiceConfigBuilder}; #[doc(hidden)] pub use self::service::{HttpService, StreamConfiguration}; diff --git a/src/server/service.rs b/src/server/service.rs index 231ac599e..ec71a1f1f 100644 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -8,7 +8,7 @@ use futures::{Async, Poll}; use super::channel::HttpChannel; use super::error::HttpDispatchError; use super::handler::HttpHandler; -use super::settings::WorkerSettings; +use super::settings::ServiceConfig; use super::IoStream; /// `NewService` implementation for HTTP1/HTTP2 transports @@ 
-17,7 +17,7 @@ where H: HttpHandler, Io: IoStream, { - settings: WorkerSettings, + settings: ServiceConfig, _t: PhantomData, } @@ -27,7 +27,7 @@ where Io: IoStream, { /// Create new `HttpService` instance. - pub fn new(settings: WorkerSettings) -> Self { + pub fn new(settings: ServiceConfig) -> Self { HttpService { settings, _t: PhantomData, @@ -57,7 +57,7 @@ where H: HttpHandler, Io: IoStream, { - settings: WorkerSettings, + settings: ServiceConfig, _t: PhantomData, } @@ -66,7 +66,7 @@ where H: HttpHandler, Io: IoStream, { - fn new(settings: WorkerSettings) -> HttpServiceHandler { + fn new(settings: ServiceConfig) -> HttpServiceHandler { HttpServiceHandler { settings, _t: PhantomData, @@ -103,6 +103,12 @@ pub struct StreamConfiguration { _t: PhantomData<(T, E)>, } +impl Default for StreamConfiguration { + fn default() -> Self { + Self::new() + } +} + impl StreamConfiguration { /// Create new `StreamConfigurationService` instance. pub fn new() -> Self { @@ -136,8 +142,8 @@ impl NewService for StreamConfiguration { fn new_service(&self) -> Self::Future { ok(StreamConfigurationService { - no_delay: self.no_delay.clone(), - tcp_ka: self.tcp_ka.clone(), + no_delay: self.no_delay, + tcp_ka: self.tcp_ka, _t: PhantomData, }) } diff --git a/src/server/settings.rs b/src/server/settings.rs index 2f306073c..3798fae50 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -127,7 +127,8 @@ impl ServerSettings { // "Sun, 06 Nov 1994 08:49:37 GMT".len() const DATE_VALUE_LENGTH: usize = 29; -pub struct WorkerSettings(Rc>); +/// Http service configuration +pub struct ServiceConfig(Rc>); struct Inner { handler: H, @@ -141,18 +142,18 @@ struct Inner { date: UnsafeCell<(bool, Date)>, } -impl Clone for WorkerSettings { +impl Clone for ServiceConfig { fn clone(&self) -> Self { - WorkerSettings(self.0.clone()) + ServiceConfig(self.0.clone()) } } -impl WorkerSettings { - /// Create instance of `WorkerSettings` +impl ServiceConfig { + /// Create instance of `ServiceConfig` pub(crate) fn new( handler: H, keep_alive: KeepAlive, client_timeout: u64, client_shutdown: u64, settings: ServerSettings, - ) -> WorkerSettings { + ) -> ServiceConfig { let (keep_alive, ka_enabled) = match keep_alive { KeepAlive::Timeout(val) => (val as u64, true), KeepAlive::Os | KeepAlive::Tcp(_) => (0, true), @@ -164,7 +165,7 @@ impl WorkerSettings { None }; - WorkerSettings(Rc::new(Inner { + ServiceConfig(Rc::new(Inner { handler, keep_alive, ka_enabled, @@ -178,8 +179,8 @@ impl WorkerSettings { } /// Create worker settings builder. - pub fn build(handler: H) -> WorkerSettingsBuilder { - WorkerSettingsBuilder::new(handler) + pub fn build(handler: H) -> ServiceConfigBuilder { + ServiceConfigBuilder::new(handler) } pub(crate) fn head(&self) -> RefMut> { @@ -220,7 +221,7 @@ impl WorkerSettings { } } -impl WorkerSettings { +impl ServiceConfig { #[inline] /// Client timeout for first request. pub fn client_timer(&self) -> Option { @@ -319,11 +320,11 @@ impl WorkerSettings { } } -/// An worker settings builder +/// A service config builder /// -/// This type can be used to construct an instance of `WorkerSettings` through a +/// This type can be used to construct an instance of `ServiceConfig` through a /// builder-like pattern. 
-pub struct WorkerSettingsBuilder { +pub struct ServiceConfigBuilder { handler: H, keep_alive: KeepAlive, client_timeout: u64, @@ -333,10 +334,10 @@ pub struct WorkerSettingsBuilder { secure: bool, } -impl WorkerSettingsBuilder { - /// Create instance of `WorkerSettingsBuilder` - pub fn new(handler: H) -> WorkerSettingsBuilder { - WorkerSettingsBuilder { +impl ServiceConfigBuilder { + /// Create instance of `ServiceConfigBuilder` + pub fn new(handler: H) -> ServiceConfigBuilder { + ServiceConfigBuilder { handler, keep_alive: KeepAlive::Timeout(5), client_timeout: 5000, @@ -419,12 +420,12 @@ impl WorkerSettingsBuilder { self } - /// Finish worker settings configuration and create `WorkerSettings` object. - pub fn finish(self) -> WorkerSettings { + /// Finish service configuration and create `ServiceConfig` object. + pub fn finish(self) -> ServiceConfig { let settings = ServerSettings::new(self.addr, &self.host, self.secure); let client_shutdown = if self.secure { self.client_shutdown } else { 0 }; - WorkerSettings::new( + ServiceConfig::new( self.handler, self.keep_alive, self.client_timeout, @@ -507,7 +508,7 @@ mod tests { let mut rt = current_thread::Runtime::new().unwrap(); let _ = rt.block_on(future::lazy(|| { - let settings = WorkerSettings::<()>::new( + let settings = ServiceConfig::<()>::new( (), KeepAlive::Os, 0, diff --git a/tests/test_server.rs b/tests/test_server.rs index 240a5ddc0..8d9a400d8 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -1228,7 +1228,7 @@ fn test_custom_pipeline() { use actix::System; use actix_net::service::NewServiceExt; use actix_web::server::{ - HttpService, KeepAlive, StreamConfiguration, WorkerSettings, + HttpService, KeepAlive, ServiceConfig, StreamConfiguration, }; let addr = test::TestServer::unused_addr(); @@ -1239,7 +1239,7 @@ fn test_custom_pipeline() { let app = App::new() .route("/", http::Method::GET, |_: HttpRequest| "OK") .finish(); - let settings = WorkerSettings::build(app) + let settings = ServiceConfig::build(app) .keep_alive(KeepAlive::Disabled) .client_timeout(1000) .client_shutdown(1000) From 2710f70e394700c58dbf1951d19bd0b249fbf279 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 17:30:29 -0700 Subject: [PATCH 171/219] add H1 transport --- src/server/channel.rs | 85 ++++++++++++++++++++++++++++------ src/server/h1.rs | 13 ++++-- src/server/h2.rs | 4 +- src/server/incoming.rs | 4 +- src/server/mod.rs | 20 ++++++-- src/server/service.rs | 87 ++++++++++++++++++++++++++++++++++- src/server/ssl/nativetls.rs | 7 ++- src/server/ssl/openssl.rs | 7 ++- src/server/ssl/rustls.rs | 7 ++- tests/test_custom_pipeline.rs | 81 ++++++++++++++++++++++++++++++++ tests/test_server.rs | 43 ----------------- 11 files changed, 284 insertions(+), 74 deletions(-) create mode 100644 tests/test_custom_pipeline.rs diff --git a/src/server/channel.rs b/src/server/channel.rs index 513601ac9..cbbe1a95e 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -1,4 +1,4 @@ -use std::net::{Shutdown, SocketAddr}; +use std::net::Shutdown; use std::{io, mem, time}; use bytes::{Buf, BufMut, BytesMut}; @@ -16,7 +16,7 @@ const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0"; pub(crate) enum HttpProtocol { H1(h1::Http1Dispatcher), H2(h2::Http2), - Unknown(ServiceConfig, Option, T, BytesMut), + Unknown(ServiceConfig, T, BytesMut), None, } @@ -29,7 +29,7 @@ impl HttpProtocol { let _ = IoStream::shutdown(io, Shutdown::Both); } HttpProtocol::H2(ref mut h2) => h2.shutdown(), - HttpProtocol::Unknown(_, _, io, _) => { + HttpProtocol::Unknown(_, 
io, _) => { let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0))); let _ = IoStream::shutdown(io, Shutdown::Both); } @@ -59,9 +59,7 @@ where T: IoStream, H: HttpHandler + 'static, { - pub(crate) fn new( - settings: ServiceConfig, io: T, peer: Option, - ) -> HttpChannel { + pub(crate) fn new(settings: ServiceConfig, io: T) -> HttpChannel { let ka_timeout = settings.client_timer(); HttpChannel { @@ -69,7 +67,6 @@ where node_reg: false, node: Node::new(HttpProtocol::Unknown( settings, - peer, io, BytesMut::with_capacity(8192), )), @@ -102,7 +99,7 @@ where Ok(Async::Ready(_)) => { trace!("Slow request timed out, close connection"); let proto = mem::replace(self.node.get_mut(), HttpProtocol::None); - if let HttpProtocol::Unknown(settings, _, io, buf) = proto { + if let HttpProtocol::Unknown(settings, io, buf) = proto { *self.node.get_mut() = HttpProtocol::H1(h1::Http1Dispatcher::for_error( settings, @@ -125,7 +122,7 @@ where let settings = match self.node.get_mut() { HttpProtocol::H1(ref mut h1) => h1.settings().clone(), HttpProtocol::H2(ref mut h2) => h2.settings().clone(), - HttpProtocol::Unknown(ref mut settings, _, _, _) => settings.clone(), + HttpProtocol::Unknown(ref mut settings, _, _) => settings.clone(), HttpProtocol::None => unreachable!(), }; settings.head().insert(&mut self.node); @@ -135,7 +132,7 @@ where let kind = match self.node.get_mut() { HttpProtocol::H1(ref mut h1) => return h1.poll(), HttpProtocol::H2(ref mut h2) => return h2.poll(), - HttpProtocol::Unknown(_, _, ref mut io, ref mut buf) => { + HttpProtocol::Unknown(_, ref mut io, ref mut buf) => { let mut err = None; let mut disconnect = false; match io.read_available(buf) { @@ -173,13 +170,12 @@ where // upgrade to specific http protocol let proto = mem::replace(self.node.get_mut(), HttpProtocol::None); - if let HttpProtocol::Unknown(settings, addr, io, buf) = proto { + if let HttpProtocol::Unknown(settings, io, buf) = proto { match kind { ProtocolKind::Http1 => { *self.node.get_mut() = HttpProtocol::H1(h1::Http1Dispatcher::new( settings, io, - addr, buf, is_eof, self.ka_timeout.take(), @@ -190,7 +186,6 @@ where *self.node.get_mut() = HttpProtocol::H2(h2::Http2::new( settings, io, - addr, buf.freeze(), self.ka_timeout.take(), )); @@ -202,6 +197,70 @@ where } } +#[doc(hidden)] +pub struct H1Channel +where + T: IoStream, + H: HttpHandler + 'static, +{ + node: Node>, + node_reg: bool, +} + +impl H1Channel +where + T: IoStream, + H: HttpHandler + 'static, +{ + pub(crate) fn new(settings: ServiceConfig, io: T) -> H1Channel { + H1Channel { + node_reg: false, + node: Node::new(HttpProtocol::H1(h1::Http1Dispatcher::new( + settings, + io, + BytesMut::with_capacity(8192), + false, + None, + ))), + } + } +} + +impl Drop for H1Channel +where + T: IoStream, + H: HttpHandler + 'static, +{ + fn drop(&mut self) { + self.node.remove(); + } +} + +impl Future for H1Channel +where + T: IoStream, + H: HttpHandler + 'static, +{ + type Item = (); + type Error = HttpDispatchError; + + fn poll(&mut self) -> Poll { + if !self.node_reg { + self.node_reg = true; + let settings = match self.node.get_mut() { + HttpProtocol::H1(ref mut h1) => h1.settings().clone(), + _ => unreachable!(), + }; + settings.head().insert(&mut self.node); + } + + match self.node.get_mut() { + HttpProtocol::H1(ref mut h1) => h1.poll(), + _ => unreachable!(), + } + } +} + pub(crate) struct Node { next: Option<*mut Node>, prev: Option<*mut Node>, diff --git a/src/server/h1.rs b/src/server/h1.rs index 53c4e2cf5..7a59b6496 100644 --- a/src/server/h1.rs +++ 
b/src/server/h1.rs @@ -87,9 +87,10 @@ where H: HttpHandler + 'static, { pub fn new( - settings: ServiceConfig, stream: T, addr: Option, buf: BytesMut, - is_eof: bool, keepalive_timer: Option, + settings: ServiceConfig, stream: T, buf: BytesMut, is_eof: bool, + keepalive_timer: Option, ) -> Self { + let addr = stream.peer_addr(); let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer { (delay.deadline(), Some(delay)) } else if let Some(delay) = settings.keep_alive_timer() { @@ -107,12 +108,12 @@ where }; Http1Dispatcher { - flags, stream: H1Writer::new(stream, settings.clone()), decoder: H1Decoder::new(), payload: None, tasks: VecDeque::new(), error: None, + flags, addr, buf, settings, @@ -337,9 +338,11 @@ where /// read data from the stream pub(self) fn poll_io(&mut self) -> Result { if !self.flags.contains(Flags::POLLED) { - let updated = self.parse()?; self.flags.insert(Flags::POLLED); - return Ok(updated); + if !self.buf.is_empty() { + let updated = self.parse()?; + return Ok(updated); + } } // read io from socket diff --git a/src/server/h2.rs b/src/server/h2.rs index 312b51df9..2fe2fa073 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -58,9 +58,9 @@ where H: HttpHandler + 'static, { pub fn new( - settings: ServiceConfig, io: T, addr: Option, buf: Bytes, - keepalive_timer: Option, + settings: ServiceConfig, io: T, buf: Bytes, keepalive_timer: Option, ) -> Self { + let addr = io.peer_addr(); let extensions = io.extensions(); Http2 { flags: Flags::empty(), diff --git a/src/server/incoming.rs b/src/server/incoming.rs index f2bc1d8f5..b13bba2a7 100644 --- a/src/server/incoming.rs +++ b/src/server/incoming.rs @@ -64,8 +64,6 @@ where type Result = (); fn handle(&mut self, msg: WrapperStream, _: &mut Context) -> Self::Result { - Arbiter::spawn( - HttpChannel::new(self.settings.clone(), msg, None).map_err(|_| ()), - ); + Arbiter::spawn(HttpChannel::new(self.settings.clone(), msg).map_err(|_| ())); } } diff --git a/src/server/mod.rs b/src/server/mod.rs index d6e9f26b1..c942ff91f 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -106,7 +106,7 @@ //! let _ = sys.run(); //!} //! ``` -use std::net::Shutdown; +use std::net::{Shutdown, SocketAddr}; use std::rc::Rc; use std::{io, time}; @@ -143,10 +143,13 @@ pub use self::message::Request; pub use self::ssl::*; pub use self::error::{AcceptorError, HttpDispatchError}; -pub use self::settings::{ServerSettings, ServiceConfig, ServiceConfigBuilder}; +pub use self::settings::ServerSettings; #[doc(hidden)] -pub use self::service::{HttpService, StreamConfiguration}; +pub use self::settings::{ServiceConfig, ServiceConfigBuilder}; + +#[doc(hidden)] +pub use self::service::{H1Service, HttpService, StreamConfiguration}; #[doc(hidden)] pub use self::helpers::write_content_length; @@ -266,6 +269,12 @@ pub trait Writer { pub trait IoStream: AsyncRead + AsyncWrite + 'static { fn shutdown(&mut self, how: Shutdown) -> io::Result<()>; + /// Returns the socket address of the remote peer of this TCP connection. + fn peer_addr(&self) -> Option { + None + } + + /// Sets the value of the TCP_NODELAY option on this socket. 
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()>; fn set_linger(&mut self, dur: Option) -> io::Result<()>; @@ -341,6 +350,11 @@ impl IoStream for TcpStream { TcpStream::shutdown(self, how) } + #[inline] + fn peer_addr(&self) -> Option { + TcpStream::peer_addr(self).ok() + } + #[inline] fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { TcpStream::set_nodelay(self, nodelay) diff --git a/src/server/service.rs b/src/server/service.rs index ec71a1f1f..e3402e305 100644 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -5,7 +5,7 @@ use actix_net::service::{NewService, Service}; use futures::future::{ok, FutureResult}; use futures::{Async, Poll}; -use super::channel::HttpChannel; +use super::channel::{H1Channel, HttpChannel}; use super::error::HttpDispatchError; use super::handler::HttpHandler; use super::settings::ServiceConfig; @@ -89,7 +89,90 @@ where } fn call(&mut self, req: Self::Request) -> Self::Future { - HttpChannel::new(self.settings.clone(), req, None) + HttpChannel::new(self.settings.clone(), req) + } +} + +/// `NewService` implementation for HTTP1 transport +pub struct H1Service +where + H: HttpHandler, + Io: IoStream, +{ + settings: ServiceConfig, + _t: PhantomData, +} + +impl H1Service +where + H: HttpHandler, + Io: IoStream, +{ + /// Create new `HttpService` instance. + pub fn new(settings: ServiceConfig) -> Self { + H1Service { + settings, + _t: PhantomData, + } + } +} + +impl NewService for H1Service +where + H: HttpHandler, + Io: IoStream, +{ + type Request = Io; + type Response = (); + type Error = HttpDispatchError; + type InitError = (); + type Service = H1ServiceHandler; + type Future = FutureResult; + + fn new_service(&self) -> Self::Future { + ok(H1ServiceHandler::new(self.settings.clone())) + } +} + +/// `Service` implementation for HTTP1 transport +pub struct H1ServiceHandler +where + H: HttpHandler, + Io: IoStream, +{ + settings: ServiceConfig, + _t: PhantomData, +} + +impl H1ServiceHandler +where + H: HttpHandler, + Io: IoStream, +{ + fn new(settings: ServiceConfig) -> H1ServiceHandler { + H1ServiceHandler { + settings, + _t: PhantomData, + } + } +} + +impl Service for H1ServiceHandler +where + H: HttpHandler, + Io: IoStream, +{ + type Request = Io; + type Response = (); + type Error = HttpDispatchError; + type Future = H1Channel; + + fn poll_ready(&mut self) -> Poll<(), Self::Error> { + Ok(Async::Ready(())) + } + + fn call(&mut self, req: Self::Request) -> Self::Future { + H1Channel::new(self.settings.clone(), req) } } diff --git a/src/server/ssl/nativetls.rs b/src/server/ssl/nativetls.rs index e56b4521b..a9797ffb3 100644 --- a/src/server/ssl/nativetls.rs +++ b/src/server/ssl/nativetls.rs @@ -1,4 +1,4 @@ -use std::net::Shutdown; +use std::net::{Shutdown, SocketAddr}; use std::{io, time}; use actix_net::ssl::TlsStream; @@ -12,6 +12,11 @@ impl IoStream for TlsStream { Ok(()) } + #[inline] + fn peer_addr(&self) -> Option { + self.get_ref().get_ref().peer_addr() + } + #[inline] fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { self.get_mut().get_mut().set_nodelay(nodelay) diff --git a/src/server/ssl/openssl.rs b/src/server/ssl/openssl.rs index f9e0e1774..9d370f8be 100644 --- a/src/server/ssl/openssl.rs +++ b/src/server/ssl/openssl.rs @@ -1,4 +1,4 @@ -use std::net::Shutdown; +use std::net::{Shutdown, SocketAddr}; use std::{io, time}; use actix_net::ssl; @@ -65,6 +65,11 @@ impl IoStream for SslStream { Ok(()) } + #[inline] + fn peer_addr(&self) -> Option { + self.get_ref().get_ref().peer_addr() + } + #[inline] fn set_nodelay(&mut 
self, nodelay: bool) -> io::Result<()> { self.get_mut().get_mut().set_nodelay(nodelay) diff --git a/src/server/ssl/rustls.rs b/src/server/ssl/rustls.rs index df78d1dc6..a53a53a98 100644 --- a/src/server/ssl/rustls.rs +++ b/src/server/ssl/rustls.rs @@ -1,4 +1,4 @@ -use std::net::Shutdown; +use std::net::{Shutdown, SocketAddr}; use std::{io, time}; use actix_net::ssl; //::RustlsAcceptor; @@ -65,6 +65,11 @@ impl IoStream for TlsStream { Ok(()) } + #[inline] + fn peer_addr(&self) -> Option { + self.get_ref().0.peer_addr() + } + #[inline] fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { self.get_mut().0.set_nodelay(nodelay) diff --git a/tests/test_custom_pipeline.rs b/tests/test_custom_pipeline.rs new file mode 100644 index 000000000..cf1eeb5bf --- /dev/null +++ b/tests/test_custom_pipeline.rs @@ -0,0 +1,81 @@ +extern crate actix; +extern crate actix_net; +extern crate actix_web; + +use std::{thread, time}; + +use actix::System; +use actix_net::server::Server; +use actix_net::service::NewServiceExt; +use actix_web::server::{HttpService, KeepAlive, ServiceConfig, StreamConfiguration}; +use actix_web::{client, test, App, HttpRequest}; + +#[test] +fn test_custom_pipeline() { + let addr = test::TestServer::unused_addr(); + + thread::spawn(move || { + Server::new() + .bind("test", addr, move || { + let app = App::new() + .route("/", http::Method::GET, |_: HttpRequest| "OK") + .finish(); + let settings = ServiceConfig::build(app) + .keep_alive(KeepAlive::Disabled) + .client_timeout(1000) + .client_shutdown(1000) + .server_hostname("localhost") + .server_address(addr) + .finish(); + + StreamConfiguration::new() + .nodelay(true) + .tcp_keepalive(Some(time::Duration::from_secs(10))) + .and_then(HttpService::new(settings)) + }).unwrap() + .run(); + }); + + let mut sys = System::new("test"); + { + let req = client::ClientRequest::get(format!("http://{}/", addr).as_str()) + .finish() + .unwrap(); + let response = sys.block_on(req.send()).unwrap(); + assert!(response.status().is_success()); + } +} + +#[test] +fn test_h1() { + use actix_web::server::H1Service; + + let addr = test::TestServer::unused_addr(); + thread::spawn(move || { + Server::new() + .bind("test", addr, move || { + let app = App::new() + .route("/", http::Method::GET, |_: HttpRequest| "OK") + .finish(); + let settings = ServiceConfig::build(app) + .keep_alive(KeepAlive::Disabled) + .client_timeout(1000) + .client_shutdown(1000) + .server_hostname("localhost") + .server_address(addr) + .finish(); + + H1Service::new(settings) + }).unwrap() + .run(); + }); + + let mut sys = System::new("test"); + { + let req = client::ClientRequest::get(format!("http://{}/", addr).as_str()) + .finish() + .unwrap(); + let response = sys.block_on(req.send()).unwrap(); + assert!(response.status().is_success()); + } +} diff --git a/tests/test_server.rs b/tests/test_server.rs index 8d9a400d8..477d3e64b 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -26,7 +26,6 @@ use std::io::{Read, Write}; use std::sync::Arc; use std::{thread, time}; -use actix_net::server::Server; #[cfg(feature = "brotli")] use brotli2::write::{BrotliDecoder, BrotliEncoder}; use bytes::{Bytes, BytesMut}; @@ -1223,48 +1222,6 @@ fn test_server_cookies() { } } -#[test] -fn test_custom_pipeline() { - use actix::System; - use actix_net::service::NewServiceExt; - use actix_web::server::{ - HttpService, KeepAlive, ServiceConfig, StreamConfiguration, - }; - - let addr = test::TestServer::unused_addr(); - - thread::spawn(move || { - Server::new() - .bind("test", addr, move || { 
- let app = App::new() - .route("/", http::Method::GET, |_: HttpRequest| "OK") - .finish(); - let settings = ServiceConfig::build(app) - .keep_alive(KeepAlive::Disabled) - .client_timeout(1000) - .client_shutdown(1000) - .server_hostname("localhost") - .server_address(addr) - .finish(); - - StreamConfiguration::new() - .nodelay(true) - .tcp_keepalive(Some(time::Duration::from_secs(10))) - .and_then(HttpService::new(settings)) - }).unwrap() - .run(); - }); - - let mut sys = System::new("test"); - { - let req = client::ClientRequest::get(format!("http://{}/", addr).as_str()) - .finish() - .unwrap(); - let response = sys.block_on(req.send()).unwrap(); - assert!(response.status().is_success()); - } -} - #[test] fn test_slow_request() { use actix::System; From 1f68ce85410a57d323297502559a46e912eaf4d5 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 19:05:58 -0700 Subject: [PATCH 172/219] fix tests --- src/server/h1.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index 7a59b6496..4fb730f71 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -733,7 +733,7 @@ mod tests { let settings = wrk_settings(); let mut h1 = - Http1Dispatcher::new(settings.clone(), buf, None, readbuf, false, None); + Http1Dispatcher::new(settings.clone(), buf, readbuf, false, None); assert!(h1.poll_io().is_ok()); assert!(h1.poll_io().is_ok()); assert!(h1.flags.contains(Flags::READ_DISCONNECTED)); From bbcd618304e7bee84413fbb74df70910e21b41ca Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 19:12:08 -0700 Subject: [PATCH 173/219] export AcceptorTimeout --- src/server/acceptor.rs | 12 ++++++++---- src/server/mod.rs | 3 +++ src/server/settings.rs | 4 ++-- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs index 79d133d2d..f66e51dbe 100644 --- a/src/server/acceptor.rs +++ b/src/server/acceptor.rs @@ -172,10 +172,11 @@ where } } +#[doc(hidden)] /// Acceptor timeout middleware /// /// Applies timeout to request prcoessing. -pub(crate) struct AcceptorTimeout { +pub struct AcceptorTimeout { inner: T, timeout: Duration, } @@ -206,7 +207,7 @@ impl NewService for AcceptorTimeout { } #[doc(hidden)] -pub(crate) struct AcceptorTimeoutFut { +pub struct AcceptorTimeoutFut { fut: T::Future, timeout: Duration, } @@ -224,10 +225,11 @@ impl Future for AcceptorTimeoutFut { } } +#[doc(hidden)] /// Acceptor timeout service /// /// Applies timeout to request prcoessing. 
-pub(crate) struct AcceptorTimeoutService { +pub struct AcceptorTimeoutService { inner: T, timeout: Duration, } @@ -250,10 +252,12 @@ impl Service for AcceptorTimeoutService { } } -pub(crate) struct AcceptorTimeoutResponse { +#[doc(hidden)] +pub struct AcceptorTimeoutResponse { fut: T::Future, sleep: Delay, } + impl Future for AcceptorTimeoutResponse { type Item = T::Response; type Error = AcceptorError; diff --git a/src/server/mod.rs b/src/server/mod.rs index c942ff91f..3277dba5a 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -145,6 +145,9 @@ pub use self::ssl::*; pub use self::error::{AcceptorError, HttpDispatchError}; pub use self::settings::ServerSettings; +#[doc(hidden)] +pub use self::acceptor::AcceptorTimeout; + #[doc(hidden)] pub use self::settings::{ServiceConfig, ServiceConfigBuilder}; diff --git a/src/server/settings.rs b/src/server/settings.rs index 3798fae50..9b27ed5e5 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -282,7 +282,7 @@ impl ServiceConfig { // periodic date update let s = self.clone(); - spawn(sleep(Duration::from_secs(1)).then(move |_| { + spawn(sleep(Duration::from_millis(500)).then(move |_| { s.update_date(); future::ok(()) })); @@ -310,7 +310,7 @@ impl ServiceConfig { // periodic date update let s = self.clone(); - spawn(sleep(Duration::from_secs(1)).then(move |_| { + spawn(sleep(Duration::from_millis(500)).then(move |_| { s.update_date(); future::ok(()) })); From 401ea574c03161ea0c9d1d935915381272c4d9aa Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 19:31:30 -0700 Subject: [PATCH 174/219] make AcceptorTimeout::new public --- src/server/acceptor.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs index f66e51dbe..2e1b1f283 100644 --- a/src/server/acceptor.rs +++ b/src/server/acceptor.rs @@ -182,7 +182,8 @@ pub struct AcceptorTimeout { } impl AcceptorTimeout { - pub(crate) fn new(timeout: u64, inner: T) -> Self { + /// Create new `AcceptorTimeout` instance. timeout is in milliseconds. 
+ pub fn new(timeout: u64, inner: T) -> Self { Self { inner, timeout: Duration::from_millis(timeout), From b0677aa0290adc94dbcf5da7ee4ae2ac35c08548 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 19:42:24 -0700 Subject: [PATCH 175/219] fix stable compatibility --- tests/test_custom_pipeline.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_custom_pipeline.rs b/tests/test_custom_pipeline.rs index cf1eeb5bf..6b5df00e3 100644 --- a/tests/test_custom_pipeline.rs +++ b/tests/test_custom_pipeline.rs @@ -8,7 +8,7 @@ use actix::System; use actix_net::server::Server; use actix_net::service::NewServiceExt; use actix_web::server::{HttpService, KeepAlive, ServiceConfig, StreamConfiguration}; -use actix_web::{client, test, App, HttpRequest}; +use actix_web::{client, http, test, App, HttpRequest}; #[test] fn test_custom_pipeline() { From 49eea3bf76d82aa1b4f31a6efb6dcf803f6623de Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 20:22:51 -0700 Subject: [PATCH 176/219] travis config --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 59f6a8549..62867e030 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,7 +37,7 @@ script: - | if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin - cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml + RUST_BACKTRACE=1 cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml bash <(curl -s https://codecov.io/bash) echo "Uploaded code coverage" fi From 1e1a4f846e0f3a109b168bfb660ae781697688eb Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 2 Oct 2018 22:23:51 -0700 Subject: [PATCH 177/219] use actix-net cell features --- Cargo.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 46719d709..12f98ac37 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,7 +29,7 @@ name = "actix_web" path = "src/lib.rs" [features] -default = ["session", "brotli", "flate2-c"] +default = ["session", "brotli", "flate2-c", "cell"] # tls tls = ["native-tls", "tokio-tls", "actix-net/tls"] @@ -58,6 +58,8 @@ flate2-c = ["flate2/miniz-sys"] # rust backend for flate2 crate flate2-rust = ["flate2/rust_backend"] +cell = ["actix-net/cell"] + [dependencies] actix = "0.7.0" actix-net = { git="https://github.com/actix/actix-net.git" } From 7ae5a43877966cb335a1a03b128f7cc6caefac51 Mon Sep 17 00:00:00 2001 From: lzx <40080252+lzxZz@users.noreply.github.com> Date: Sat, 6 Oct 2018 13:16:12 +0800 Subject: [PATCH 178/219] httpresponse.rs doc fix (#534) --- src/httpresponse.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/httpresponse.rs b/src/httpresponse.rs index 59815c58c..8b091d42e 100644 --- a/src/httpresponse.rs +++ b/src/httpresponse.rs @@ -272,7 +272,7 @@ impl HttpResponse { self.get_mut().response_size = size; } - /// Set write buffer capacity + /// Get write buffer capacity pub fn write_buffer_capacity(&self) -> usize { self.get_ref().write_capacity } From 10678a22af6a138a6106ffc3dccd281625bf7c4b Mon Sep 17 00:00:00 2001 From: Danil Berestov Date: Sat, 6 Oct 2018 13:17:20 +0800 Subject: [PATCH 179/219] test content length (#532) --- tests/test_server.rs | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/tests/test_server.rs b/tests/test_server.rs index 477d3e64b..cb19cfed0 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -1355,3 +1355,47 @@ fn test_ssl_handshake_timeout() { let 
_ = sys.stop(); } + +#[test] +fn test_content_length() { + use http::StatusCode; + use actix_web::http::header::{HeaderName, HeaderValue}; + + let mut srv = test::TestServer::new(move |app| { + app.resource("/{status}", |r| { + r.f(|req: &HttpRequest| { + let indx: usize = + req.match_info().get("status").unwrap().parse().unwrap(); + let statuses = [ + StatusCode::NO_CONTENT, + StatusCode::CONTINUE, + StatusCode::SWITCHING_PROTOCOLS, + StatusCode::PROCESSING, + StatusCode::OK, + StatusCode::NOT_FOUND, + ]; + HttpResponse::new(statuses[indx]) + }) + }); + }); + + let addr = srv.addr(); + let mut get_resp = |i| { + let url = format!("http://{}/{}", addr, i); + let req = srv.get().uri(url).finish().unwrap(); + srv.execute(req.send()).unwrap() + }; + + let header = HeaderName::from_static("content-length"); + let value = HeaderValue::from_static("0"); + + for i in 0..4 { + let response = get_resp(i); + assert_eq!(response.headers().get(&header), None); + } + for i in 4..6 { + let response = get_resp(i); + assert_eq!(response.headers().get(&header), Some(&value)); + } +} + From cfad5bf1f343a472f8dbaf9c6dba525c59cb19b9 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 8 Oct 2018 07:47:42 -0700 Subject: [PATCH 180/219] enable slow request timeout for h2 dispatcher --- src/server/h1.rs | 27 +++++------- src/server/h2.rs | 106 +++++++++++++++++++++++++++------------------ src/server/http.rs | 5 --- src/server/mod.rs | 14 ++---- 4 files changed, 78 insertions(+), 74 deletions(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index 4fb730f71..0fb72ef7e 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -203,7 +203,7 @@ where #[inline] pub fn poll(&mut self) -> Poll<(), HttpDispatchError> { // check connection keep-alive - self.poll_keep_alive()?; + self.poll_keepalive()?; // shutdown if self.flags.contains(Flags::SHUTDOWN) { @@ -277,23 +277,21 @@ where } /// keep-alive timer. 
returns `true` is keep-alive, otherwise drop - fn poll_keep_alive(&mut self) -> Result<(), HttpDispatchError> { + fn poll_keepalive(&mut self) -> Result<(), HttpDispatchError> { if let Some(ref mut timer) = self.ka_timer { match timer.poll() { Ok(Async::Ready(_)) => { + // if we get timer during shutdown, just drop connection + if self.flags.contains(Flags::SHUTDOWN) { + let io = self.stream.get_mut(); + let _ = IoStream::set_linger(io, Some(Duration::from_secs(0))); + let _ = IoStream::shutdown(io, Shutdown::Both); + return Err(HttpDispatchError::ShutdownTimeout); + } if timer.deadline() >= self.ka_expire { // check for any outstanding request handling if self.tasks.is_empty() { - // if we get timer during shutdown, just drop connection - if self.flags.contains(Flags::SHUTDOWN) { - let io = self.stream.get_mut(); - let _ = IoStream::set_linger( - io, - Some(Duration::from_secs(0)), - ); - let _ = IoStream::shutdown(io, Shutdown::Both); - return Err(HttpDispatchError::ShutdownTimeout); - } else if !self.flags.contains(Flags::STARTED) { + if !self.flags.contains(Flags::STARTED) { // timeout on first request (slow request) return 408 trace!("Slow request timeout"); self.flags @@ -315,9 +313,8 @@ where return Ok(()); } } - } else if let Some(deadline) = self.settings.keep_alive_expire() - { - timer.reset(deadline) + } else if let Some(dl) = self.settings.keep_alive_expire() { + timer.reset(dl) } } else { timer.reset(self.ka_expire) diff --git a/src/server/h2.rs b/src/server/h2.rs index 2fe2fa073..6ad9af709 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -27,7 +27,8 @@ use super::{HttpHandler, HttpHandlerTask, IoStream, Writer}; bitflags! { struct Flags: u8 { - const DISCONNECTED = 0b0000_0010; + const DISCONNECTED = 0b0000_0001; + const SHUTDOWN = 0b0000_0010; } } @@ -42,8 +43,9 @@ where addr: Option, state: State>, tasks: VecDeque>, - keepalive_timer: Option, extensions: Option>, + ka_expire: Instant, + ka_timer: Option, } enum State { @@ -62,6 +64,16 @@ where ) -> Self { let addr = io.peer_addr(); let extensions = io.extensions(); + + // keep-alive timeout + let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer { + (delay.deadline(), Some(delay)) + } else if let Some(delay) = settings.keep_alive_timer() { + (delay.deadline(), Some(delay)) + } else { + (settings.now(), None) + }; + Http2 { flags: Flags::empty(), tasks: VecDeque::new(), @@ -72,14 +84,14 @@ where addr, settings, extensions, - keepalive_timer, + ka_expire, + ka_timer, } } pub(crate) fn shutdown(&mut self) { self.state = State::Empty; self.tasks.clear(); - self.keepalive_timer.take(); } pub fn settings(&self) -> &ServiceConfig { @@ -87,21 +99,16 @@ where } pub fn poll(&mut self) -> Poll<(), HttpDispatchError> { + self.poll_keepalive()?; + // server if let State::Connection(ref mut conn) = self.state { - // keep-alive timer - if let Some(ref mut timeout) = self.keepalive_timer { - match timeout.poll() { - Ok(Async::Ready(_)) => { - trace!("Keep-alive timeout, close connection"); - return Ok(Async::Ready(())); - } - Ok(Async::NotReady) => (), - Err(_) => unreachable!(), - } - } - loop { + // shutdown connection + if self.flags.contains(Flags::SHUTDOWN) { + return conn.poll_close().map_err(|e| e.into()); + } + let mut not_ready = true; let disconnected = self.flags.contains(Flags::DISCONNECTED); @@ -216,8 +223,12 @@ where not_ready = false; let (parts, body) = req.into_parts(); - // stop keepalive timer - self.keepalive_timer.take(); + // update keep-alive expire + if self.ka_timer.is_some() { + if let 
Some(expire) = self.settings.keep_alive_expire() { + self.ka_expire = expire; + } + } self.tasks.push_back(Entry::new( parts, @@ -228,36 +239,14 @@ where self.extensions.clone(), )); } - Ok(Async::NotReady) => { - // start keep-alive timer - if self.tasks.is_empty() { - if self.settings.keep_alive_enabled() { - if self.keepalive_timer.is_none() { - if let Some(ka) = self.settings.keep_alive() { - trace!("Start keep-alive timer"); - let mut timeout = - Delay::new(Instant::now() + ka); - // register timeout - let _ = timeout.poll(); - self.keepalive_timer = Some(timeout); - } - } - } else { - // keep-alive disable, drop connection - return conn.poll_close().map_err(|e| e.into()); - } - } else { - // keep-alive unset, rely on operating system - return Ok(Async::NotReady); - } - } + Ok(Async::NotReady) => return Ok(Async::NotReady), Err(err) => { trace!("Connection error: {}", err); - self.flags.insert(Flags::DISCONNECTED); + self.flags.insert(Flags::SHUTDOWN); for entry in &mut self.tasks { entry.task.disconnected() } - self.keepalive_timer.take(); + continue; } } } @@ -289,6 +278,37 @@ where self.poll() } + + /// keep-alive timer. returns `true` is keep-alive, otherwise drop + fn poll_keepalive(&mut self) -> Result<(), HttpDispatchError> { + if let Some(ref mut timer) = self.ka_timer { + match timer.poll() { + Ok(Async::Ready(_)) => { + // if we get timer during shutdown, just drop connection + if self.flags.contains(Flags::SHUTDOWN) { + return Err(HttpDispatchError::ShutdownTimeout); + } + if timer.deadline() >= self.ka_expire { + // check for any outstanding request handling + if self.tasks.is_empty() { + return Err(HttpDispatchError::ShutdownTimeout); + } else if let Some(dl) = self.settings.keep_alive_expire() { + timer.reset(dl) + } + } else { + timer.reset(self.ka_expire) + } + } + Ok(Async::NotReady) => (), + Err(e) => { + error!("Timer error {:?}", e); + return Err(HttpDispatchError::Unknown); + } + } + } + + Ok(()) + } } bitflags! { diff --git a/src/server/http.rs b/src/server/http.rs index 6a7790c13..9ecd4a5d2 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -197,11 +197,6 @@ where } /// Disable `HTTP/2` support - // #[doc(hidden)] - // #[deprecated( - // since = "0.7.4", - // note = "please use acceptor service with proper ServerFlags parama" - // )] pub fn no_http2(mut self) -> Self { self.no_http2 = true; self diff --git a/src/server/mod.rs b/src/server/mod.rs index 3277dba5a..8d7195166 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -12,7 +12,7 @@ //! to serve incoming HTTP requests. //! //! As the server uses worker pool, the factory function is restricted to trait bounds -//! `Sync + Send + 'static` so that each worker would be able to accept Application +//! `Send + Clone + 'static` so that each worker would be able to accept Application //! without a need for synchronization. //! //! If you wish to share part of state among all workers you should @@ -29,13 +29,9 @@ //! Each TLS implementation is provided with [AcceptorService](trait.AcceptorService.html) //! that describes how HTTP Server accepts connections. //! -//! For `bind` and `listen` there are corresponding `bind_with` and `listen_with` that accepts +//! For `bind` and `listen` there are corresponding `bind_ssl|tls|rustls` and `listen_ssl|tls|rustls` that accepts //! these services. //! -//! By default, acceptor would work with both HTTP2 and HTTP1 protocols. -//! But it can be controlled using [ServerFlags](struct.ServerFlags.html) which -//! can be supplied when creating `AcceptorService`. 
-//! //! **NOTE:** `native-tls` doesn't support `HTTP2` yet //! //! ## Signal handling and shutdown @@ -87,17 +83,13 @@ //! // load ssl keys //! let config = load_ssl(); //! -//! // Create acceptor service for only HTTP1 protocol -//! // You can use ::new(config) to leave defaults -//! let acceptor = server::RustlsAcceptor::with_flags(config, actix_web::server::ServerFlags::HTTP1); -//! //! // create and start server at once //! server::new(|| { //! App::new() //! // register simple handler, handle all methods //! .resource("/index.html", |r| r.f(index)) //! })) -//! }).bind_with("127.0.0.1:8080", acceptor) +//! }).bind_rustls("127.0.0.1:8443", config) //! .unwrap() //! .start(); //! From 03d988b898ab976bdf04658209c35239b6c4b1e7 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 8 Oct 2018 10:16:19 -0700 Subject: [PATCH 181/219] refactor date rendering --- src/server/settings.rs | 66 ++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 41 deletions(-) diff --git a/src/server/settings.rs b/src/server/settings.rs index 9b27ed5e5..bafffb5f7 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -1,4 +1,4 @@ -use std::cell::{RefCell, RefMut, UnsafeCell}; +use std::cell::{Cell, RefCell, RefMut}; use std::collections::VecDeque; use std::fmt::Write; use std::rc::Rc; @@ -139,7 +139,7 @@ struct Inner { bytes: Rc, messages: &'static RequestPool, node: RefCell>, - date: UnsafeCell<(bool, Date)>, + date: Cell>, } impl Clone for ServiceConfig { @@ -174,7 +174,7 @@ impl ServiceConfig { bytes: Rc::new(SharedBytesPool::new()), messages: RequestPool::pool(settings), node: RefCell::new(Node::head()), - date: UnsafeCell::new((false, Date::new())), + date: Cell::new(None), })) } @@ -214,11 +214,6 @@ impl ServiceConfig { pub(crate) fn get_request(&self) -> Request { RequestPool::get(self.0.messages) } - - fn update_date(&self) { - // Unsafe: WorkerSetting is !Sync and !Send - unsafe { (*self.0.date.get()).0 = false }; - } } impl ServiceConfig { @@ -272,51 +267,39 @@ impl ServiceConfig { } } - pub(crate) fn set_date(&self, dst: &mut BytesMut, full: bool) { - // Unsafe: WorkerSetting is !Sync and !Send - let date_bytes = unsafe { - let date = &mut (*self.0.date.get()); - if !date.0 { - date.1.update(); - date.0 = true; + fn check_date(&self) { + if unsafe { &*self.0.date.as_ptr() }.is_none() { + self.0.date.set(Some(Date::new())); + + // periodic date update + let s = self.clone(); + spawn(sleep(Duration::from_millis(500)).then(move |_| { + s.0.date.set(None); + future::ok(()) + })); + } + } + + pub(crate) fn set_date(&self, dst: &mut BytesMut, full: bool) { + self.check_date(); + + let date = &unsafe { &*self.0.date.as_ptr() }.as_ref().unwrap().bytes; - // periodic date update - let s = self.clone(); - spawn(sleep(Duration::from_millis(500)).then(move |_| { - s.update_date(); - future::ok(()) - })); - } - &date.1.bytes - }; if full { let mut buf: [u8; 39] = [0; 39]; buf[..6].copy_from_slice(b"date: "); - buf[6..35].copy_from_slice(date_bytes); + buf[6..35].copy_from_slice(date); buf[35..].copy_from_slice(b"\r\n\r\n"); dst.extend_from_slice(&buf); } else { - dst.extend_from_slice(date_bytes); + dst.extend_from_slice(date); } } #[inline] pub(crate) fn now(&self) -> Instant { - unsafe { - let date = &mut (*self.0.date.get()); - if !date.0 { - date.1.update(); - date.0 = true; - - // periodic date update - let s = self.clone(); - spawn(sleep(Duration::from_millis(500)).then(move |_| { - s.update_date(); - future::ok(()) - })); - } - date.1.current - } + self.check_date(); + 
unsafe { &*self.0.date.as_ptr() }.as_ref().unwrap().current } } @@ -435,6 +418,7 @@ impl ServiceConfigBuilder { } } +#[derive(Copy, Clone)] struct Date { current: Instant, bytes: [u8; DATE_VALUE_LENGTH], From 4e7fac08b9675dad2965611e1c33cc734603de4f Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 8 Oct 2018 15:30:59 -0700 Subject: [PATCH 182/219] do not override content-length header --- src/server/h1writer.rs | 14 +++++++++++--- src/server/output.rs | 16 +++++++--------- tests/test_server.rs | 3 +-- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/src/server/h1writer.rs b/src/server/h1writer.rs index c27a4c44a..97ce6dff9 100644 --- a/src/server/h1writer.rs +++ b/src/server/h1writer.rs @@ -176,13 +176,11 @@ impl Writer for H1Writer { buffer.extend_from_slice(reason); // content length + let mut len_is_set = true; match info.length { ResponseLength::Chunked => { buffer.extend_from_slice(b"\r\ntransfer-encoding: chunked\r\n") } - ResponseLength::Zero => { - buffer.extend_from_slice(b"\r\ncontent-length: 0\r\n") - } ResponseLength::Length(len) => { helpers::write_content_length(len, &mut buffer) } @@ -191,6 +189,10 @@ impl Writer for H1Writer { write!(buffer.writer(), "{}", len)?; buffer.extend_from_slice(b"\r\n"); } + ResponseLength::Zero => { + len_is_set = false; + buffer.extend_from_slice(b"\r\n"); + } ResponseLength::None => buffer.extend_from_slice(b"\r\n"), } if let Some(ce) = info.content_encoding { @@ -212,6 +214,9 @@ impl Writer for H1Writer { }, CONTENT_LENGTH => match info.length { ResponseLength::None => (), + ResponseLength::Zero => { + len_is_set = true; + } _ => continue, }, DATE => { @@ -248,6 +253,9 @@ impl Writer for H1Writer { unsafe { buffer.advance_mut(pos); } + if !len_is_set { + buffer.extend_from_slice(b"content-length: 0\r\n") + } // optimized date header, set_date writes \r\n if !has_date { diff --git a/src/server/output.rs b/src/server/output.rs index 70c24facc..35f3c7a45 100644 --- a/src/server/output.rs +++ b/src/server/output.rs @@ -191,15 +191,13 @@ impl Output { let transfer = match resp.body() { Body::Empty => { - if !info.head { - info.length = match resp.status() { - StatusCode::NO_CONTENT - | StatusCode::CONTINUE - | StatusCode::SWITCHING_PROTOCOLS - | StatusCode::PROCESSING => ResponseLength::None, - _ => ResponseLength::Zero, - }; - } + info.length = match resp.status() { + StatusCode::NO_CONTENT + | StatusCode::CONTINUE + | StatusCode::SWITCHING_PROTOCOLS + | StatusCode::PROCESSING => ResponseLength::None, + _ => ResponseLength::Zero, + }; *self = Output::Empty(buf); return; } diff --git a/tests/test_server.rs b/tests/test_server.rs index cb19cfed0..f3c9bf9dd 100644 --- a/tests/test_server.rs +++ b/tests/test_server.rs @@ -1358,8 +1358,8 @@ fn test_ssl_handshake_timeout() { #[test] fn test_content_length() { - use http::StatusCode; use actix_web::http::header::{HeaderName, HeaderValue}; + use http::StatusCode; let mut srv = test::TestServer::new(move |app| { app.resource("/{status}", |r| { @@ -1398,4 +1398,3 @@ fn test_content_length() { assert_eq!(response.headers().get(&header), Some(&value)); } } - From 93b1c5fd46465f2e08b661811a28b968fb77de70 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Mon, 8 Oct 2018 21:58:37 -0700 Subject: [PATCH 183/219] update deps --- .appveyor.yml | 3 ++- CHANGES.md | 2 +- Cargo.toml | 10 ++-------- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index 7addc8c08..2f0a4a7dd 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -1,6 +1,6 @@ environment: global: - 
PROJECT_NAME: actix + PROJECT_NAME: actix-web matrix: # Stable channel - TARGET: i686-pc-windows-msvc @@ -37,4 +37,5 @@ build: false # Equivalent to Travis' `script` phase test_script: + - cargo clean - cargo test --no-default-features --features="flate2-rust" diff --git a/CHANGES.md b/CHANGES.md index 3c55c3f64..f4f665a86 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.9] - 2018-09-x +## [0.7.9] - 2018-10-09 ### Added diff --git a/Cargo.toml b/Cargo.toml index 12f98ac37..2d606cc07 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"] build = "build.rs" [package.metadata.docs.rs] -features = ["tls", "alpn", "rust-tls", "session", "brotli", "flate2-c"] +features = ["tls", "ssl", "rust-tls", "session", "brotli", "flate2-c"] [badges] travis-ci = { repository = "actix/actix-web", branch = "master" } @@ -62,8 +62,7 @@ cell = ["actix-net/cell"] [dependencies] actix = "0.7.0" -actix-net = { git="https://github.com/actix/actix-net.git" } -#actix-net = { path = "../actix-net" } +actix-net = "0.1.0" base64 = "0.9" bitflags = "1.0" @@ -139,8 +138,3 @@ version_check = "0.1" lto = true opt-level = 3 codegen-units = 1 - -[workspace] -members = [ - "./", -] From c3ad516f56cfa59accd91b7d589a2c8e62969844 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 9 Oct 2018 09:45:24 -0700 Subject: [PATCH 184/219] disable shutdown atm --- src/server/channel.rs | 91 ++++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 45 deletions(-) diff --git a/src/server/channel.rs b/src/server/channel.rs index cbbe1a95e..1f4ec5b19 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -49,8 +49,8 @@ where T: IoStream, H: HttpHandler + 'static, { - node: Node>, - node_reg: bool, + proto: HttpProtocol, + node: Option>, ka_timeout: Option, } @@ -64,12 +64,8 @@ where HttpChannel { ka_timeout, - node_reg: false, - node: Node::new(HttpProtocol::Unknown( - settings, - io, - BytesMut::with_capacity(8192), - )), + node: None, + proto: HttpProtocol::Unknown(settings, io, BytesMut::with_capacity(8192)), } } } @@ -80,7 +76,9 @@ where H: HttpHandler + 'static, { fn drop(&mut self) { - self.node.remove(); + if let Some(mut node) = self.node.take() { + node.remove() + } } } @@ -98,16 +96,15 @@ where match self.ka_timeout.as_mut().unwrap().poll() { Ok(Async::Ready(_)) => { trace!("Slow request timed out, close connection"); - let proto = mem::replace(self.node.get_mut(), HttpProtocol::None); + let proto = mem::replace(&mut self.proto, HttpProtocol::None); if let HttpProtocol::Unknown(settings, io, buf) = proto { - *self.node.get_mut() = - HttpProtocol::H1(h1::Http1Dispatcher::for_error( - settings, - io, - StatusCode::REQUEST_TIMEOUT, - self.ka_timeout.take(), - buf, - )); + self.proto = HttpProtocol::H1(h1::Http1Dispatcher::for_error( + settings, + io, + StatusCode::REQUEST_TIMEOUT, + self.ka_timeout.take(), + buf, + )); return self.poll(); } return Ok(Async::Ready(())); @@ -117,19 +114,24 @@ where } } - if !self.node_reg { - self.node_reg = true; - let settings = match self.node.get_mut() { - HttpProtocol::H1(ref mut h1) => h1.settings().clone(), - HttpProtocol::H2(ref mut h2) => h2.settings().clone(), - HttpProtocol::Unknown(ref mut settings, _, _) => settings.clone(), + if self.node.is_none() { + self.node = Some(Node::new(())); + let _ = match self.proto { + HttpProtocol::H1(ref mut h1) => { + self.node.as_mut().map(|n| h1.settings().head().insert(n)) + } + HttpProtocol::H2(ref mut h2) => { + 
self.node.as_mut().map(|n| h2.settings().head().insert(n)) + } + HttpProtocol::Unknown(ref mut settings, _, _) => { + self.node.as_mut().map(|n| settings.head().insert(n)) + } HttpProtocol::None => unreachable!(), }; - settings.head().insert(&mut self.node); } let mut is_eof = false; - let kind = match self.node.get_mut() { + let kind = match self.proto { HttpProtocol::H1(ref mut h1) => return h1.poll(), HttpProtocol::H2(ref mut h2) => return h2.poll(), HttpProtocol::Unknown(_, ref mut io, ref mut buf) => { @@ -169,11 +171,11 @@ where }; // upgrade to specific http protocol - let proto = mem::replace(self.node.get_mut(), HttpProtocol::None); + let proto = mem::replace(&mut self.proto, HttpProtocol::None); if let HttpProtocol::Unknown(settings, io, buf) = proto { match kind { ProtocolKind::Http1 => { - *self.node.get_mut() = HttpProtocol::H1(h1::Http1Dispatcher::new( + self.proto = HttpProtocol::H1(h1::Http1Dispatcher::new( settings, io, buf, @@ -183,7 +185,7 @@ where return self.poll(); } ProtocolKind::Http2 => { - *self.node.get_mut() = HttpProtocol::H2(h2::Http2::new( + self.proto = HttpProtocol::H2(h2::Http2::new( settings, io, buf.freeze(), @@ -203,8 +205,8 @@ where T: IoStream, H: HttpHandler + 'static, { - node: Node>, - node_reg: bool, + proto: HttpProtocol, + node: Option>, } impl H1Channel @@ -214,14 +216,14 @@ where { pub(crate) fn new(settings: ServiceConfig, io: T) -> H1Channel { H1Channel { - node_reg: false, - node: Node::new(HttpProtocol::H1(h1::Http1Dispatcher::new( + node: None, + proto: HttpProtocol::H1(h1::Http1Dispatcher::new( settings, io, BytesMut::with_capacity(8192), false, None, - ))), + )), } } } @@ -232,7 +234,9 @@ where H: HttpHandler + 'static, { fn drop(&mut self) { - self.node.remove(); + if let Some(mut node) = self.node.take() { + node.remove(); + } } } @@ -245,16 +249,17 @@ where type Error = HttpDispatchError; fn poll(&mut self) -> Poll { - if !self.node_reg { - self.node_reg = true; - let settings = match self.node.get_mut() { - HttpProtocol::H1(ref mut h1) => h1.settings().clone(), + if self.node.is_none() { + self.node = Some(Node::new(())); + match self.proto { + HttpProtocol::H1(ref mut h1) => { + self.node.as_mut().map(|n| h1.settings().head().insert(n)); + } _ => unreachable!(), }; - settings.head().insert(&mut self.node); } - match self.node.get_mut() { + match self.proto { HttpProtocol::H1(ref mut h1) => h1.poll(), _ => unreachable!(), } @@ -276,10 +281,6 @@ impl Node { } } - fn get_mut(&mut self) -> &mut T { - &mut self.element - } - fn insert(&mut self, next_el: &mut Node) { let next: *mut Node = next_el as *const _ as *mut _; From 65e9201b4d586df303b7d0870cdb55a107d0327b Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 9 Oct 2018 11:35:57 -0700 Subject: [PATCH 185/219] Fixed panic during graceful shutdown --- CHANGES.md | 7 +++++++ src/server/acceptor.rs | 8 ++++---- src/server/channel.rs | 34 +++++++++++++++++----------------- 3 files changed, 28 insertions(+), 21 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f4f665a86..46f1fb367 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.7.10] - 2018-10-09 + +### Fixed + +* Fixed panic during graceful shutdown + + ## [0.7.9] - 2018-10-09 ### Added diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs index 2e1b1f283..a18dded9b 100644 --- a/src/server/acceptor.rs +++ b/src/server/acceptor.rs @@ -9,7 +9,7 @@ use tokio_reactor::Handle; use tokio_tcp::TcpStream; use tokio_timer::{sleep, Delay}; -use super::channel::HttpProtocol; +// use 
super::channel::HttpProtocol; use super::error::AcceptorError; use super::handler::HttpHandler; use super::settings::ServiceConfig; @@ -367,9 +367,9 @@ where } ServerMessage::Shutdown(_) => Either::B(ok(())), ServerMessage::ForceShutdown => { - self.settings - .head() - .traverse(|proto: &mut HttpProtocol| proto.shutdown()); + // self.settings + // .head() + // .traverse(|proto: &mut HttpProtocol| proto.shutdown()); Either::B(ok(())) } } diff --git a/src/server/channel.rs b/src/server/channel.rs index 1f4ec5b19..af90d9346 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -20,23 +20,23 @@ pub(crate) enum HttpProtocol { None, } -impl HttpProtocol { - pub(crate) fn shutdown(&mut self) { - match self { - HttpProtocol::H1(ref mut h1) => { - let io = h1.io(); - let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0))); - let _ = IoStream::shutdown(io, Shutdown::Both); - } - HttpProtocol::H2(ref mut h2) => h2.shutdown(), - HttpProtocol::Unknown(_, io, _) => { - let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0))); - let _ = IoStream::shutdown(io, Shutdown::Both); - } - HttpProtocol::None => (), - } - } -} +// impl HttpProtocol { +// fn shutdown_(&mut self) { +// match self { +// HttpProtocol::H1(ref mut h1) => { +// let io = h1.io(); +// let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0))); +// let _ = IoStream::shutdown(io, Shutdown::Both); +// } +// HttpProtocol::H2(ref mut h2) => h2.shutdown(), +// HttpProtocol::Unknown(_, io, _) => { +// let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0))); +// let _ = IoStream::shutdown(io, Shutdown::Both); +// } +// HttpProtocol::None => (), +// } +// } +// } enum ProtocolKind { Http1, From 4d17a9afcca0d8818f41feb6d5c24b58b960b45e Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 9 Oct 2018 11:42:52 -0700 Subject: [PATCH 186/219] update version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 2d606cc07..51474c264 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.9" +version = "0.7.10" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." 
readme = "README.md" From c63838bb71bfa96c46341982afd17cb3608aaf0b Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 9 Oct 2018 13:12:49 -0700 Subject: [PATCH 187/219] fix 204 support for http/2 --- CHANGES.md | 7 +++++++ src/server/h2writer.rs | 13 +++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 46f1fb367..260b6df74 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.7.11] - 2018-10-09 + +### Fixed + +* Fixed 204 responses for http/2 + + ## [0.7.10] - 2018-10-09 ### Fixed diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index 51d4dce6f..66f2923c4 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -96,6 +96,7 @@ impl Writer for H2Writer { let mut has_date = false; let mut resp = Response::new(()); + let mut len_is_set = false; *resp.status_mut() = msg.status(); *resp.version_mut() = Version::HTTP_2; for (key, value) in msg.headers().iter() { @@ -107,6 +108,9 @@ impl Writer for H2Writer { }, CONTENT_LENGTH => match info.length { ResponseLength::None => (), + ResponseLength::Zero => { + len_is_set = true; + } _ => continue, }, DATE => has_date = true, @@ -126,8 +130,10 @@ impl Writer for H2Writer { // content length match info.length { ResponseLength::Zero => { - resp.headers_mut() - .insert(CONTENT_LENGTH, HeaderValue::from_static("0")); + if !len_is_set { + resp.headers_mut() + .insert(CONTENT_LENGTH, HeaderValue::from_static("0")); + } self.flags.insert(Flags::EOF); } ResponseLength::Length(len) => { @@ -144,6 +150,9 @@ impl Writer for H2Writer { resp.headers_mut() .insert(CONTENT_LENGTH, HeaderValue::try_from(l.as_str()).unwrap()); } + ResponseLength::None => { + self.flags.insert(Flags::EOF); + } _ => (), } if let Some(ce) = info.content_encoding { From f45038bbfe338661f3b958b10c37dd64d3d70650 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Tue, 9 Oct 2018 13:23:37 -0700 Subject: [PATCH 188/219] remove unused code --- Cargo.toml | 2 +- README.md | 5 +- src/server/acceptor.rs | 37 ++++------- src/server/builder.rs | 2 - src/server/channel.rs | 136 ----------------------------------------- src/server/h1.rs | 10 --- src/server/h2.rs | 9 --- src/server/settings.rs | 9 +-- 8 files changed, 15 insertions(+), 195 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 51474c264..14102881a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.10" +version = "0.7.11" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md" diff --git a/README.md b/README.md index 4e396cb91..321f82abf 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,6 @@ Actix web is a simple, pragmatic and extremely fast web framework for Rust. 
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support * Transparent content compression/decompression (br, gzip, deflate) * Configurable [request routing](https://actix.rs/docs/url-dispatch/) -* Graceful server shutdown * Multipart streams * Static assets * SSL support with OpenSSL or `native-tls` @@ -51,7 +50,7 @@ fn main() { * [Protobuf support](https://github.com/actix/examples/tree/master/protobuf/) * [Multipart streams](https://github.com/actix/examples/tree/master/multipart/) * [Simple websocket](https://github.com/actix/examples/tree/master/websocket/) -* [Tera](https://github.com/actix/examples/tree/master/template_tera/) / +* [Tera](https://github.com/actix/examples/tree/master/template_tera/) / [Askama](https://github.com/actix/examples/tree/master/template_askama/) templates * [Diesel integration](https://github.com/actix/examples/tree/master/diesel/) * [r2d2](https://github.com/actix/examples/tree/master/r2d2/) @@ -66,8 +65,6 @@ You may consider checking out * [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r16&hw=ph&test=plaintext) -* Some basic benchmarks could be found in this [repository](https://github.com/fafhrd91/benchmarks). - ## License This project is licensed under either of diff --git a/src/server/acceptor.rs b/src/server/acceptor.rs index a18dded9b..994b4b7bd 100644 --- a/src/server/acceptor.rs +++ b/src/server/acceptor.rs @@ -9,10 +9,7 @@ use tokio_reactor::Handle; use tokio_tcp::TcpStream; use tokio_timer::{sleep, Delay}; -// use super::channel::HttpProtocol; use super::error::AcceptorError; -use super::handler::HttpHandler; -use super::settings::ServiceConfig; use super::IoStream; /// This trait indicates types that can create acceptor service for http server. @@ -275,56 +272,49 @@ impl Future for AcceptorTimeoutResponse { } } -pub(crate) struct ServerMessageAcceptor { +pub(crate) struct ServerMessageAcceptor { inner: T, - settings: ServiceConfig, } -impl ServerMessageAcceptor +impl ServerMessageAcceptor where - H: HttpHandler, T: NewService, { - pub(crate) fn new(settings: ServiceConfig, inner: T) -> Self { - ServerMessageAcceptor { inner, settings } + pub(crate) fn new(inner: T) -> Self { + ServerMessageAcceptor { inner } } } -impl NewService for ServerMessageAcceptor +impl NewService for ServerMessageAcceptor where - H: HttpHandler, T: NewService, { type Request = ServerMessage; type Response = (); type Error = T::Error; type InitError = T::InitError; - type Service = ServerMessageAcceptorService; - type Future = ServerMessageAcceptorResponse; + type Service = ServerMessageAcceptorService; + type Future = ServerMessageAcceptorResponse; fn new_service(&self) -> Self::Future { ServerMessageAcceptorResponse { fut: self.inner.new_service(), - settings: self.settings.clone(), } } } -pub(crate) struct ServerMessageAcceptorResponse +pub(crate) struct ServerMessageAcceptorResponse where - H: HttpHandler, T: NewService, { fut: T::Future, - settings: ServiceConfig, } -impl Future for ServerMessageAcceptorResponse +impl Future for ServerMessageAcceptorResponse where - H: HttpHandler, T: NewService, { - type Item = ServerMessageAcceptorService; + type Item = ServerMessageAcceptorService; type Error = T::InitError; fn poll(&mut self) -> Poll { @@ -332,20 +322,17 @@ where Async::NotReady => Ok(Async::NotReady), Async::Ready(service) => Ok(Async::Ready(ServerMessageAcceptorService { inner: service, - settings: self.settings.clone(), })), } } } -pub(crate) struct ServerMessageAcceptorService { +pub(crate) struct 
ServerMessageAcceptorService { inner: T, - settings: ServiceConfig, } -impl Service for ServerMessageAcceptorService +impl Service for ServerMessageAcceptorService where - H: HttpHandler, T: Service, { type Request = ServerMessage; diff --git a/src/server/builder.rs b/src/server/builder.rs index ec6ce9923..4f159af13 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -60,7 +60,6 @@ where if secure { Either::B(ServerMessageAcceptor::new( - settings.clone(), TcpAcceptor::new(AcceptorTimeout::new( client_timeout, acceptor.create(), @@ -74,7 +73,6 @@ where )) } else { Either::A(ServerMessageAcceptor::new( - settings.clone(), TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service)) .map_err(|_| ()) .map_init_err(|_| ()) diff --git a/src/server/channel.rs b/src/server/channel.rs index af90d9346..d65b05e85 100644 --- a/src/server/channel.rs +++ b/src/server/channel.rs @@ -50,7 +50,6 @@ where H: HttpHandler + 'static, { proto: HttpProtocol, - node: Option>, ka_timeout: Option, } @@ -64,24 +63,11 @@ where HttpChannel { ka_timeout, - node: None, proto: HttpProtocol::Unknown(settings, io, BytesMut::with_capacity(8192)), } } } -impl Drop for HttpChannel -where - T: IoStream, - H: HttpHandler + 'static, -{ - fn drop(&mut self) { - if let Some(mut node) = self.node.take() { - node.remove() - } - } -} - impl Future for HttpChannel where T: IoStream, @@ -114,22 +100,6 @@ where } } - if self.node.is_none() { - self.node = Some(Node::new(())); - let _ = match self.proto { - HttpProtocol::H1(ref mut h1) => { - self.node.as_mut().map(|n| h1.settings().head().insert(n)) - } - HttpProtocol::H2(ref mut h2) => { - self.node.as_mut().map(|n| h2.settings().head().insert(n)) - } - HttpProtocol::Unknown(ref mut settings, _, _) => { - self.node.as_mut().map(|n| settings.head().insert(n)) - } - HttpProtocol::None => unreachable!(), - }; - } - let mut is_eof = false; let kind = match self.proto { HttpProtocol::H1(ref mut h1) => return h1.poll(), @@ -206,7 +176,6 @@ where H: HttpHandler + 'static, { proto: HttpProtocol, - node: Option>, } impl H1Channel @@ -216,7 +185,6 @@ where { pub(crate) fn new(settings: ServiceConfig, io: T) -> H1Channel { H1Channel { - node: None, proto: HttpProtocol::H1(h1::Http1Dispatcher::new( settings, io, @@ -228,18 +196,6 @@ where } } -impl Drop for H1Channel -where - T: IoStream, - H: HttpHandler + 'static, -{ - fn drop(&mut self) { - if let Some(mut node) = self.node.take() { - node.remove(); - } - } -} - impl Future for H1Channel where T: IoStream, @@ -249,16 +205,6 @@ where type Error = HttpDispatchError; fn poll(&mut self) -> Poll { - if self.node.is_none() { - self.node = Some(Node::new(())); - match self.proto { - HttpProtocol::H1(ref mut h1) => { - self.node.as_mut().map(|n| h1.settings().head().insert(n)); - } - _ => unreachable!(), - }; - } - match self.proto { HttpProtocol::H1(ref mut h1) => h1.poll(), _ => unreachable!(), @@ -266,88 +212,6 @@ where } } -pub(crate) struct Node { - next: Option<*mut Node>, - prev: Option<*mut Node>, - element: T, -} - -impl Node { - fn new(element: T) -> Self { - Node { - element, - next: None, - prev: None, - } - } - - fn insert(&mut self, next_el: &mut Node) { - let next: *mut Node = next_el as *const _ as *mut _; - - if let Some(next2) = self.next { - unsafe { - let n = next2.as_mut().unwrap(); - n.prev = Some(next); - } - next_el.next = Some(next2 as *mut _); - } - self.next = Some(next); - - unsafe { - let next: &mut Node = &mut *next; - next.prev = Some(self as *mut _); - } - } - - fn remove(&mut self) { - let next = 
self.next.take(); - let prev = self.prev.take(); - - if let Some(prev) = prev { - unsafe { - prev.as_mut().unwrap().next = next; - } - } - if let Some(next) = next { - unsafe { - next.as_mut().unwrap().prev = prev; - } - } - } -} - -impl Node<()> { - pub(crate) fn head() -> Self { - Node { - next: None, - prev: None, - element: (), - } - } - - pub(crate) fn traverse)>(&self, f: F) - where - T: IoStream, - H: HttpHandler + 'static, - { - if let Some(n) = self.next.as_ref() { - unsafe { - let mut next: &mut Node> = - &mut *(n.as_ref().unwrap() as *const _ as *mut _); - loop { - f(&mut next.element); - - next = if let Some(n) = next.next.as_ref() { - &mut **n - } else { - return; - } - } - } - } - } -} - /// Wrapper for `AsyncRead + AsyncWrite` types pub(crate) struct WrapperStream where diff --git a/src/server/h1.rs b/src/server/h1.rs index 0fb72ef7e..a2ffc0551 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -147,16 +147,6 @@ where disp } - #[inline] - pub fn settings(&self) -> &ServiceConfig { - &self.settings - } - - #[inline] - pub(crate) fn io(&mut self) -> &mut T { - self.stream.get_mut() - } - #[inline] fn can_read(&self) -> bool { if self.flags.contains(Flags::READ_DISCONNECTED) { diff --git a/src/server/h2.rs b/src/server/h2.rs index 6ad9af709..35afa3397 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -89,15 +89,6 @@ where } } - pub(crate) fn shutdown(&mut self) { - self.state = State::Empty; - self.tasks.clear(); - } - - pub fn settings(&self) -> &ServiceConfig { - &self.settings - } - pub fn poll(&mut self) -> Poll<(), HttpDispatchError> { self.poll_keepalive()?; diff --git a/src/server/settings.rs b/src/server/settings.rs index bafffb5f7..66a4eed88 100644 --- a/src/server/settings.rs +++ b/src/server/settings.rs @@ -1,4 +1,4 @@ -use std::cell::{Cell, RefCell, RefMut}; +use std::cell::{Cell, RefCell}; use std::collections::VecDeque; use std::fmt::Write; use std::rc::Rc; @@ -15,7 +15,6 @@ use time; use tokio_current_thread::spawn; use tokio_timer::{sleep, Delay}; -use super::channel::Node; use super::message::{Request, RequestPool}; use super::KeepAlive; use body::Body; @@ -138,7 +137,6 @@ struct Inner { ka_enabled: bool, bytes: Rc, messages: &'static RequestPool, - node: RefCell>, date: Cell>, } @@ -173,7 +171,6 @@ impl ServiceConfig { client_shutdown, bytes: Rc::new(SharedBytesPool::new()), messages: RequestPool::pool(settings), - node: RefCell::new(Node::head()), date: Cell::new(None), })) } @@ -183,10 +180,6 @@ impl ServiceConfig { ServiceConfigBuilder::new(handler) } - pub(crate) fn head(&self) -> RefMut> { - self.0.node.borrow_mut() - } - pub(crate) fn handler(&self) -> &H { &self.0.handler } From ec8aef6b433832d5ab384d7bf66f847356900189 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 10 Oct 2018 08:36:16 -0700 Subject: [PATCH 189/219] update dep versions --- CHANGES.md | 9 +++++++++ Cargo.toml | 9 ++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 260b6df74..39b97cc0b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,14 @@ # Changes +## [0.7.12] - 2018-10-10 + +### Changed + +* Set min version for actix + +* Set min version for actix-net + + ## [0.7.11] - 2018-10-09 ### Fixed diff --git a/Cargo.toml b/Cargo.toml index 14102881a..ea400dc66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.11" +version = "0.7.12" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." 
readme = "README.md" @@ -61,11 +61,12 @@ flate2-rust = ["flate2/rust_backend"] cell = ["actix-net/cell"] [dependencies] -actix = "0.7.0" -actix-net = "0.1.0" +actix = "^0.7.5" +actix-net = "^0.1.1" base64 = "0.9" bitflags = "1.0" +failure = "^0.1.2" h2 = "0.1" htmlescape = "0.3" http = "^0.1.8" @@ -93,8 +94,6 @@ cookie = { version="0.11", features=["percent-encode"] } brotli2 = { version="^0.3.2", optional = true } flate2 = { version="^1.0.2", optional = true, default-features = false } -failure = "^0.1.2" - # io mio = "^0.6.13" net2 = "0.2" From 32145cf6c31a9d149041b0894029190e3c4086ba Mon Sep 17 00:00:00 2001 From: jeizsm Date: Thu, 11 Oct 2018 11:05:07 +0300 Subject: [PATCH 190/219] fix after update tokio-rustls (#542) --- CHANGES.md | 6 ++++++ src/client/connector.rs | 16 +++++----------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 39b97cc0b..ad5ae9e1b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,11 @@ # Changes +## [0.7.13] - 2018-10-* + +### Fixed + +* Fixed rustls build + ## [0.7.12] - 2018-10-10 ### Changed diff --git a/src/client/connector.rs b/src/client/connector.rs index 07c7b646d..3f4ac27cb 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -37,15 +37,9 @@ use { ))] use { rustls::ClientConfig, std::io::Error as SslError, std::sync::Arc, - tokio_rustls::ClientConfigExt, webpki::DNSNameRef, webpki_roots, + tokio_rustls::TlsConnector as SslConnector, webpki::DNSNameRef, webpki_roots, }; -#[cfg(all( - feature = "rust-tls", - not(any(feature = "alpn", feature = "tls", feature = "ssl")) -))] -type SslConnector = Arc; - #[cfg(not(any( feature = "alpn", feature = "ssl", @@ -282,7 +276,7 @@ impl Default for ClientConnector { config .root_store .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); - Arc::new(config) + SslConnector::from(Arc::new(config)) } #[cfg_attr(rustfmt, rustfmt_skip)] @@ -373,7 +367,7 @@ impl ClientConnector { /// config /// .root_store /// .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); - /// let conn = ClientConnector::with_connector(Arc::new(config)).start(); + /// let conn = ClientConnector::with_connector(config).start(); /// /// conn.send( /// Connect::new("https://www.rust-lang.org").unwrap()) // <- connect to host @@ -390,7 +384,7 @@ impl ClientConnector { /// ``` pub fn with_connector(connector: ClientConfig) -> ClientConnector { // keep level of indirection for docstrings matching featureflags - Self::with_connector_impl(Arc::new(connector)) + Self::with_connector_impl(SslConnector::from(Arc::new(connector))) } #[cfg(all( @@ -832,7 +826,7 @@ impl ClientConnector { let host = DNSNameRef::try_from_ascii_str(&key.host).unwrap(); fut::Either::A( act.connector - .connect_async(host, stream) + .connect(host, stream) .into_actor(act) .then(move |res, _, _| { match res { From d145136e569b49caec0fe87735ab0c736b2eb5de Mon Sep 17 00:00:00 2001 From: Douman Date: Sat, 13 Oct 2018 09:54:03 +0300 Subject: [PATCH 191/219] Add individual check for TLS features --- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index 62867e030..6793745f7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,6 +32,9 @@ script: - | if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then cargo clean + cargo check --feature rust-tls + cargo check --feature ssl + cargo check --feature tls cargo test --features="ssl,tls,rust-tls" -- --nocapture fi - | From 63a443fce0560f2d4275032cd12c3fb2d22dd931 Mon Sep 17 00:00:00 2001 From: Douman Date: Sat, 13 Oct 2018 10:05:21 
+0300 Subject: [PATCH 192/219] Correct build script --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6793745f7..c5dfcd81b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,9 +32,9 @@ script: - | if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then cargo clean - cargo check --feature rust-tls - cargo check --feature ssl - cargo check --feature tls + cargo check --features rust-tls + cargo check --features ssl + cargo check --features tls cargo test --features="ssl,tls,rust-tls" -- --nocapture fi - | From dd948f836e75b4edecb912203fe9f6fe89365115 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 14 Oct 2018 08:08:12 -0700 Subject: [PATCH 193/219] HttpServer not sending streamed request body on HTTP/2 requests #544 --- CHANGES.md | 7 +++++-- src/httprequest.rs | 2 +- src/server/output.rs | 23 +++++++++-------------- 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ad5ae9e1b..62d2e9157 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,13 @@ # Changes -## [0.7.13] - 2018-10-* +## [0.7.13] - 2018-10-14 ### Fixed -* Fixed rustls build +* Fixed rustls support + +* HttpServer not sending streamed request body on HTTP/2 requests #544 + ## [0.7.12] - 2018-10-10 diff --git a/src/httprequest.rs b/src/httprequest.rs index d8c49496a..0e4f74e5e 100644 --- a/src/httprequest.rs +++ b/src/httprequest.rs @@ -216,7 +216,7 @@ impl HttpRequest { self.url_for(name, &NO_PARAMS) } - /// This method returns reference to current `RouteInfo` object. + /// This method returns reference to current `ResourceInfo` object. #[inline] pub fn resource(&self) -> &ResourceInfo { &self.resource diff --git a/src/server/output.rs b/src/server/output.rs index 35f3c7a45..104700d44 100644 --- a/src/server/output.rs +++ b/src/server/output.rs @@ -299,12 +299,11 @@ impl Output { match resp.chunked() { Some(true) => { // Enable transfer encoding - if version == Version::HTTP_2 { - info.length = ResponseLength::None; - TransferEncoding::eof(buf) - } else { - info.length = ResponseLength::Chunked; + info.length = ResponseLength::Chunked; + if version == Version::HTTP_11 { TransferEncoding::chunked(buf) + } else { + TransferEncoding::eof(buf) } } Some(false) => TransferEncoding::eof(buf), @@ -337,15 +336,11 @@ impl Output { } } else { // Enable transfer encoding - match version { - Version::HTTP_11 => { - info.length = ResponseLength::Chunked; - TransferEncoding::chunked(buf) - } - _ => { - info.length = ResponseLength::None; - TransferEncoding::eof(buf) - } + info.length = ResponseLength::Chunked; + if version == Version::HTTP_11 { + TransferEncoding::chunked(buf) + } else { + TransferEncoding::eof(buf) } } } From c04b4678f136b118fef40b979c0df90402a8e0e4 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 14 Oct 2018 08:10:41 -0700 Subject: [PATCH 194/219] bump version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index ea400dc66..d98ce5eac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.12" +version = "0.7.13" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." 
readme = "README.md" From f383f618b537d6912ba9e2e7e27402b7bba964e1 Mon Sep 17 00:00:00 2001 From: ivan-ochc Date: Thu, 18 Oct 2018 21:27:31 +0300 Subject: [PATCH 195/219] Fix typo in error message (#554) --- src/pipeline.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/pipeline.rs b/src/pipeline.rs index 1940f9308..a938f2eb2 100644 --- a/src/pipeline.rs +++ b/src/pipeline.rs @@ -551,12 +551,12 @@ impl ProcessResponse { if self.resp.as_ref().unwrap().status().is_server_error() { error!( - "Error occured during request handling, status: {} {}", + "Error occurred during request handling, status: {} {}", self.resp.as_ref().unwrap().status(), err ); } else { warn!( - "Error occured during request handling: {}", + "Error occurred during request handling: {}", err ); } From 960274ada8540064364579cb5a7caeb289ae7340 Mon Sep 17 00:00:00 2001 From: Douman Date: Fri, 19 Oct 2018 07:52:10 +0300 Subject: [PATCH 196/219] Refactoring of server output to not exclude HTTP_10 (#552) --- CHANGES.md | 6 ++++++ src/server/output.rs | 12 ++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 62d2e9157..8ac1724a3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,11 @@ # Changes +## [0.7.14] - 2018-10-x + +### Fixed + +* HttpServer now treats streaming bodies the same for HTTP/1.x protocols. #549 + ## [0.7.13] - 2018-10-14 ### Fixed diff --git a/src/server/output.rs b/src/server/output.rs index 104700d44..ac89d6440 100644 --- a/src/server/output.rs +++ b/src/server/output.rs @@ -300,10 +300,10 @@ impl Output { Some(true) => { // Enable transfer encoding info.length = ResponseLength::Chunked; - if version == Version::HTTP_11 { - TransferEncoding::chunked(buf) - } else { + if version == Version::HTTP_2 { TransferEncoding::eof(buf) + } else { + TransferEncoding::chunked(buf) } } Some(false) => TransferEncoding::eof(buf), @@ -337,10 +337,10 @@ impl Output { } else { // Enable transfer encoding info.length = ResponseLength::Chunked; - if version == Version::HTTP_11 { - TransferEncoding::chunked(buf) - } else { + if version == Version::HTTP_2 { TransferEncoding::eof(buf) + } else { + TransferEncoding::chunked(buf) } } } From 42d5d48e7105270e89eb8af6b111c305c5536e00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois?= Date: Sat, 20 Oct 2018 05:43:43 +0200 Subject: [PATCH 197/219] add a way to configure error treatment for Query and Path extractors (#550) * add a way to configure error treatment for Query extractor * allow error handler to be customized for Path extractor --- CHANGES.md | 4 ++ src/extractor.rs | 122 ++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 114 insertions(+), 12 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8ac1724a3..f5adb82c3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,6 +6,10 @@ * HttpServer now treats streaming bodies the same for HTTP/1.x protocols. #549 +### Added + +* Add method to configure custom error handler to `Query` and `Path` extractors. 
+ ## [0.7.13] - 2018-10-14 ### Fixed diff --git a/src/extractor.rs b/src/extractor.rs index 7b0b4b003..45e29ace0 100644 --- a/src/extractor.rs +++ b/src/extractor.rs @@ -111,18 +111,64 @@ impl FromRequest for Path where T: DeserializeOwned, { - type Config = (); + type Config = PathConfig; type Result = Result; #[inline] - fn from_request(req: &HttpRequest, _: &Self::Config) -> Self::Result { + fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result { let req = req.clone(); + let req2 = req.clone(); + let err = Rc::clone(&cfg.ehandler); de::Deserialize::deserialize(PathDeserializer::new(&req)) - .map_err(ErrorNotFound) + .map_err(move |e| (*err)(e, &req2)) .map(|inner| Path { inner }) } } +/// Path extractor configuration +/// +/// ```rust +/// # extern crate actix_web; +/// use actix_web::{error, http, App, HttpResponse, Path, Result}; +/// +/// /// deserialize `Info` from request's body, max payload size is 4kb +/// fn index(info: Path<(u32, String)>) -> Result { +/// Ok(format!("Welcome {}!", info.1)) +/// } +/// +/// fn main() { +/// let app = App::new().resource("/index.html/{id}/{name}", |r| { +/// r.method(http::Method::GET).with_config(index, |cfg| { +/// cfg.0.error_handler(|err, req| { +/// // <- create custom error response +/// error::InternalError::from_response(err, HttpResponse::Conflict().finish()).into() +/// }); +/// }) +/// }); +/// } +/// ``` +pub struct PathConfig { + ehandler: Rc) -> Error>, +} +impl PathConfig { + /// Set custom error handler + pub fn error_handler(&mut self, f: F) -> &mut Self + where + F: Fn(serde_urlencoded::de::Error, &HttpRequest) -> Error + 'static, + { + self.ehandler = Rc::new(f); + self + } +} + +impl Default for PathConfig { + fn default() -> Self { + PathConfig { + ehandler: Rc::new(|e, _| ErrorNotFound(e)), + } + } +} + impl fmt::Debug for Path { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.inner.fmt(f) @@ -200,17 +246,69 @@ impl FromRequest for Query where T: de::DeserializeOwned, { - type Config = (); + type Config = QueryConfig; type Result = Result; #[inline] - fn from_request(req: &HttpRequest, _: &Self::Config) -> Self::Result { + fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result { + let req2 = req.clone(); + let err = Rc::clone(&cfg.ehandler); serde_urlencoded::from_str::(req.query_string()) - .map_err(|e| e.into()) + .map_err(move |e| (*err)(e, &req2)) .map(Query) } } +/// Query extractor configuration +/// +/// ```rust +/// # extern crate actix_web; +/// #[macro_use] extern crate serde_derive; +/// use actix_web::{error, http, App, HttpResponse, Query, Result}; +/// +/// #[derive(Deserialize)] +/// struct Info { +/// username: String, +/// } +/// +/// /// deserialize `Info` from request's body, max payload size is 4kb +/// fn index(info: Query) -> Result { +/// Ok(format!("Welcome {}!", info.username)) +/// } +/// +/// fn main() { +/// let app = App::new().resource("/index.html", |r| { +/// r.method(http::Method::GET).with_config(index, |cfg| { +/// cfg.0.error_handler(|err, req| { +/// // <- create custom error response +/// error::InternalError::from_response(err, HttpResponse::Conflict().finish()).into() +/// }); +/// }) +/// }); +/// } +/// ``` +pub struct QueryConfig { + ehandler: Rc) -> Error>, +} +impl QueryConfig { + /// Set custom error handler + pub fn error_handler(&mut self, f: F) -> &mut Self + where + F: Fn(serde_urlencoded::de::Error, &HttpRequest) -> Error + 'static, + { + self.ehandler = Rc::new(f); + self + } +} + +impl Default for QueryConfig { + fn default() 
-> Self { + QueryConfig { + ehandler: Rc::new(|e, _| e.into()), + } + } +} + impl fmt::Debug for Query { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) @@ -951,15 +1049,15 @@ mod tests { let info = router.recognize(&req, &(), 0); let req = req.with_route_info(info); - let s = Path::::from_request(&req, &()).unwrap(); + let s = Path::::from_request(&req, &PathConfig::default()).unwrap(); assert_eq!(s.key, "name"); assert_eq!(s.value, "user1"); - let s = Path::<(String, String)>::from_request(&req, &()).unwrap(); + let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap(); assert_eq!(s.0, "name"); assert_eq!(s.1, "user1"); - let s = Query::::from_request(&req, &()).unwrap(); + let s = Query::::from_request(&req, &QueryConfig::default()).unwrap(); assert_eq!(s.id, "test"); let mut router = Router::<()>::default(); @@ -968,11 +1066,11 @@ mod tests { let info = router.recognize(&req, &(), 0); let req = req.with_route_info(info); - let s = Path::::from_request(&req, &()).unwrap(); + let s = Path::::from_request(&req, &PathConfig::default()).unwrap(); assert_eq!(s.as_ref().key, "name"); assert_eq!(s.value, 32); - let s = Path::<(String, u8)>::from_request(&req, &()).unwrap(); + let s = Path::<(String, u8)>::from_request(&req, &PathConfig::default()).unwrap(); assert_eq!(s.0, "name"); assert_eq!(s.1, 32); @@ -989,7 +1087,7 @@ mod tests { let req = TestRequest::with_uri("/32/").finish(); let info = router.recognize(&req, &(), 0); let req = req.with_route_info(info); - assert_eq!(*Path::::from_request(&req, &()).unwrap(), 32); + assert_eq!(*Path::::from_request(&req, &&PathConfig::default()).unwrap(), 32); } #[test] From 5f91f5eda6f3b61d91a63b5ef651ef9f2617d7b7 Mon Sep 17 00:00:00 2001 From: Douman Date: Fri, 26 Oct 2018 10:59:06 +0300 Subject: [PATCH 198/219] Correct IoStream::set_keepalive for UDS (#564) Enable uds feature in tests --- .travis.yml | 2 +- src/server/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index c5dfcd81b..9b1bcff54 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,7 +35,7 @@ script: cargo check --features rust-tls cargo check --features ssl cargo check --features tls - cargo test --features="ssl,tls,rust-tls" -- --nocapture + cargo test --features="ssl,tls,rust-tls,uds" -- --nocapture fi - | if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then diff --git a/src/server/mod.rs b/src/server/mod.rs index 8d7195166..0a16f26b9 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -334,7 +334,7 @@ impl IoStream for ::tokio_uds::UnixStream { } #[inline] - fn set_keepalive(&mut self, _nodelay: bool) -> io::Result<()> { + fn set_keepalive(&mut self, _dur: Option) -> io::Result<()> { Ok(()) } } From cfd9a56ff74dd6cb6ad38b6a67b40ed1763d6071 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Sun, 28 Oct 2018 09:24:19 -0700 Subject: [PATCH 199/219] Add async/await ref --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 321f82abf..db3cc68c5 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ Actix web is a simple, pragmatic and extremely fast web framework for Rust. * Middlewares ([Logger, Session, CORS, CSRF, etc](https://actix.rs/docs/middleware/)) * Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html) * Built on top of [Actix actor framework](https://github.com/actix/actix) +* Experimental [Async/Await](https://github.com/mehcode/actix-web-async-await) support. 
## Documentation & community resources From 3b536ee96c0118e8cf452b28f6acab6085db22a6 Mon Sep 17 00:00:00 2001 From: Stanislav Tkach Date: Thu, 1 Nov 2018 10:14:48 +0200 Subject: [PATCH 200/219] Use old clippy attributes syntax (#562) --- src/client/connector.rs | 2 +- src/client/writer.rs | 2 +- src/httpresponse.rs | 2 +- src/info.rs | 2 +- src/middleware/defaultheaders.rs | 2 +- src/scope.rs | 2 +- src/server/h2writer.rs | 2 +- src/server/http.rs | 2 +- src/server/output.rs | 4 ++-- src/ws/frame.rs | 2 +- src/ws/mask.rs | 6 +++--- 11 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/client/connector.rs b/src/client/connector.rs index 3f4ac27cb..3990c955c 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -287,7 +287,7 @@ impl Default for ClientConnector { } }; - #[cfg_attr(feature = "cargo-clippy", allow(clippy::let_unit_value))] + #[cfg_attr(feature = "cargo-clippy", allow(let_unit_value))] ClientConnector::with_connector_impl(connector) } } diff --git a/src/client/writer.rs b/src/client/writer.rs index e74f22332..321753bbf 100644 --- a/src/client/writer.rs +++ b/src/client/writer.rs @@ -1,6 +1,6 @@ #![cfg_attr( feature = "cargo-clippy", - allow(clippy::redundant_field_names) + allow(redundant_field_names) )] use std::cell::RefCell; diff --git a/src/httpresponse.rs b/src/httpresponse.rs index 8b091d42e..52dd8046b 100644 --- a/src/httpresponse.rs +++ b/src/httpresponse.rs @@ -694,7 +694,7 @@ impl HttpResponseBuilder { } #[inline] -#[cfg_attr(feature = "cargo-clippy", allow(clippy::borrowed_box))] +#[cfg_attr(feature = "cargo-clippy", allow(borrowed_box))] fn parts<'a>( parts: &'a mut Option>, err: &Option, ) -> Option<&'a mut Box> { diff --git a/src/info.rs b/src/info.rs index 5a2f21805..43c22123e 100644 --- a/src/info.rs +++ b/src/info.rs @@ -18,7 +18,7 @@ impl ConnectionInfo { /// Create *ConnectionInfo* instance for a request. #[cfg_attr( feature = "cargo-clippy", - allow(clippy::cyclomatic_complexity) + allow(cyclomatic_complexity) )] pub fn update(&mut self, req: &Request) { let mut host = None; diff --git a/src/middleware/defaultheaders.rs b/src/middleware/defaultheaders.rs index d980a2503..a33fa6a33 100644 --- a/src/middleware/defaultheaders.rs +++ b/src/middleware/defaultheaders.rs @@ -48,7 +48,7 @@ impl DefaultHeaders { /// Set a header. 
#[inline] - #[cfg_attr(feature = "cargo-clippy", allow(clippy::match_wild_err_arm))] + #[cfg_attr(feature = "cargo-clippy", allow(match_wild_err_arm))] pub fn header(mut self, key: K, value: V) -> Self where HeaderName: HttpTryFrom, diff --git a/src/scope.rs b/src/scope.rs index 43789d427..1bddc0e01 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -61,7 +61,7 @@ pub struct Scope { #[cfg_attr( feature = "cargo-clippy", - allow(clippy::new_without_default_derive) + allow(new_without_default_derive) )] impl Scope { /// Create a new scope diff --git a/src/server/h2writer.rs b/src/server/h2writer.rs index 66f2923c4..fef6f889a 100644 --- a/src/server/h2writer.rs +++ b/src/server/h2writer.rs @@ -1,6 +1,6 @@ #![cfg_attr( feature = "cargo-clippy", - allow(clippy::redundant_field_names) + allow(redundant_field_names) )] use std::{cmp, io}; diff --git a/src/server/http.rs b/src/server/http.rs index 9ecd4a5d2..0bec8be3f 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -326,7 +326,7 @@ where #[doc(hidden)] #[cfg_attr( feature = "cargo-clippy", - allow(clippy::needless_pass_by_value) + allow(needless_pass_by_value) )] pub fn bind_with(mut self, addr: S, acceptor: A) -> io::Result where diff --git a/src/server/output.rs b/src/server/output.rs index ac89d6440..4a86ffbb7 100644 --- a/src/server/output.rs +++ b/src/server/output.rs @@ -438,7 +438,7 @@ impl ContentEncoder { } } - #[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))] + #[cfg_attr(feature = "cargo-clippy", allow(inline_always))] #[inline(always)] pub fn write_eof(&mut self) -> Result { let encoder = @@ -480,7 +480,7 @@ impl ContentEncoder { } } - #[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))] + #[cfg_attr(feature = "cargo-clippy", allow(inline_always))] #[inline(always)] pub fn write(&mut self, data: &[u8]) -> Result<(), io::Error> { match *self { diff --git a/src/ws/frame.rs b/src/ws/frame.rs index d5fa98272..5e4fd8290 100644 --- a/src/ws/frame.rs +++ b/src/ws/frame.rs @@ -46,7 +46,7 @@ impl Frame { Frame::message(payload, OpCode::Close, true, genmask) } - #[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))] + #[cfg_attr(feature = "cargo-clippy", allow(type_complexity))] fn read_copy_md( pl: &mut PayloadBuffer, server: bool, max_size: usize, ) -> Poll)>, ProtocolError> diff --git a/src/ws/mask.rs b/src/ws/mask.rs index a88c21afb..18ce57bb7 100644 --- a/src/ws/mask.rs +++ b/src/ws/mask.rs @@ -1,5 +1,5 @@ //! This is code from [Tungstenite project](https://github.com/snapview/tungstenite-rs) -#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] +#![cfg_attr(feature = "cargo-clippy", allow(cast_ptr_alignment))] use std::ptr::copy_nonoverlapping; use std::slice; @@ -19,7 +19,7 @@ impl<'a> ShortSlice<'a> { /// Faster version of `apply_mask()` which operates on 8-byte blocks. #[inline] -#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))] +#[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))] pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) { // Extend the mask to 64 bits let mut mask_u64 = ((mask_u32 as u64) << 32) | (mask_u32 as u64); @@ -52,7 +52,7 @@ pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) { // a `ShortSlice` must be smaller than a u64. 
#[cfg_attr( feature = "cargo-clippy", - allow(clippy::needless_pass_by_value) + allow(needless_pass_by_value) )] fn xor_short(buf: ShortSlice, mask: u64) { // Unsafe: we know that a `ShortSlice` fits in a u64 From 8e354021d47b2131180de1a312d308ca96e7eb9a Mon Sep 17 00:00:00 2001 From: Julian Tescher Date: Wed, 7 Nov 2018 12:24:06 -0800 Subject: [PATCH 201/219] Add SameSite option to identity middleware cookie (#581) --- CHANGES.md | 1 + src/middleware/identity.rs | 14 +++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index f5adb82c3..2aa9cbfd2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,6 +9,7 @@ ### Added * Add method to configure custom error handler to `Query` and `Path` extractors. +* Add method to configure `SameSite` option in `CookieIdentityPolicy`. ## [0.7.13] - 2018-10-14 diff --git a/src/middleware/identity.rs b/src/middleware/identity.rs index d890bebef..a664ba1f0 100644 --- a/src/middleware/identity.rs +++ b/src/middleware/identity.rs @@ -48,7 +48,7 @@ //! ``` use std::rc::Rc; -use cookie::{Cookie, CookieJar, Key}; +use cookie::{Cookie, CookieJar, Key, SameSite}; use futures::future::{err as FutErr, ok as FutOk, FutureResult}; use futures::Future; use time::Duration; @@ -237,6 +237,7 @@ struct CookieIdentityInner { domain: Option, secure: bool, max_age: Option, + same_site: Option, } impl CookieIdentityInner { @@ -248,6 +249,7 @@ impl CookieIdentityInner { domain: None, secure: true, max_age: None, + same_site: None, } } @@ -268,6 +270,10 @@ impl CookieIdentityInner { cookie.set_max_age(max_age); } + if let Some(same_site) = self.same_site { + cookie.set_same_site(same_site); + } + let mut jar = CookieJar::new(); if some { jar.private(&self.key).add(cookie); @@ -370,6 +376,12 @@ impl CookieIdentityPolicy { Rc::get_mut(&mut self.0).unwrap().max_age = Some(value); self } + + /// Sets the `same_site` field in the session cookie being built. + pub fn same_site(mut self, same_site: SameSite) -> Self { + Rc::get_mut(&mut self.0).unwrap().same_site = Some(same_site); + self + } } impl IdentityPolicy for CookieIdentityPolicy { From 2677d325a727508e0d2b17ac412173b06528eb7a Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 7 Nov 2018 21:09:33 -0800 Subject: [PATCH 202/219] fix keep-alive timer reset --- CHANGES.md | 7 ++++++- Cargo.toml | 2 +- src/server/h1.rs | 21 +++++++++++++++------ src/server/h2.rs | 18 +++++++++++++----- 4 files changed, 35 insertions(+), 13 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2aa9cbfd2..1e66cff87 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,16 +1,21 @@ # Changes -## [0.7.14] - 2018-10-x +## [0.7.14] - 2018-11-x ### Fixed +* Fix keep-alive timer reset + * HttpServer now treats streaming bodies the same for HTTP/1.x protocols. #549 + ### Added * Add method to configure custom error handler to `Query` and `Path` extractors. + * Add method to configure `SameSite` option in `CookieIdentityPolicy`. + ## [0.7.13] - 2018-10-14 ### Fixed diff --git a/Cargo.toml b/Cargo.toml index d98ce5eac..4a6e23173 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.13" +version = "0.7.14" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." 
readme = "README.md" diff --git a/src/server/h1.rs b/src/server/h1.rs index a2ffc0551..07f773eba 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -87,7 +87,10 @@ where H: HttpHandler + 'static, { pub fn new( - settings: ServiceConfig, stream: T, buf: BytesMut, is_eof: bool, + settings: ServiceConfig, + stream: T, + buf: BytesMut, + is_eof: bool, keepalive_timer: Option, ) -> Self { let addr = stream.peer_addr(); @@ -123,8 +126,11 @@ where } pub(crate) fn for_error( - settings: ServiceConfig, stream: T, status: StatusCode, - mut keepalive_timer: Option, buf: BytesMut, + settings: ServiceConfig, + stream: T, + status: StatusCode, + mut keepalive_timer: Option, + buf: BytesMut, ) -> Self { if let Some(deadline) = settings.client_timer_expire() { let _ = keepalive_timer.as_mut().map(|delay| delay.reset(deadline)); @@ -298,16 +304,19 @@ where if let Some(deadline) = self.settings.client_shutdown_timer() { - timer.reset(deadline) + timer.reset(deadline); + let _ = timer.poll(); } else { return Ok(()); } } } else if let Some(dl) = self.settings.keep_alive_expire() { - timer.reset(dl) + timer.reset(dl); + let _ = timer.poll(); } } else { - timer.reset(self.ka_expire) + timer.reset(self.ka_expire); + let _ = timer.poll(); } } Ok(Async::NotReady) => (), diff --git a/src/server/h2.rs b/src/server/h2.rs index 35afa3397..c9e968a39 100644 --- a/src/server/h2.rs +++ b/src/server/h2.rs @@ -60,7 +60,10 @@ where H: HttpHandler + 'static, { pub fn new( - settings: ServiceConfig, io: T, buf: Bytes, keepalive_timer: Option, + settings: ServiceConfig, + io: T, + buf: Bytes, + keepalive_timer: Option, ) -> Self { let addr = io.peer_addr(); let extensions = io.extensions(); @@ -284,10 +287,12 @@ where if self.tasks.is_empty() { return Err(HttpDispatchError::ShutdownTimeout); } else if let Some(dl) = self.settings.keep_alive_expire() { - timer.reset(dl) + timer.reset(dl); + let _ = timer.poll(); } } else { - timer.reset(self.ka_expire) + timer.reset(self.ka_expire); + let _ = timer.poll(); } } Ok(Async::NotReady) => (), @@ -348,8 +353,11 @@ struct Entry { impl Entry { fn new( - parts: Parts, recv: RecvStream, resp: SendResponse, - addr: Option, settings: ServiceConfig, + parts: Parts, + recv: RecvStream, + resp: SendResponse, + addr: Option, + settings: ServiceConfig, extensions: Option>, ) -> Entry where From 62f1c90c8d245d3854e1ff2d7228c0c705aa1eb7 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Wed, 7 Nov 2018 21:18:40 -0800 Subject: [PATCH 203/219] update base64 dep --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 4a6e23173..8041c783f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,7 +64,7 @@ cell = ["actix-net/cell"] actix = "^0.7.5" actix-net = "^0.1.1" -base64 = "0.9" +base64 = "0.10" bitflags = "1.0" failure = "^0.1.2" h2 = "0.1" From 9ab586e24e1ea4be98c31e6b0eed09f962fad1f9 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 8 Nov 2018 16:06:23 -0800 Subject: [PATCH 204/219] update actix-net dep --- Cargo.toml | 2 +- tests/test_ws.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 8041c783f..16be8cd41 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ cell = ["actix-net/cell"] [dependencies] actix = "^0.7.5" -actix-net = "^0.1.1" +actix-net = "0.2.0" base64 = "0.10" bitflags = "1.0" diff --git a/tests/test_ws.rs b/tests/test_ws.rs index 5a0ce204f..cb46bc7e1 100644 --- a/tests/test_ws.rs +++ b/tests/test_ws.rs @@ -385,10 +385,11 @@ fn test_ws_stopped() { { let (reader, mut 
writer) = srv.ws().unwrap(); writer.text("text"); + writer.close(None); let (item, _) = srv.execute(reader.into_future()).unwrap(); assert_eq!(item, Some(ws::Message::Text("text".to_owned()))); } - thread::sleep(time::Duration::from_millis(1000)); + thread::sleep(time::Duration::from_millis(100)); assert_eq!(num.load(Ordering::Relaxed), 1); } From 1a0bf32ec76411e6ae017ea680b4dad7db3f0c69 Mon Sep 17 00:00:00 2001 From: imaperson Date: Fri, 9 Nov 2018 01:08:06 +0100 Subject: [PATCH 205/219] Fix unnecessary owned string and change htmlescape in favor of askama_escape (#584) --- Cargo.toml | 2 +- src/fs.rs | 27 +++++++++++++++++++-------- src/lib.rs | 2 +- 3 files changed, 21 insertions(+), 10 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 16be8cd41..0dcce54b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,11 +64,11 @@ cell = ["actix-net/cell"] actix = "^0.7.5" actix-net = "0.2.0" +askama_escape = "0.1.0" base64 = "0.10" bitflags = "1.0" failure = "^0.1.2" h2 = "0.1" -htmlescape = "0.3" http = "^0.1.8" httparse = "1.3" log = "0.4" diff --git a/src/fs.rs b/src/fs.rs index 10cdaff7b..51470846e 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -11,10 +11,10 @@ use std::{cmp, io}; #[cfg(unix)] use std::os::unix::fs::MetadataExt; +use askama_escape::{escape as escape_html_entity}; use bytes::Bytes; use futures::{Async, Future, Poll, Stream}; use futures_cpupool::{CpuFuture, CpuPool}; -use htmlescape::encode_minimal as escape_html_entity; use mime; use mime_guess::{get_mime_type, guess_mime_type}; use percent_encoding::{utf8_percent_encode, DEFAULT_ENCODE_SET}; @@ -561,6 +561,20 @@ impl Directory { } } +// show file url as relative to static path +macro_rules! encode_file_url { + ($path:ident) => { + utf8_percent_encode(&$path.to_string_lossy(), DEFAULT_ENCODE_SET) + }; +} + +// " -- " & -- & ' -- ' < -- < > -- > / -- / +macro_rules! encode_file_name { + ($entry:ident) => { + escape_html_entity(&$entry.file_name().to_string_lossy()) + }; +} + fn directory_listing( dir: &Directory, req: &HttpRequest, ) -> Result { @@ -575,11 +589,6 @@ fn directory_listing( Ok(p) => base.join(p), Err(_) => continue, }; - // show file url as relative to static path - let file_url = utf8_percent_encode(&p.to_string_lossy(), DEFAULT_ENCODE_SET) - .to_string(); - // " -- " & -- & ' -- ' < -- < > -- > - let file_name = escape_html_entity(&entry.file_name().to_string_lossy()); // if file is a directory, add '/' to the end of the name if let Ok(metadata) = entry.metadata() { @@ -587,13 +596,15 @@ fn directory_listing( let _ = write!( body, "
                    <li><a href=\"{}\">{}/</a></li>",
-                    file_url, file_name
+                    encode_file_url!(p),
+                    encode_file_name!(entry),
                 );
             } else {
                 let _ = write!(
                     body,
                     "<li><a href=\"{}\">{}</a></li>
  • ", - file_url, file_name + encode_file_url!(p), + encode_file_name!(entry), ); } } else { diff --git a/src/lib.rs b/src/lib.rs index 1ed408099..738153fab 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -100,9 +100,9 @@ extern crate failure; extern crate lazy_static; #[macro_use] extern crate futures; +extern crate askama_escape; extern crate cookie; extern crate futures_cpupool; -extern crate htmlescape; extern crate http as modhttp; extern crate httparse; extern crate language_tags; From 5b7740dee3f0ddd5ff953755b62f1371c95e7489 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 8 Nov 2018 16:12:16 -0800 Subject: [PATCH 206/219] hide ChunkedReadFile --- src/fs.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/fs.rs b/src/fs.rs index 10cdaff7b..4fa112871 100644 --- a/src/fs.rs +++ b/src/fs.rs @@ -472,6 +472,7 @@ impl Responder for NamedFile { } } +#[doc(hidden)] /// A helper created from a `std::fs::File` which reads the file /// chunk-by-chunk on a `CpuPool`. pub struct ChunkedReadFile { @@ -562,7 +563,8 @@ impl Directory { } fn directory_listing( - dir: &Directory, req: &HttpRequest, + dir: &Directory, + req: &HttpRequest, ) -> Result { let index_of = format!("Index of {}", req.path()); let mut body = String::new(); @@ -656,7 +658,8 @@ impl StaticFiles { /// Create new `StaticFiles` instance for specified base directory and /// `CpuPool`. pub fn with_pool>( - dir: T, pool: CpuPool, + dir: T, + pool: CpuPool, ) -> Result, Error> { Self::with_config_pool(dir, pool, DefaultConfig) } @@ -667,7 +670,8 @@ impl StaticFiles { /// /// Identical with `new` but allows to specify configiration to use. pub fn with_config>( - dir: T, config: C, + dir: T, + config: C, ) -> Result, Error> { // use default CpuPool let pool = { DEFAULT_CPUPOOL.lock().clone() }; @@ -678,7 +682,9 @@ impl StaticFiles { /// Create new `StaticFiles` instance for specified base directory with config and /// `CpuPool`. pub fn with_config_pool>( - dir: T, pool: CpuPool, _: C, + dir: T, + pool: CpuPool, + _: C, ) -> Result, Error> { let dir = dir.into().canonicalize()?; @@ -736,7 +742,8 @@ impl StaticFiles { } fn try_handle( - &self, req: &HttpRequest, + &self, + req: &HttpRequest, ) -> Result, Error> { let tail: String = req.match_info().query("tail")?; let relpath = PathBuf::from_param(tail.trim_left_matches('/'))?; From 7065c540e1822fb15d4d040703c314c15ce81e95 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 8 Nov 2018 16:29:43 -0800 Subject: [PATCH 207/219] set nodelay on socket #560 --- CHANGES.md | 14 ++++++++------ src/server/builder.rs | 33 ++++++++++++++++++++++++++------- src/server/service.rs | 2 +- 3 files changed, 35 insertions(+), 14 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 1e66cff87..617237417 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,18 +2,20 @@ ## [0.7.14] - 2018-11-x +### Added + +* Add method to configure custom error handler to `Query` and `Path` extractors. + +* Add method to configure `SameSite` option in `CookieIdentityPolicy`. + + ### Fixed * Fix keep-alive timer reset * HttpServer now treats streaming bodies the same for HTTP/1.x protocols. #549 - -### Added - -* Add method to configure custom error handler to `Query` and `Path` extractors. - -* Add method to configure `SameSite` option in `CookieIdentityPolicy`. 
+* Set nodelay for socket #560 ## [0.7.13] - 2018-10-14 diff --git a/src/server/builder.rs b/src/server/builder.rs index 4f159af13..ea3638f10 100644 --- a/src/server/builder.rs +++ b/src/server/builder.rs @@ -9,14 +9,20 @@ use super::acceptor::{ }; use super::error::AcceptorError; use super::handler::IntoHttpHandler; -use super::service::HttpService; +use super::service::{HttpService, StreamConfiguration}; use super::settings::{ServerSettings, ServiceConfig}; use super::KeepAlive; pub(crate) trait ServiceProvider { fn register( - &self, server: Server, lst: net::TcpListener, host: String, - addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64, + &self, + server: Server, + lst: net::TcpListener, + host: String, + addr: net::SocketAddr, + keep_alive: KeepAlive, + secure: bool, + client_timeout: u64, client_shutdown: u64, ) -> Server; } @@ -43,8 +49,13 @@ where } fn finish( - &self, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, - client_timeout: u64, client_shutdown: u64, + &self, + host: String, + addr: net::SocketAddr, + keep_alive: KeepAlive, + secure: bool, + client_timeout: u64, + client_shutdown: u64, ) -> impl ServiceFactory { let factory = self.factory.clone(); let acceptor = self.acceptor.clone(); @@ -65,6 +76,7 @@ where acceptor.create(), )).map_err(|_| ()) .map_init_err(|_| ()) + .and_then(StreamConfiguration::new().nodelay(true)) .and_then( HttpService::new(settings) .map_init_err(|_| ()) @@ -76,6 +88,7 @@ where TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service)) .map_err(|_| ()) .map_init_err(|_| ()) + .and_then(StreamConfiguration::new().nodelay(true)) .and_then( HttpService::new(settings) .map_init_err(|_| ()) @@ -95,8 +108,14 @@ where H: IntoHttpHandler, { fn register( - &self, server: Server, lst: net::TcpListener, host: String, - addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64, + &self, + server: Server, + lst: net::TcpListener, + host: String, + addr: net::SocketAddr, + keep_alive: KeepAlive, + secure: bool, + client_timeout: u64, client_shutdown: u64, ) -> Server { server.listen2( diff --git a/src/server/service.rs b/src/server/service.rs index e3402e305..cd4b3d3fa 100644 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -88,7 +88,7 @@ where Ok(Async::Ready(())) } - fn call(&mut self, req: Self::Request) -> Self::Future { + fn call(&mut self, mut req: Self::Request) -> Self::Future { HttpChannel::new(self.settings.clone(), req) } } From 61b1030882781f93c0228b5605041a197e5eb8f5 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 8 Nov 2018 20:35:47 -0800 Subject: [PATCH 208/219] Fix websockets connection drop if request contains content-length header #567 --- CHANGES.md | 2 ++ Cargo.toml | 4 ++-- src/server/h1decoder.rs | 31 +++++++++++++++++++++++++------ src/server/service.rs | 2 +- 4 files changed, 30 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 617237417..b1717ea92 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -11,6 +11,8 @@ ### Fixed +* Fix websockets connection drop if request contains "content-length" header #567 + * Fix keep-alive timer reset * HttpServer now treats streaming bodies the same for HTTP/1.x protocols. 
#549 diff --git a/Cargo.toml b/Cargo.toml index 0dcce54b0..4abb64e27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,8 +61,8 @@ flate2-rust = ["flate2/rust_backend"] cell = ["actix-net/cell"] [dependencies] -actix = "^0.7.5" -actix-net = "0.2.0" +actix = "0.7.6" +actix-net = "0.2.1" askama_escape = "0.1.0" base64 = "0.10" diff --git a/src/server/h1decoder.rs b/src/server/h1decoder.rs index 434dc42df..10f7e68a0 100644 --- a/src/server/h1decoder.rs +++ b/src/server/h1decoder.rs @@ -43,7 +43,9 @@ impl H1Decoder { } pub fn decode( - &mut self, src: &mut BytesMut, settings: &ServiceConfig, + &mut self, + src: &mut BytesMut, + settings: &ServiceConfig, ) -> Result, DecoderError> { // read payload if self.decoder.is_some() { @@ -80,7 +82,9 @@ impl H1Decoder { } fn parse_message( - &self, buf: &mut BytesMut, settings: &ServiceConfig, + &self, + buf: &mut BytesMut, + settings: &ServiceConfig, ) -> Poll<(Request, Option), ParseError> { // Parse http message let mut has_upgrade = false; @@ -178,6 +182,13 @@ impl H1Decoder { } header::UPGRADE => { has_upgrade = true; + // check content-length, some clients (dart) + // sends "content-length: 0" with websocket upgrade + if let Ok(val) = value.to_str() { + if val == "websocket" { + content_length = None; + } + } } _ => (), } @@ -221,7 +232,9 @@ pub(crate) struct HeaderIndex { impl HeaderIndex { pub(crate) fn record( - bytes: &[u8], headers: &[httparse::Header], indices: &mut [HeaderIndex], + bytes: &[u8], + headers: &[httparse::Header], + indices: &mut [HeaderIndex], ) { let bytes_ptr = bytes.as_ptr() as usize; for (header, indices) in headers.iter().zip(indices.iter_mut()) { @@ -369,7 +382,10 @@ macro_rules! byte ( impl ChunkedState { fn step( - &self, body: &mut BytesMut, size: &mut u64, buf: &mut Option, + &self, + body: &mut BytesMut, + size: &mut u64, + buf: &mut Option, ) -> Poll { use self::ChunkedState::*; match *self { @@ -432,7 +448,8 @@ impl ChunkedState { } } fn read_size_lf( - rdr: &mut BytesMut, size: &mut u64, + rdr: &mut BytesMut, + size: &mut u64, ) -> Poll { match byte!(rdr) { b'\n' if *size > 0 => Ok(Async::Ready(ChunkedState::Body)), @@ -445,7 +462,9 @@ impl ChunkedState { } fn read_body( - rdr: &mut BytesMut, rem: &mut u64, buf: &mut Option, + rdr: &mut BytesMut, + rem: &mut u64, + buf: &mut Option, ) -> Poll { trace!("Chunked read, remaining={:?}", rem); diff --git a/src/server/service.rs b/src/server/service.rs index cd4b3d3fa..e3402e305 100644 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -88,7 +88,7 @@ where Ok(Async::Ready(())) } - fn call(&mut self, mut req: Self::Request) -> Self::Future { + fn call(&mut self, req: Self::Request) -> Self::Future { HttpChannel::new(self.settings.clone(), req) } } From 1ef0eed0bde2d66ea38762e7a8d1ec65b68e2cf4 Mon Sep 17 00:00:00 2001 From: Nikolay Kim Date: Thu, 8 Nov 2018 20:46:13 -0800 Subject: [PATCH 209/219] do not stop on keep-alive timer if sink is not completly flushed --- src/server/h1.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/h1.rs b/src/server/h1.rs index 07f773eba..f491ba597 100644 --- a/src/server/h1.rs +++ b/src/server/h1.rs @@ -286,7 +286,7 @@ where } if timer.deadline() >= self.ka_expire { // check for any outstanding request handling - if self.tasks.is_empty() { + if self.tasks.is_empty() && self.flags.contains(Flags::FLUSHED) { if !self.flags.contains(Flags::STARTED) { // timeout on first request (slow request) return 408 trace!("Slow request timeout"); From cd9901c928bfb7b016484f8c0c81c3629eca3e9f Mon Sep 17 00:00:00 2001 From: 
Nikolay Kim Date: Wed, 14 Nov 2018 16:24:01 -0800 Subject: [PATCH 210/219] prepare release --- CHANGES.md | 2 +- Cargo.toml | 2 +- src/client/parser.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b1717ea92..efeaadf0a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.14] - 2018-11-x +## [0.7.14] - 2018-11-14 ### Added diff --git a/Cargo.toml b/Cargo.toml index 4abb64e27..41f2e6676 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ cell = ["actix-net/cell"] [dependencies] actix = "0.7.6" -actix-net = "0.2.1" +actix-net = "0.2.2" askama_escape = "0.1.0" base64 = "0.10" diff --git a/src/client/parser.rs b/src/client/parser.rs index 11252fa52..92a7abe13 100644 --- a/src/client/parser.rs +++ b/src/client/parser.rs @@ -56,7 +56,7 @@ impl HttpResponseParser { return Ok(Async::Ready(msg)); } Async::NotReady => { - if buf.capacity() >= MAX_BUFFER_SIZE { + if buf.len() >= MAX_BUFFER_SIZE { return Err(HttpResponseParserError::Error( ParseError::TooLarge, )); From 6a9317847979a17fa8d39f05d15188cbb7dde902 Mon Sep 17 00:00:00 2001 From: Huston Bokinsky Date: Sat, 17 Nov 2018 15:25:44 -0800 Subject: [PATCH 211/219] Complete error helper functions. --- src/error.rs | 312 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 312 insertions(+) diff --git a/src/error.rs b/src/error.rs index 76c8e79ec..1766c1523 100644 --- a/src/error.rs +++ b/src/error.rs @@ -759,6 +759,16 @@ where InternalError::new(err, StatusCode::UNAUTHORIZED).into() } +/// Helper function that creates wrapper of any error and generate +/// *PAYMENT_REQUIRED* response. +#[allow(non_snake_case)] +pub fn ErrorPaymentRequired(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::PAYMENT_REQUIRED).into() +} + /// Helper function that creates wrapper of any error and generate *FORBIDDEN* /// response. #[allow(non_snake_case)] @@ -789,6 +799,26 @@ where InternalError::new(err, StatusCode::METHOD_NOT_ALLOWED).into() } +/// Helper function that creates wrapper of any error and generate *NOT +/// ACCEPTABLE* response. +#[allow(non_snake_case)] +pub fn ErrorNotAcceptable(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::NOT_ACCEPTABLE).into() +} + +/// Helper function that creates wrapper of any error and generate *PROXY +/// AUTHENTICATION REQUIRED* response. +#[allow(non_snake_case)] +pub fn ErrorProxyAuthenticationRequired(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::PROXY_AUTHENTICATION_REQUIRED).into() +} + /// Helper function that creates wrapper of any error and generate *REQUEST /// TIMEOUT* response. #[allow(non_snake_case)] @@ -819,6 +849,16 @@ where InternalError::new(err, StatusCode::GONE).into() } +/// Helper function that creates wrapper of any error and generate *LENGTH +/// REQUIRED* response. +#[allow(non_snake_case)] +pub fn ErrorLengthRequired(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::LENGTH_REQUIRED).into() +} + /// Helper function that creates wrapper of any error and generate /// *PRECONDITION FAILED* response. #[allow(non_snake_case)] @@ -829,6 +869,46 @@ where InternalError::new(err, StatusCode::PRECONDITION_FAILED).into() } +/// Helper function that creates wrapper of any error and generate +/// *PAYLOAD TOO LARGE* response. 
+#[allow(non_snake_case)] +pub fn ErrorPayloadTooLarge(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::PAYLOAD_TOO_LARGE).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *URI TOO LONG* response. +#[allow(non_snake_case)] +pub fn ErrorUriTooLong(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::URI_TOO_LONG).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *UNSUPPORTED MEDIA TYPE* response. +#[allow(non_snake_case)] +pub fn ErrorUnsupportedMediaType(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::UNSUPPORTED_MEDIA_TYPE).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *RANGE NOT SATISFIABLE* response. +#[allow(non_snake_case)] +pub fn ErrorRangeNotSatisfiable(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::RANGE_NOT_SATISFIABLE).into() +} + /// Helper function that creates wrapper of any error and generate /// *EXPECTATION FAILED* response. #[allow(non_snake_case)] @@ -839,6 +919,106 @@ where InternalError::new(err, StatusCode::EXPECTATION_FAILED).into() } +/// Helper function that creates wrapper of any error and generate +/// *IM A TEAPOT* response. +#[allow(non_snake_case)] +pub fn ErrorImATeapot(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::IM_A_TEAPOT).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *MISDIRECTED REQUEST* response. +#[allow(non_snake_case)] +pub fn ErrorMisdirectedRequest(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::MISDIRECTED_REQUEST).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *UNPROCESSABLE ENTITY* response. +#[allow(non_snake_case)] +pub fn ErrorUnprocessableEntity(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::UNPROCESSABLE_ENTITY).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *LOCKED* response. +#[allow(non_snake_case)] +pub fn ErrorLocked(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::LOCKED).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *FAILED DEPENDENCY* response. +#[allow(non_snake_case)] +pub fn ErrorFailedDependency(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::FAILED_DEPENDENCY).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *UPGRADE REQUIRED* response. +#[allow(non_snake_case)] +pub fn ErrorUpgradeRequired(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::UPGRADE_REQUIRED).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *PRECONDITION REQUIRED* response. 
+#[allow(non_snake_case)] +pub fn ErrorPreconditionRequired(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::PRECONDITION_REQUIRED).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *TOO MANY REQUESTS* response. +#[allow(non_snake_case)] +pub fn ErrorTooManyRequests(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::TOO_MANY_REQUESTS).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *REQUEST HEADER FIELDS TOO LARGE* response. +#[allow(non_snake_case)] +pub fn ErrorRequestHeaderFieldsTooLarge(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE).into() +} + +/// Helper function that creates wrapper of any error and generate +/// *UNAVAILABLE FOR LEGAL REASONS* response. +#[allow(non_snake_case)] +pub fn ErrorUnavailableForLegalReasons(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::UNAVAILABLE_FOR_LEGAL_REASONS).into() +} + /// Helper function that creates wrapper of any error and /// generate *INTERNAL SERVER ERROR* response. #[allow(non_snake_case)] @@ -889,6 +1069,66 @@ where InternalError::new(err, StatusCode::GATEWAY_TIMEOUT).into() } +/// Helper function that creates wrapper of any error and +/// generate *HTTP VERSION NOT SUPPORTED* response. +#[allow(non_snake_case)] +pub fn ErrorHttpVersionNotSupported(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::HTTP_VERSION_NOT_SUPPORTED).into() +} + +/// Helper function that creates wrapper of any error and +/// generate *VARIANT ALSO NEGOTIATES* response. +#[allow(non_snake_case)] +pub fn ErrorVariantAlsoNegotiates(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::VARIANT_ALSO_NEGOTIATES).into() +} + +/// Helper function that creates wrapper of any error and +/// generate *INSUFFICIENT STORAGE* response. +#[allow(non_snake_case)] +pub fn ErrorInsufficientStorage(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::INSUFFICIENT_STORAGE).into() +} + +/// Helper function that creates wrapper of any error and +/// generate *LOOP DETECTED* response. +#[allow(non_snake_case)] +pub fn ErrorLoopDetected(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::LOOP_DETECTED).into() +} + +/// Helper function that creates wrapper of any error and +/// generate *NOT EXTENDED* response. +#[allow(non_snake_case)] +pub fn ErrorNotExtended(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::NOT_EXTENDED).into() +} + +/// Helper function that creates wrapper of any error and +/// generate *NETWORK AUTHENTICATION REQUIRED* response. 
+#[allow(non_snake_case)] +pub fn ErrorNetworkAuthenticationRequired(err: T) -> Error +where + T: Send + Sync + fmt::Debug + fmt::Display + 'static, +{ + InternalError::new(err, StatusCode::NETWORK_AUTHENTICATION_REQUIRED).into() +} + #[cfg(test)] mod tests { use super::*; @@ -1068,6 +1308,9 @@ mod tests { let r: HttpResponse = ErrorUnauthorized("err").into(); assert_eq!(r.status(), StatusCode::UNAUTHORIZED); + let r: HttpResponse = ErrorPaymentRequired("err").into(); + assert_eq!(r.status(), StatusCode::PAYMENT_REQUIRED); + let r: HttpResponse = ErrorForbidden("err").into(); assert_eq!(r.status(), StatusCode::FORBIDDEN); @@ -1077,6 +1320,12 @@ mod tests { let r: HttpResponse = ErrorMethodNotAllowed("err").into(); assert_eq!(r.status(), StatusCode::METHOD_NOT_ALLOWED); + let r: HttpResponse = ErrorNotAcceptable("err").into(); + assert_eq!(r.status(), StatusCode::NOT_ACCEPTABLE); + + let r: HttpResponse = ErrorProxyAuthenticationRequired("err").into(); + assert_eq!(r.status(), StatusCode::PROXY_AUTHENTICATION_REQUIRED); + let r: HttpResponse = ErrorRequestTimeout("err").into(); assert_eq!(r.status(), StatusCode::REQUEST_TIMEOUT); @@ -1086,12 +1335,57 @@ mod tests { let r: HttpResponse = ErrorGone("err").into(); assert_eq!(r.status(), StatusCode::GONE); + let r: HttpResponse = ErrorLengthRequired("err").into(); + assert_eq!(r.status(), StatusCode::LENGTH_REQUIRED); + let r: HttpResponse = ErrorPreconditionFailed("err").into(); assert_eq!(r.status(), StatusCode::PRECONDITION_FAILED); + let r: HttpResponse = ErrorPayloadTooLarge("err").into(); + assert_eq!(r.status(), StatusCode::PAYLOAD_TOO_LARGE); + + let r: HttpResponse = ErrorUriTooLong("err").into(); + assert_eq!(r.status(), StatusCode::URI_TOO_LONG); + + let r: HttpResponse = ErrorUnsupportedMediaType("err").into(); + assert_eq!(r.status(), StatusCode::UNSUPPORTED_MEDIA_TYPE); + + let r: HttpResponse = ErrorRangeNotSatisfiable("err").into(); + assert_eq!(r.status(), StatusCode::RANGE_NOT_SATISFIABLE); + let r: HttpResponse = ErrorExpectationFailed("err").into(); assert_eq!(r.status(), StatusCode::EXPECTATION_FAILED); + let r: HttpResponse = ErrorImATeapot("err").into(); + assert_eq!(r.status(), StatusCode::IM_A_TEAPOT); + + let r: HttpResponse = ErrorMisdirectedRequest("err").into(); + assert_eq!(r.status(), StatusCode::MISDIRECTED_REQUEST); + + let r: HttpResponse = ErrorUnprocessableEntity("err").into(); + assert_eq!(r.status(), StatusCode::UNPROCESSABLE_ENTITY); + + let r: HttpResponse = ErrorLocked("err").into(); + assert_eq!(r.status(), StatusCode::LOCKED); + + let r: HttpResponse = ErrorFailedDependency("err").into(); + assert_eq!(r.status(), StatusCode::FAILED_DEPENDENCY); + + let r: HttpResponse = ErrorUpgradeRequired("err").into(); + assert_eq!(r.status(), StatusCode::UPGRADE_REQUIRED); + + let r: HttpResponse = ErrorPreconditionRequired("err").into(); + assert_eq!(r.status(), StatusCode::PRECONDITION_REQUIRED); + + let r: HttpResponse = ErrorTooManyRequests("err").into(); + assert_eq!(r.status(), StatusCode::TOO_MANY_REQUESTS); + + let r: HttpResponse = ErrorRequestHeaderFieldsTooLarge("err").into(); + assert_eq!(r.status(), StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE); + + let r: HttpResponse = ErrorUnavailableForLegalReasons("err").into(); + assert_eq!(r.status(), StatusCode::UNAVAILABLE_FOR_LEGAL_REASONS); + let r: HttpResponse = ErrorInternalServerError("err").into(); assert_eq!(r.status(), StatusCode::INTERNAL_SERVER_ERROR); @@ -1106,5 +1400,23 @@ mod tests { let r: HttpResponse = ErrorGatewayTimeout("err").into(); 
assert_eq!(r.status(), StatusCode::GATEWAY_TIMEOUT); + + let r: HttpResponse = ErrorHttpVersionNotSupported("err").into(); + assert_eq!(r.status(), StatusCode::HTTP_VERSION_NOT_SUPPORTED); + + let r: HttpResponse = ErrorVariantAlsoNegotiates("err").into(); + assert_eq!(r.status(), StatusCode::VARIANT_ALSO_NEGOTIATES); + + let r: HttpResponse = ErrorInsufficientStorage("err").into(); + assert_eq!(r.status(), StatusCode::INSUFFICIENT_STORAGE); + + let r: HttpResponse = ErrorLoopDetected("err").into(); + assert_eq!(r.status(), StatusCode::LOOP_DETECTED); + + let r: HttpResponse = ErrorNotExtended("err").into(); + assert_eq!(r.status(), StatusCode::NOT_EXTENDED); + + let r: HttpResponse = ErrorNetworkAuthenticationRequired("err").into(); + assert_eq!(r.status(), StatusCode::NETWORK_AUTHENTICATION_REQUIRED); } } From 389cb13cd63704a024d7e668592c2aca06bcd876 Mon Sep 17 00:00:00 2001 From: Douman Date: Tue, 20 Nov 2018 23:06:38 +0300 Subject: [PATCH 212/219] Export PathConfig and QueryConfig Closes #597 --- CHANGES.md | 6 ++++++ src/lib.rs | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index efeaadf0a..cb4488833 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,11 @@ # Changes +## [0.7.15] - 2018-xx-xx + +## Changed + +* `QueryConfig` and `PathConfig` are made public. + ## [0.7.14] - 2018-11-14 ### Added diff --git a/src/lib.rs b/src/lib.rs index 738153fab..f8326886f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -255,7 +255,7 @@ pub mod dev { pub use body::BodyStream; pub use context::Drain; - pub use extractor::{FormConfig, PayloadConfig}; + pub use extractor::{FormConfig, PayloadConfig, QueryConfig, PathConfig}; pub use handler::{AsyncResult, Handler}; pub use httpmessage::{MessageBody, Readlines, UrlEncoded}; pub use httpresponse::HttpResponseBuilder; From 9aab382ea89395fcc627c5375ddd8721cc47c514 Mon Sep 17 00:00:00 2001 From: Douman Date: Thu, 22 Nov 2018 19:20:07 +0300 Subject: [PATCH 213/219] Allow user to provide addr to custom resolver We basically swaps Addr with Recipient to enable user to use custom resolver --- CHANGES.md | 2 ++ Cargo.toml | 2 +- src/client/connector.rs | 12 +++++++----- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index cb4488833..2e028d6db 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,6 +4,8 @@ ## Changed +* `ClientConnector::resolver` now accepts `Into` instead of `Addr`. It enables user to implement own resolver. + * `QueryConfig` and `PathConfig` are made public. 
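For reference, a minimal sketch of what the `ClientConnector::resolver` change noted above enables. It reuses the stock `actix::resolver::Resolver` purely to show the wiring; any actor that handles `actix::resolver::Connect` could be passed instead. The system name and variable names are illustrative and not part of the patch.

```rust
extern crate actix;
extern crate actix_web;

use actix::resolver::{Connect, Resolver};
use actix::{Actor, Recipient, System, SystemService};
use actix_web::client::ClientConnector;

fn main() {
    let sys = System::new("custom-resolver");

    // Any `Recipient<Connect>` is accepted now, not just `Addr<Resolver>`.
    let resolver: Recipient<Connect> = Resolver::from_registry().recipient();
    let _connector = ClientConnector::default()
        .resolver(resolver)
        .start();

    // Client requests built with `.with_connector(..)` would resolve host
    // names through the recipient supplied above.
    System::current().stop();
    sys.run();
}
```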
## [0.7.14] - 2018-11-14 diff --git a/Cargo.toml b/Cargo.toml index 41f2e6676..e3fbd4e38 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,7 +61,7 @@ flate2-rust = ["flate2/rust_backend"] cell = ["actix-net/cell"] [dependencies] -actix = "0.7.6" +actix = "0.7.7" actix-net = "0.2.2" askama_escape = "0.1.0" diff --git a/src/client/connector.rs b/src/client/connector.rs index 3990c955c..72132bc67 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -5,7 +5,7 @@ use std::{fmt, io, mem, time}; use actix::resolver::{Connect as ResolveConnect, Resolver, ResolverError}; use actix::{ - fut, Actor, ActorFuture, ActorResponse, Addr, AsyncContext, Context, + fut, Actor, ActorFuture, ActorResponse, AsyncContext, Context, ContextFutureSpawner, Handler, Message, Recipient, StreamHandler, Supervised, SystemService, WrapFuture, }; @@ -220,7 +220,7 @@ pub struct ClientConnector { acq_tx: mpsc::UnboundedSender, acq_rx: Option>, - resolver: Option>, + resolver: Option>, conn_lifetime: Duration, conn_keep_alive: Duration, limit: usize, @@ -239,7 +239,7 @@ impl Actor for ClientConnector { fn started(&mut self, ctx: &mut Self::Context) { if self.resolver.is_none() { - self.resolver = Some(Resolver::from_registry()) + self.resolver = Some(Resolver::from_registry().recipient()) } self.collect_periodic(ctx); ctx.add_stream(self.acq_rx.take().unwrap()); @@ -503,8 +503,10 @@ impl ClientConnector { } /// Use custom resolver actor - pub fn resolver(mut self, addr: Addr) -> Self { - self.resolver = Some(addr); + /// + /// By default actix's Resolver is used. + pub fn resolver>>(mut self, addr: A) -> Self { + self.resolver = Some(addr.into()); self } From c386353337cff83626941fca2b58628845b440f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois?= Date: Sat, 24 Nov 2018 14:54:11 +0100 Subject: [PATCH 214/219] decode reserved characters when extracting path with configuration (#577) * decode reserved characters when extracting path with configuration * remove useless clone * add a method to get decoded parameter by name --- CHANGES.md | 9 ++++ MIGRATION.md | 28 +++++++++++++ src/de.rs | 70 ++++++++++++++++++------------- src/extractor.rs | 76 ++++++++++++++++++++++++++++++++- src/param.rs | 33 ++++++++++++++- src/uri.rs | 95 ++++++++++++++++++++++-------------------- tests/test_handlers.rs | 2 +- 7 files changed, 234 insertions(+), 79 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2e028d6db..902a84f69 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,6 +8,12 @@ * `QueryConfig` and `PathConfig` are made public. +### Added + +* By default, `Path` extractor now percent decode all characters. This behaviour can be disabled + with `PathConfig::default().disable_decoding()` + + ## [0.7.14] - 2018-11-14 ### Added @@ -16,6 +22,9 @@ * Add method to configure `SameSite` option in `CookieIdentityPolicy`. +* By default, `Path` extractor now percent decode all characters. This behaviour can be disabled + with `PathConfig::default().disable_decoding()` + ### Fixed diff --git a/MIGRATION.md b/MIGRATION.md index 3c0bdd943..26a314240 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,3 +1,31 @@ +## 0.7.15 + +* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in + your routes, you should use `%20`. 
+ + instead of + + ```rust + fn main() { + let app = App::new().resource("/my index", |r| { + r.method(http::Method::GET) + .with(index); + }); + } + ``` + + use + + ```rust + fn main() { + let app = App::new().resource("/my%20index", |r| { + r.method(http::Method::GET) + .with(index); + }); + } + ``` + + ## 0.7.4 * `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple diff --git a/src/de.rs b/src/de.rs index 59ab79ba9..05f8914f8 100644 --- a/src/de.rs +++ b/src/de.rs @@ -1,7 +1,10 @@ +use std::rc::Rc; + use serde::de::{self, Deserializer, Error as DeError, Visitor}; use httprequest::HttpRequest; use param::ParamsIter; +use uri::RESERVED_QUOTER; macro_rules! unsupported_type { ($trait_fn:ident, $name:expr) => { @@ -13,6 +16,20 @@ macro_rules! unsupported_type { }; } +macro_rules! percent_decode_if_needed { + ($value:expr, $decode:expr) => { + if $decode { + if let Some(ref mut value) = RESERVED_QUOTER.requote($value.as_bytes()) { + Rc::make_mut(value).parse() + } else { + $value.parse() + } + } else { + $value.parse() + } + } +} + macro_rules! parse_single_value { ($trait_fn:ident, $visit_fn:ident, $tp:tt) => { fn $trait_fn(self, visitor: V) -> Result @@ -23,11 +40,11 @@ macro_rules! parse_single_value { format!("wrong number of parameters: {} expected 1", self.req.match_info().len()).as_str())) } else { - let v = self.req.match_info()[0].parse().map_err( - |_| de::value::Error::custom( - format!("can not parse {:?} to a {}", - &self.req.match_info()[0], $tp)))?; - visitor.$visit_fn(v) + let v_parsed = percent_decode_if_needed!(&self.req.match_info()[0], self.decode) + .map_err(|_| de::value::Error::custom( + format!("can not parse {:?} to a {}", &self.req.match_info()[0], $tp) + ))?; + visitor.$visit_fn(v_parsed) } } } @@ -35,11 +52,12 @@ macro_rules! 
parse_single_value { pub struct PathDeserializer<'de, S: 'de> { req: &'de HttpRequest, + decode: bool, } impl<'de, S: 'de> PathDeserializer<'de, S> { - pub fn new(req: &'de HttpRequest) -> Self { - PathDeserializer { req } + pub fn new(req: &'de HttpRequest, decode: bool) -> Self { + PathDeserializer { req, decode } } } @@ -53,6 +71,7 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> { visitor.visit_map(ParamsDeserializer { params: self.req.match_info().iter(), current: None, + decode: self.decode, }) } @@ -107,6 +126,7 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> { } else { visitor.visit_seq(ParamsSeq { params: self.req.match_info().iter(), + decode: self.decode, }) } } @@ -128,6 +148,7 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> { } else { visitor.visit_seq(ParamsSeq { params: self.req.match_info().iter(), + decode: self.decode, }) } } @@ -141,28 +162,13 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> { Err(de::value::Error::custom("unsupported type: enum")) } - fn deserialize_str(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - if self.req.match_info().len() != 1 { - Err(de::value::Error::custom( - format!( - "wrong number of parameters: {} expected 1", - self.req.match_info().len() - ).as_str(), - )) - } else { - visitor.visit_str(&self.req.match_info()[0]) - } - } - fn deserialize_seq(self, visitor: V) -> Result where V: Visitor<'de>, { visitor.visit_seq(ParamsSeq { params: self.req.match_info().iter(), + decode: self.decode, }) } @@ -184,13 +190,16 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> { parse_single_value!(deserialize_f32, visit_f32, "f32"); parse_single_value!(deserialize_f64, visit_f64, "f64"); parse_single_value!(deserialize_string, visit_string, "String"); + parse_single_value!(deserialize_str, visit_string, "String"); parse_single_value!(deserialize_byte_buf, visit_string, "String"); parse_single_value!(deserialize_char, visit_char, "char"); + } struct ParamsDeserializer<'de> { params: ParamsIter<'de>, current: Option<(&'de str, &'de str)>, + decode: bool, } impl<'de> de::MapAccess<'de> for ParamsDeserializer<'de> { @@ -212,7 +221,7 @@ impl<'de> de::MapAccess<'de> for ParamsDeserializer<'de> { V: de::DeserializeSeed<'de>, { if let Some((_, value)) = self.current.take() { - seed.deserialize(Value { value }) + seed.deserialize(Value { value, decode: self.decode }) } else { Err(de::value::Error::custom("unexpected item")) } @@ -252,16 +261,18 @@ macro_rules! 
parse_value { fn $trait_fn(self, visitor: V) -> Result where V: Visitor<'de> { - let v = self.value.parse().map_err( - |_| de::value::Error::custom( - format!("can not parse {:?} to a {}", self.value, $tp)))?; - visitor.$visit_fn(v) + let v_parsed = percent_decode_if_needed!(&self.value, self.decode) + .map_err(|_| de::value::Error::custom( + format!("can not parse {:?} to a {}", &self.value, $tp) + ))?; + visitor.$visit_fn(v_parsed) } } } struct Value<'de> { value: &'de str, + decode: bool, } impl<'de> Deserializer<'de> for Value<'de> { @@ -377,6 +388,7 @@ impl<'de> Deserializer<'de> for Value<'de> { struct ParamsSeq<'de> { params: ParamsIter<'de>, + decode: bool, } impl<'de> de::SeqAccess<'de> for ParamsSeq<'de> { @@ -387,7 +399,7 @@ impl<'de> de::SeqAccess<'de> for ParamsSeq<'de> { T: de::DeserializeSeed<'de>, { match self.params.next() { - Some(item) => Ok(Some(seed.deserialize(Value { value: item.1 })?)), + Some(item) => Ok(Some(seed.deserialize(Value { value: item.1, decode: self.decode })?)), None => Ok(None), } } diff --git a/src/extractor.rs b/src/extractor.rs index 45e29ace0..717e0f6c1 100644 --- a/src/extractor.rs +++ b/src/extractor.rs @@ -18,7 +18,8 @@ use httpmessage::{HttpMessage, MessageBody, UrlEncoded}; use httprequest::HttpRequest; #[derive(PartialEq, Eq, PartialOrd, Ord)] -/// Extract typed information from the request's path. +/// Extract typed information from the request's path. Information from the path is +/// URL decoded. Decoding of special characters can be disabled through `PathConfig`. /// /// ## Example /// @@ -119,7 +120,7 @@ where let req = req.clone(); let req2 = req.clone(); let err = Rc::clone(&cfg.ehandler); - de::Deserialize::deserialize(PathDeserializer::new(&req)) + de::Deserialize::deserialize(PathDeserializer::new(&req, cfg.decode)) .map_err(move |e| (*err)(e, &req2)) .map(|inner| Path { inner }) } @@ -149,6 +150,7 @@ where /// ``` pub struct PathConfig { ehandler: Rc) -> Error>, + decode: bool, } impl PathConfig { /// Set custom error handler @@ -159,12 +161,20 @@ impl PathConfig { self.ehandler = Rc::new(f); self } + + /// Disable decoding of URL encoded special charaters from the path + pub fn disable_decoding(&mut self) -> &mut Self + { + self.decode = false; + self + } } impl Default for PathConfig { fn default() -> Self { PathConfig { ehandler: Rc::new(|e, _| ErrorNotFound(e)), + decode: true, } } } @@ -1090,6 +1100,68 @@ mod tests { assert_eq!(*Path::::from_request(&req, &&PathConfig::default()).unwrap(), 32); } + #[test] + fn test_extract_path_decode() { + let mut router = Router::<()>::default(); + router.register_resource(Resource::new(ResourceDef::new("/{value}/"))); + + macro_rules! 
test_single_value { + ($value:expr, $expected:expr) => { + { + let req = TestRequest::with_uri($value).finish(); + let info = router.recognize(&req, &(), 0); + let req = req.with_route_info(info); + assert_eq!(*Path::::from_request(&req, &PathConfig::default()).unwrap(), $expected); + } + } + } + + test_single_value!("/%25/", "%"); + test_single_value!("/%40%C2%A3%24%25%5E%26%2B%3D/", "@£$%^&+="); + test_single_value!("/%2B/", "+"); + test_single_value!("/%252B/", "%2B"); + test_single_value!("/%2F/", "/"); + test_single_value!("/%252F/", "%2F"); + test_single_value!("/http%3A%2F%2Flocalhost%3A80%2Ffoo/", "http://localhost:80/foo"); + test_single_value!("/%2Fvar%2Flog%2Fsyslog/", "/var/log/syslog"); + test_single_value!( + "/http%3A%2F%2Flocalhost%3A80%2Ffile%2F%252Fvar%252Flog%252Fsyslog/", + "http://localhost:80/file/%2Fvar%2Flog%2Fsyslog" + ); + + let req = TestRequest::with_uri("/%25/7/?id=test").finish(); + + let mut router = Router::<()>::default(); + router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/"))); + let info = router.recognize(&req, &(), 0); + let req = req.with_route_info(info); + + let s = Path::::from_request(&req, &PathConfig::default()).unwrap(); + assert_eq!(s.key, "%"); + assert_eq!(s.value, 7); + + let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap(); + assert_eq!(s.0, "%"); + assert_eq!(s.1, "7"); + } + + #[test] + fn test_extract_path_no_decode() { + let mut router = Router::<()>::default(); + router.register_resource(Resource::new(ResourceDef::new("/{value}/"))); + + let req = TestRequest::with_uri("/%25/").finish(); + let info = router.recognize(&req, &(), 0); + let req = req.with_route_info(info); + assert_eq!( + *Path::::from_request( + &req, + &&PathConfig::default().disable_decoding() + ).unwrap(), + "%25" + ); + } + #[test] fn test_tuple_extract() { let mut router = Router::<()>::default(); diff --git a/src/param.rs b/src/param.rs index d0664df99..a3f602599 100644 --- a/src/param.rs +++ b/src/param.rs @@ -8,7 +8,7 @@ use http::StatusCode; use smallvec::SmallVec; use error::{InternalError, ResponseError, UriSegmentError}; -use uri::Url; +use uri::{Url, RESERVED_QUOTER}; /// A trait to abstract the idea of creating a new instance of a type from a /// path parameter. @@ -103,6 +103,17 @@ impl Params { } } + /// Get URL-decoded matched parameter by name without type conversion + pub fn get_decoded(&self, key: &str) -> Option { + self.get(key).map(|value| { + if let Some(ref mut value) = RESERVED_QUOTER.requote(value.as_bytes()) { + Rc::make_mut(value).to_string() + } else { + value.to_string() + } + }) + } + /// Get unprocessed part of path pub fn unprocessed(&self) -> &str { &self.url.path()[(self.tail as usize)..] 
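For reference, a minimal sketch of the `Params::get_decoded` accessor added in the hunk above. The `/file/{uri}` route and the handler name are illustrative only; `get()` still returns the raw, percent-encoded segment, while `get_decoded()` returns the URL-decoded value.

```rust
extern crate actix_web;

use actix_web::{App, HttpRequest};

fn show(req: &HttpRequest) -> String {
    // raw, percent-encoded segment as matched by the router
    let raw = req.match_info().get("uri").unwrap_or("").to_owned();
    // same segment with reserved characters percent-decoded
    let decoded = req
        .match_info()
        .get_decoded("uri")
        .unwrap_or_else(|| raw.clone());
    format!("raw: {}\ndecoded: {}", raw, decoded)
}

fn main() {
    let _app = App::new().resource("/file/{uri}", |r| r.f(show));
}
```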
@@ -300,4 +311,24 @@ mod tests { Ok(PathBuf::from_iter(vec!["seg2"])) ); } + + #[test] + fn test_get_param_by_name() { + let mut params = Params::new(); + params.add_static("item1", "path"); + params.add_static("item2", "http%3A%2F%2Flocalhost%3A80%2Ffoo"); + + assert_eq!(params.get("item0"), None); + assert_eq!(params.get_decoded("item0"), None); + assert_eq!(params.get("item1"), Some("path")); + assert_eq!(params.get_decoded("item1"), Some("path".to_string())); + assert_eq!( + params.get("item2"), + Some("http%3A%2F%2Flocalhost%3A80%2Ffoo") + ); + assert_eq!( + params.get_decoded("item2"), + Some("http://localhost:80/foo".to_string()) + ); + } } diff --git a/src/uri.rs b/src/uri.rs index 881cf20a8..c87cb3d5b 100644 --- a/src/uri.rs +++ b/src/uri.rs @@ -1,25 +1,12 @@ use http::Uri; use std::rc::Rc; -#[allow(dead_code)] -const GEN_DELIMS: &[u8] = b":/?#[]@"; -#[allow(dead_code)] -const SUB_DELIMS_WITHOUT_QS: &[u8] = b"!$'()*,"; -#[allow(dead_code)] -const SUB_DELIMS: &[u8] = b"!$'()*,+?=;"; -#[allow(dead_code)] -const RESERVED: &[u8] = b":/?#[]@!$'()*,+?=;"; -#[allow(dead_code)] -const UNRESERVED: &[u8] = b"abcdefghijklmnopqrstuvwxyz - ABCDEFGHIJKLMNOPQRSTUVWXYZ - 1234567890 - -._~"; -const ALLOWED: &[u8] = b"abcdefghijklmnopqrstuvwxyz - ABCDEFGHIJKLMNOPQRSTUVWXYZ - 1234567890 - -._~ - !$'()*,"; -const QS: &[u8] = b"+&=;b"; +// https://tools.ietf.org/html/rfc3986#section-2.2 +const RESERVED_PLUS_EXTRA: &[u8] = b":/?#[]@!$&'()*,+?;=%^ <>\"\\`{}|"; + +// https://tools.ietf.org/html/rfc3986#section-2.3 +const UNRESERVED: &[u8] = + b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-._~"; #[inline] fn bit_at(array: &[u8], ch: u8) -> bool { @@ -32,7 +19,8 @@ fn set_bit(array: &mut [u8], ch: u8) { } lazy_static! { - static ref DEFAULT_QUOTER: Quoter = { Quoter::new(b"@:", b"/+") }; + static ref UNRESERVED_QUOTER: Quoter = { Quoter::new(UNRESERVED) }; + pub(crate) static ref RESERVED_QUOTER: Quoter = { Quoter::new(RESERVED_PLUS_EXTRA) }; } #[derive(Default, Clone, Debug)] @@ -43,7 +31,7 @@ pub(crate) struct Url { impl Url { pub fn new(uri: Uri) -> Url { - let path = DEFAULT_QUOTER.requote(uri.path().as_bytes()); + let path = UNRESERVED_QUOTER.requote(uri.path().as_bytes()); Url { uri, path } } @@ -63,36 +51,19 @@ impl Url { pub(crate) struct Quoter { safe_table: [u8; 16], - protected_table: [u8; 16], } impl Quoter { - pub fn new(safe: &[u8], protected: &[u8]) -> Quoter { + pub fn new(safe: &[u8]) -> Quoter { let mut q = Quoter { safe_table: [0; 16], - protected_table: [0; 16], }; // prepare safe table - for i in 0..128 { - if ALLOWED.contains(&i) { - set_bit(&mut q.safe_table, i); - } - if QS.contains(&i) { - set_bit(&mut q.safe_table, i); - } - } - for ch in safe { set_bit(&mut q.safe_table, *ch) } - // prepare protected table - for ch in protected { - set_bit(&mut q.safe_table, *ch); - set_bit(&mut q.protected_table, *ch); - } - q } @@ -115,19 +86,17 @@ impl Quoter { if let Some(ch) = restore_ch(pct[1], pct[2]) { if ch < 128 { - if bit_at(&self.protected_table, ch) { - buf.extend_from_slice(&pct); - idx += 1; - continue; - } - if bit_at(&self.safe_table, ch) { buf.push(ch); idx += 1; continue; } + + buf.extend_from_slice(&pct); + } else { + // Not ASCII, decode it + buf.push(ch); } - buf.push(ch); } else { buf.extend_from_slice(&pct[..]); } @@ -172,3 +141,37 @@ fn from_hex(v: u8) -> Option { fn restore_ch(d1: u8, d2: u8) -> Option { from_hex(d1).and_then(|d1| from_hex(d2).and_then(move |d2| Some(d1 << 4 | d2))) } + + +#[cfg(test)] +mod tests { + use std::rc::Rc; + + use super::*; + 
+ #[test] + fn decode_path() { + assert_eq!(UNRESERVED_QUOTER.requote(b"https://localhost:80/foo"), None); + + assert_eq!( + Rc::try_unwrap(UNRESERVED_QUOTER.requote( + b"https://localhost:80/foo%25" + ).unwrap()).unwrap(), + "https://localhost:80/foo%25".to_string() + ); + + assert_eq!( + Rc::try_unwrap(UNRESERVED_QUOTER.requote( + b"http://cache-service/http%3A%2F%2Flocalhost%3A80%2Ffoo" + ).unwrap()).unwrap(), + "http://cache-service/http%3A%2F%2Flocalhost%3A80%2Ffoo".to_string() + ); + + assert_eq!( + Rc::try_unwrap(UNRESERVED_QUOTER.requote( + b"http://cache/http%3A%2F%2Flocal%3A80%2Ffile%2F%252Fvar%252Flog%0A" + ).unwrap()).unwrap(), + "http://cache/http%3A%2F%2Flocal%3A80%2Ffile%2F%252Fvar%252Flog%0A".to_string() + ); + } +} \ No newline at end of file diff --git a/tests/test_handlers.rs b/tests/test_handlers.rs index 3ea709c92..debc1626a 100644 --- a/tests/test_handlers.rs +++ b/tests/test_handlers.rs @@ -672,6 +672,6 @@ fn test_unsafe_path_route() { let bytes = srv.execute(response.body()).unwrap(); assert_eq!( bytes, - Bytes::from_static(b"success: http:%2F%2Fexample.com") + Bytes::from_static(b"success: http%3A%2F%2Fexample.com") ); } From 68c5d6e6d69f14c14c62d03b9e280ebfc320b6e9 Mon Sep 17 00:00:00 2001 From: vemoo Date: Sun, 2 Dec 2018 06:32:55 +0100 Subject: [PATCH 215/219] impl `From>` for `Binary` (#611) impl `From` for `Cow<'static, [u8]>` and `From>` for `Binary` --- src/body.rs | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/src/body.rs b/src/body.rs index a93db1e92..5487dbba4 100644 --- a/src/body.rs +++ b/src/body.rs @@ -1,5 +1,6 @@ use bytes::{Bytes, BytesMut}; use futures::Stream; +use std::borrow::Cow; use std::sync::Arc; use std::{fmt, mem}; @@ -194,12 +195,30 @@ impl From> for Binary { } } +impl From> for Binary { + fn from(b: Cow<'static, [u8]>) -> Binary { + match b { + Cow::Borrowed(s) => Binary::Slice(s), + Cow::Owned(vec) => Binary::Bytes(Bytes::from(vec)), + } + } +} + impl From for Binary { fn from(s: String) -> Binary { Binary::Bytes(Bytes::from(s)) } } +impl From> for Binary { + fn from(s: Cow<'static, str>) -> Binary { + match s { + Cow::Borrowed(s) => Binary::Slice(s.as_ref()), + Cow::Owned(s) => Binary::Bytes(Bytes::from(s)), + } + } +} + impl<'a> From<&'a String> for Binary { fn from(s: &'a String) -> Binary { Binary::Bytes(Bytes::from(AsRef::<[u8]>::as_ref(&s))) @@ -287,6 +306,16 @@ mod tests { assert_eq!(Binary::from("test").as_ref(), b"test"); } + #[test] + fn test_cow_str() { + let cow: Cow<'static, str> = Cow::Borrowed("test"); + assert_eq!(Binary::from(cow.clone()).len(), 4); + assert_eq!(Binary::from(cow.clone()).as_ref(), b"test"); + let cow: Cow<'static, str> = Cow::Owned("test".to_owned()); + assert_eq!(Binary::from(cow.clone()).len(), 4); + assert_eq!(Binary::from(cow.clone()).as_ref(), b"test"); + } + #[test] fn test_static_bytes() { assert_eq!(Binary::from(b"test".as_ref()).len(), 4); @@ -307,6 +336,16 @@ mod tests { assert_eq!(Binary::from(Bytes::from("test")).as_ref(), b"test"); } + #[test] + fn test_cow_bytes() { + let cow: Cow<'static, [u8]> = Cow::Borrowed(b"test"); + assert_eq!(Binary::from(cow.clone()).len(), 4); + assert_eq!(Binary::from(cow.clone()).as_ref(), b"test"); + let cow: Cow<'static, [u8]> = Cow::Owned(Vec::from("test")); + assert_eq!(Binary::from(cow.clone()).len(), 4); + assert_eq!(Binary::from(cow.clone()).as_ref(), b"test"); + } + #[test] fn test_arc_string() { let b = Arc::new("test".to_owned()); From 08c7743bb8431033b180a872f39eb006db0933fd Mon Sep 17 00:00:00 2001 From: 
Kelly Thomas Kline Date: Thu, 15 Nov 2018 18:59:36 -0800 Subject: [PATCH 216/219] Add set_mailbox_capacity() function --- src/ws/context.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/ws/context.rs b/src/ws/context.rs index 4db83df5c..5e207d43e 100644 --- a/src/ws/context.rs +++ b/src/ws/context.rs @@ -231,6 +231,13 @@ where pub fn handle(&self) -> SpawnHandle { self.inner.curr_handle() } + + /// Set mailbox capacity + /// + /// By default mailbox capacity is 16 messages. + pub fn set_mailbox_capacity(&mut self, cap: usize) { + self.inner.set_mailbox_capacity(cap) + } } impl WsWriter for WebsocketContext From b1635bc0e6ab116c2ccb684c0440935fe6ac5395 Mon Sep 17 00:00:00 2001 From: silwol Date: Tue, 4 Dec 2018 07:58:22 +0100 Subject: [PATCH 217/219] Update some dependencies (#612) * Update rand to 0.6 * Update parking_lot to 0.7 * Update env_logger to 0.6 --- Cargo.toml | 6 +++--- tests/test_client.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e3fbd4e38..37e900515 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ mime = "0.3" mime_guess = "2.0.0-alpha" num_cpus = "1.0" percent-encoding = "1.0" -rand = "0.5" +rand = "0.6" regex = "1.0" serde = "1.0" serde_json = "1.0" @@ -87,7 +87,7 @@ encoding = "0.2" language-tags = "0.2" lazy_static = "1.0" lazycell = "1.0.0" -parking_lot = "0.6" +parking_lot = "0.7" serde_urlencoded = "^0.5.3" url = { version="1.7", features=["query_encoding"] } cookie = { version="0.11", features=["percent-encode"] } @@ -127,7 +127,7 @@ webpki-roots = { version = "0.15", optional = true } tokio-uds = { version="0.2", optional = true } [dev-dependencies] -env_logger = "0.5" +env_logger = "0.6" serde_derive = "1.0" [build-dependencies] diff --git a/tests/test_client.rs b/tests/test_client.rs index 8c5d5819d..9808f3e6f 100644 --- a/tests/test_client.rs +++ b/tests/test_client.rs @@ -179,7 +179,7 @@ fn test_client_gzip_encoding_large() { #[test] fn test_client_gzip_encoding_large_random() { let data = rand::thread_rng() - .gen_ascii_chars() + .sample_iter(&rand::distributions::Alphanumeric) .take(100_000) .collect::(); @@ -247,7 +247,7 @@ fn test_client_brotli_encoding() { #[test] fn test_client_brotli_encoding_large_random() { let data = rand::thread_rng() - .gen_ascii_chars() + .sample_iter(&rand::distributions::Alphanumeric) .take(70_000) .collect::(); @@ -309,7 +309,7 @@ fn test_client_deflate_encoding() { #[test] fn test_client_deflate_encoding_large_random() { let data = rand::thread_rng() - .gen_ascii_chars() + .sample_iter(&rand::distributions::Alphanumeric) .take(70_000) .collect::(); From 0745a1a9f8d43840454c6aae24df5e2c6f781c36 Mon Sep 17 00:00:00 2001 From: Douman Date: Wed, 5 Dec 2018 03:07:59 -0500 Subject: [PATCH 218/219] Remove usage of upcoming keyword async AsyncResult::async is replaced with AsyncResult::future --- CHANGES.md | 2 ++ MIGRATION.md | 2 ++ src/client/connector.rs | 2 +- src/client/request.rs | 2 +- src/handler.rs | 6 +++--- src/middleware/csrf.rs | 2 +- src/route.rs | 2 +- src/scope.rs | 2 +- src/with.rs | 4 ++-- 9 files changed, 14 insertions(+), 10 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 902a84f69..4d8fa128f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,6 +8,8 @@ * `QueryConfig` and `PathConfig` are made public. +* `AsyncResult::async` is changed to `AsyncResult::future` as `async` is reserved keyword in 2018 edition. + ### Added * By default, `Path` extractor now percent decode all characters. 
This behaviour can be disabled diff --git a/MIGRATION.md b/MIGRATION.md index 26a314240..6b49e3e6a 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -25,6 +25,8 @@ } ``` +* If you used `AsyncResult::async` you need to replace it with `AsyncResult::future` + ## 0.7.4 diff --git a/src/client/connector.rs b/src/client/connector.rs index 72132bc67..f5affad37 100644 --- a/src/client/connector.rs +++ b/src/client/connector.rs @@ -942,7 +942,7 @@ impl Handler for ClientConnector { } let host = uri.host().unwrap().to_owned(); - let port = uri.port().unwrap_or_else(|| proto.port()); + let port = uri.port_part().map(|port| port.as_u16()).unwrap_or_else(|| proto.port()); let key = Key { host, port, diff --git a/src/client/request.rs b/src/client/request.rs index 76fb1be59..71da8f74d 100644 --- a/src/client/request.rs +++ b/src/client/request.rs @@ -631,7 +631,7 @@ impl ClientRequestBuilder { if !parts.headers.contains_key(header::HOST) { let mut wrt = BytesMut::with_capacity(host.len() + 5).writer(); - let _ = match parts.uri.port() { + let _ = match parts.uri.port_part().map(|port| port.as_u16()) { None | Some(80) | Some(443) => write!(wrt, "{}", host), Some(port) => write!(wrt, "{}:{}", host, port), }; diff --git a/src/handler.rs b/src/handler.rs index 88210fbc0..6ed93f92e 100644 --- a/src/handler.rs +++ b/src/handler.rs @@ -250,7 +250,7 @@ pub(crate) enum AsyncResultItem { impl AsyncResult { /// Create async response #[inline] - pub fn async(fut: Box>) -> AsyncResult { + pub fn future(fut: Box>) -> AsyncResult { AsyncResult(Some(AsyncResultItem::Future(fut))) } @@ -401,7 +401,7 @@ where }, Err(e) => err(e), }); - Ok(AsyncResult::async(Box::new(fut))) + Ok(AsyncResult::future(Box::new(fut))) } } @@ -502,7 +502,7 @@ where Err(e) => Either::A(err(e)), } }); - AsyncResult::async(Box::new(fut)) + AsyncResult::future(Box::new(fut)) } } diff --git a/src/middleware/csrf.rs b/src/middleware/csrf.rs index 02cd150d5..cacfc8d53 100644 --- a/src/middleware/csrf.rs +++ b/src/middleware/csrf.rs @@ -76,7 +76,7 @@ impl ResponseError for CsrfError { } fn uri_origin(uri: &Uri) -> Option { - match (uri.scheme_part(), uri.host(), uri.port()) { + match (uri.scheme_part(), uri.host(), uri.port_part().map(|port| port.as_u16())) { (Some(scheme), Some(host), Some(port)) => { Some(format!("{}://{}:{}", scheme, host, port)) } diff --git a/src/route.rs b/src/route.rs index e4a7a9572..884a367ed 100644 --- a/src/route.rs +++ b/src/route.rs @@ -57,7 +57,7 @@ impl Route { pub(crate) fn compose( &self, req: HttpRequest, mws: Rc>>>, ) -> AsyncResult { - AsyncResult::async(Box::new(Compose::new(req, mws, self.handler.clone()))) + AsyncResult::future(Box::new(Compose::new(req, mws, self.handler.clone()))) } /// Add match predicate to route. 
diff --git a/src/scope.rs b/src/scope.rs index 1bddc0e01..fb9e7514a 100644 --- a/src/scope.rs +++ b/src/scope.rs @@ -356,7 +356,7 @@ impl RouteHandler for Scope { if self.middlewares.is_empty() { self.router.handle(&req2) } else { - AsyncResult::async(Box::new(Compose::new( + AsyncResult::future(Box::new(Compose::new( req2, Rc::clone(&self.router), Rc::clone(&self.middlewares), diff --git a/src/with.rs b/src/with.rs index c6d54dee8..140e086e1 100644 --- a/src/with.rs +++ b/src/with.rs @@ -86,7 +86,7 @@ where match fut.poll() { Ok(Async::Ready(resp)) => AsyncResult::ok(resp), - Ok(Async::NotReady) => AsyncResult::async(Box::new(fut)), + Ok(Async::NotReady) => AsyncResult::future(Box::new(fut)), Err(e) => AsyncResult::err(e), } } @@ -208,7 +208,7 @@ where match fut.poll() { Ok(Async::Ready(resp)) => AsyncResult::ok(resp), - Ok(Async::NotReady) => AsyncResult::async(Box::new(fut)), + Ok(Async::NotReady) => AsyncResult::future(Box::new(fut)), Err(e) => AsyncResult::err(e), } } From ac9fc662c625f5c6273744b98d804019249f887e Mon Sep 17 00:00:00 2001 From: Douman Date: Wed, 5 Dec 2018 18:27:06 +0300 Subject: [PATCH 219/219] Bump version to 0.7.15 --- CHANGES.md | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 4d8fa128f..6092544e9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ # Changes -## [0.7.15] - 2018-xx-xx +## [0.7.15] - 2018-12-05 ## Changed diff --git a/Cargo.toml b/Cargo.toml index 37e900515..7b8dcec35 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "0.7.14" +version = "0.7.15" authors = ["Nikolay Kim "] description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." readme = "README.md"
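For reference, a minimal sketch of the headline 0.7.15 behaviour collected in this series: `Path` segments now arrive percent-decoded (for example `/%2Fvar%2Flog%2Fsyslog/` extracts as `/var/log/syslog`), and decoding can be switched off per route via `PathConfig::disable_decoding`. Route patterns and handler names are illustrative; the tuple-style `with_config` follows the 0.7.4 migration note.

```rust
extern crate actix_web;

use actix_web::{http, App, Path, Result};

// With 0.7.15 the `{value}` segment arrives percent-decoded.
fn index(value: Path<String>) -> Result<String> {
    Ok(format!("got: {}", value.into_inner()))
}

fn main() {
    let _app = App::new()
        .resource("/{value}/", |r| r.method(http::Method::GET).with(index))
        // Per-route opt-out using the config tuple (cfg.0 is the `PathConfig`
        // for the single `Path<String>` argument).
        .resource("/raw/{value}/", |r| {
            r.method(http::Method::GET).with_config(index, |cfg| {
                cfg.0.disable_decoding();
            })
        });
}
```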