Merge branch 'master' into patch-1

Commit 7dc00ba7b6 by Rob Ede, 2020-06-10 17:54:29 +01:00, committed via GitHub.
178 changed files with 3608 additions and 6915 deletions


@ -1,41 +0,0 @@
environment:
global:
PROJECT_NAME: actix-web
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test --no-default-features --features="flate2-rust"

.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines)

@ -0,0 +1,8 @@
blank_issues_enabled: true
contact_links:
- name: Gitter channel (actix-web)
url: https://gitter.im/actix/actix-web
about: Please ask and answer questions about actix-web here.
- name: Gitter channel (actix)
url: https://gitter.im/actix/actix
about: Please ask and answer questions about actix here.


@ -16,32 +16,8 @@ jobs:
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with:
command: generate-lockfile
- name: Cache cargo registry
uses: actions/cache@v1
with:
path: ~/.cargo/registry
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-registry-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v1
with:
path: ~/.cargo/git
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo build
uses: actions/cache@v1
with:
path: target
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-build-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Check benchmark
uses: actions-rs/cargo@v1
with:
command: bench
- name: Clear the cargo caches
run: |
cargo install cargo-cache --no-default-features --features ci-autoclean
cargo-cache
args: --bench=server -- --sample-size=15

.github/workflows/linux.yml (new file, 64 lines)

@ -0,0 +1,64 @@
name: CI (Linux)
on: [push, pull_request]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.40.0 # MSRV
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
- name: tests (actix-http)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=actix-http --no-default-features --features=rustls -- --nocapture
- name: tests (awc)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=awc --no-default-features --features=rustls -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml


@ -24,26 +24,6 @@ jobs:
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with:
command: generate-lockfile
- name: Cache cargo registry
uses: actions/cache@v1
with:
path: ~/.cargo/registry
key: ${{ matrix.version }}-x86_64-apple-darwin-cargo-registry-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v1
with:
path: ~/.cargo/git
key: ${{ matrix.version }}-x86_64-apple-darwin-cargo-index-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo build
uses: actions/cache@v1
with:
path: target
key: ${{ matrix.version }}-x86_64-apple-darwin-cargo-build-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: check build
uses: actions-rs/cargo@v1
with:
@ -57,8 +37,3 @@ jobs:
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls
- name: Clear the cargo caches
run: |
cargo install cargo-cache --no-default-features --features ci-autoclean
cargo-cache

.github/workflows/upload-doc.yml (new file, 35 lines)

@ -0,0 +1,35 @@
name: Upload documentation
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'actix/actix-web'
steps:
- uses: actions/checkout@master
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: doc
args: --no-deps --workspace --all-features
- name: Tweak HTML
run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html
- name: Upload documentation
run: |
git clone https://github.com/davisp/ghp-import.git
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://${{ secrets.GITHUB_TOKEN }}@github.com/"${{ github.repository }}.git" target/doc


@ -56,3 +56,4 @@ jobs:
--skip=test_slow_request
--skip=test_connection_force_close
--skip=test_connection_server_close
--skip=test_connection_wait_queue_force_close


@ -1,61 +0,0 @@
language: rust
sudo: required
dist: trusty
cache:
# cargo: true
apt: true
matrix:
include:
- rust: stable
- rust: beta
- rust: nightly-2019-11-20
allow_failures:
- rust: nightly-2019-11-20
env:
global:
# - RUSTFLAGS="-C link-dead-code"
- OPENSSL_VERSION=openssl-1.0.2
before_install:
- sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
- sudo apt-get update -qq
- sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
before_cache: |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-20" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install --version 0.6.11 cargo-tarpaulin
fi
# Add clippy
before_script:
- export PATH=$PATH:~/.cargo/bin
script:
- cargo update
- cargo check --all --no-default-features
- |
if [[ "$TRAVIS_RUST_VERSION" == "stable" || "$TRAVIS_RUST_VERSION" == "beta" ]]; then
cargo test --all-features --all -- --nocapture
cd actix-http; cargo test --no-default-features --features="rustls" -- --nocapture; cd ..
cd awc; cargo test --no-default-features --features="rustls" -- --nocapture; cd ..
fi
# Upload docs
after_success:
- |
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then
cargo doc --no-deps --all-features &&
echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
git clone https://github.com/davisp/ghp-import.git &&
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&
echo "Uploaded documentation"
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-20" ]]; then
taskset -c 0 cargo tarpaulin --out Xml --all --all-features
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
fi


@ -1,15 +1,55 @@
# Changes
## [3.0.0-alpha.3] - 2020-05-21
## [2.0.NEXT] - 2020-01-xx
### Added
* Add option to create `Data<T>` from `Arc<T>` [#1509]
### Changed
* Resources and Scopes can now access non-overridden data types set on App (or containing scopes) when setting their own data. [#1486]
* Fix audit issue logging by default peer address [#1485]
* Bump minimum supported Rust version to 1.40
* Replace deprecated `net2` crate with `socket2`
[#1485]: https://github.com/actix/actix-web/pull/1485
[#1509]: https://github.com/actix/actix-web/pull/1509
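
A minimal sketch of the `Data<T>`-from-`Arc<T>` addition noted above ([#1509]). The `AppState` type, its field, and the route are illustrative assumptions; the relevant piece is `web::Data::from(arc)`, which reuses an existing `Arc` instead of wrapping the value in a second one.

```rust
use std::sync::Arc;

use actix_web::{web, App, HttpResponse, HttpServer};

// Illustrative shared state; any Send + Sync type works the same way.
struct AppState {
    greeting: &'static str,
}

async fn index(state: web::Data<AppState>) -> HttpResponse {
    HttpResponse::Ok().body(state.greeting)
}

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // State that already lives behind an Arc can become web::Data directly,
    // without an extra layer of Arc.
    let shared = Arc::new(AppState { greeting: "hello" });

    HttpServer::new(move || {
        App::new()
            .app_data(web::Data::from(shared.clone()))
            .route("/", web::get().to(index))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```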
## [3.0.0-alpha.2] - 2020-05-08
### Changed
* `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452]
* Implement `std::error::Error` for our custom errors [#1422]
* NormalizePath middleware now appends trailing / so that routes of form /example/ respond to /example requests. [#1433]
* Remove the `failure` feature and support.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1433]: https://github.com/actix/actix-web/pull/1433
[#1452]: https://github.com/actix/actix-web/pull/1452
[#1486]: https://github.com/actix/actix-web/pull/1486
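
A short sketch of the `NormalizePath` behaviour described in [#1433] above, written as a test against an assumed `/example/` route; with the middleware installed, a request to `/example` gains the trailing slash before routing and still matches.

```rust
use actix_web::{middleware::NormalizePath, test, web, App, HttpResponse};

async fn example() -> HttpResponse {
    HttpResponse::Ok().body("example")
}

#[actix_rt::test]
async fn missing_trailing_slash_still_matches() {
    let mut app = test::init_service(
        App::new()
            .wrap(NormalizePath::default())
            .route("/example/", web::get().to(example)),
    )
    .await;

    // The middleware appends the trailing slash before routing, so this
    // request reaches the /example/ handler.
    let req = test::TestRequest::get().uri("/example").to_request();
    let resp = test::call_service(&mut app, req).await;
    assert!(resp.status().is_success());
}
```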
## [3.0.0-alpha.1] - 2020-03-11
### Added
* Add helper function for creating routes with `TRACE` method guard `web::trace()`
* Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing.
### Changed
* Use `sha-1` crate instead of unmaintained `sha1` crate
* Skip empty chunks when returning response from a `Stream` [#1308]
* Update the `time` dependency to 0.2.7
* Update `actix-tls` dependency to 2.0.0-alpha.1
* Update `rustls` dependency to 0.17
* Skip empty chunks when returning response from a `Stream` #1308
* Update the `time` dependency to 0.2.5
[#1308]: https://github.com/actix/actix-web/pull/1308
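
A hedged sketch of the two test conveniences listed for 3.0.0-alpha.1, `test::TestRequest::send_request()` and `test::read_body_json()`; the `Payload` type and `/value` route are invented for illustration.

```rust
use actix_web::{test, web, App, HttpResponse};
use serde::{Deserialize, Serialize};

// Illustrative response payload.
#[derive(Serialize, Deserialize)]
struct Payload {
    value: u32,
}

async fn value() -> HttpResponse {
    HttpResponse::Ok().json(Payload { value: 42 })
}

#[actix_rt::test]
async fn reads_json_body() {
    let mut app =
        test::init_service(App::new().route("/value", web::get().to(value))).await;

    // send_request builds the request and drives it through the service in
    // one call, instead of the separate to_request() + call_service() steps.
    let resp = test::TestRequest::get()
        .uri("/value")
        .send_request(&mut app)
        .await;
    assert!(resp.status().is_success());

    // read_body_json collects the body and deserializes it with serde.
    let body: Payload = test::read_body_json(resp).await;
    assert_eq!(body.value, 42);
}
```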
## [2.0.0] - 2019-12-25


@ -1,6 +1,6 @@
[package]
name = "actix-web"
version = "2.0.0"
version = "3.0.0-alpha.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md"
@ -30,11 +30,7 @@ members = [
".",
"awc",
"actix-http",
"actix-cors",
"actix-files",
"actix-framed",
"actix-session",
"actix-identity",
"actix-multipart",
"actix-web-actors",
"actix-web-codegen",
@ -42,7 +38,7 @@ members = [
]
[features]
default = ["compress", "failure"]
default = ["compress"]
# content-encoding support
compress = ["actix-http/compress", "awc/compress"]
@ -50,14 +46,24 @@ compress = ["actix-http/compress", "awc/compress"]
# sessions feature, session require "ring" crate and c compiler
secure-cookies = ["actix-http/secure-cookies"]
failure = ["actix-http/failure"]
# openssl
openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]
# rustls
rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]
[[example]]
name = "basic"
required-features = ["compress"]
[[example]]
name = "uds"
required-features = ["compress"]
[[test]]
name = "test_server"
required-features = ["compress"]
[dependencies]
actix-codec = "0.2.0"
actix-service = "1.0.2"
@ -68,34 +74,37 @@ actix-server = "1.0.0"
actix-testing = "1.0.0"
actix-macros = "0.1.0"
actix-threadpool = "0.3.1"
actix-tls = "1.0.0"
actix-tls = "2.0.0-alpha.1"
actix-web-codegen = "0.2.0"
actix-http = "1.0.1"
awc = { version = "1.0.1", default-features = false }
actix-web-codegen = "0.2.2"
actix-http = "2.0.0-alpha.4"
awc = { version = "2.0.0-alpha.2", default-features = false }
bytes = "0.5.3"
derive_more = "0.99.2"
encoding_rs = "0.8"
futures = "0.3.1"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
log = "0.4"
mime = "0.3"
net2 = "0.2.33"
pin-project = "0.4.6"
socket2 = "0.3"
pin-project = "0.4.17"
regex = "1.3"
serde = { version = "1.0", features=["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.6.1"
time = { version = "0.2.5", default-features = false, features = ["std"] }
time = { version = "0.2.7", default-features = false, features = ["std"] }
url = "2.1"
open-ssl = { version="0.10", package = "openssl", optional = true }
rust-tls = { version = "0.16.0", package = "rustls", optional = true }
rust-tls = { version = "0.17.0", package = "rustls", optional = true }
tinyvec = { version = "0.3", features = ["alloc"] }
[dev-dependencies]
actix = "0.9.0"
actix = "0.10.0-alpha.1"
rand = "0.7"
env_logger = "0.6"
env_logger = "0.7"
serde_derive = "1.0"
brotli2 = "0.3.2"
flate2 = "1.0.13"
@ -111,9 +120,6 @@ actix-web = { path = "." }
actix-http = { path = "actix-http" }
actix-http-test = { path = "test-server" }
actix-web-codegen = { path = "actix-web-codegen" }
actix-cors = { path = "actix-cors" }
actix-identity = { path = "actix-identity" }
actix-session = { path = "actix-session" }
actix-files = { path = "actix-files" }
actix-multipart = { path = "actix-multipart" }
awc = { path = "awc" }


@ -4,6 +4,14 @@
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
* actix-http support for Actors messages was moved to actix-http crate and is enabled
with feature `actors`
* content_length function is removed from actix-http.
You can set Content-Length by normally setting the response body or calling no_chunking function.
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
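
A hedged sketch of the migration notes above. The cookie names and values are illustrative; responses now get `Content-Length` from the body itself since `content_length()` is gone, and `BodySize::Sized` carries a `u64`.

```rust
use actix_http::body::BodySize;
use actix_web::cookie::{Cookie, SameSite};
use actix_web::HttpResponse;

fn cookies() -> (Cookie<'static>, Cookie<'static>) {
    // Omitting .same_site(...) keeps the attribute off the cookie entirely,
    // while an explicit SameSite::None is now written out as SameSite=None.
    let without_attribute = Cookie::build("session", "abc123").path("/").finish();
    let explicit_none = Cookie::build("tracker", "xyz")
        .same_site(SameSite::None)
        .finish();
    (without_attribute, explicit_none)
}

fn fixed_length_response() -> HttpResponse {
    // There is no content_length() setter anymore; a sized body such as
    // this one yields the Content-Length header on its own.
    HttpResponse::Ok().body(vec![0u8; 1024])
}

fn large_body_size() -> BodySize {
    // BodySize::Sized64 is gone; Sized now takes a u64, so bodies larger
    // than usize on 32-bit targets no longer need a separate variant.
    BodySize::Sized(5 * 1024 * 1024 * 1024)
}
```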
## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to
@ -358,7 +366,7 @@
* `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
* StaticFiles and NamedFile has been move to separate create.
* StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFile`
@ -368,7 +376,7 @@
use `use actix_files::NamedFile`
* Multipart has been move to separate create.
* Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`


@ -9,7 +9,7 @@
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Documentation](https://docs.rs/actix-web/badge.svg)](https://docs.rs/actix-web)
[![Download](https://img.shields.io/crates/d/actix-web.svg)](https://crates.io/crates/actix-web)
[![Version](https://img.shields.io/badge/rustc-1.39+-lightgray.svg)](https://blog.rust-lang.org/2019/11/07/Rust-1.39.0.html)
[![Version](https://img.shields.io/badge/rustc-1.40+-lightgray.svg)](https://blog.rust-lang.org/2019/12/19/Rust-1.40.0.html)
![License](https://img.shields.io/crates/l/actix-web.svg)
</p>
@ -38,6 +38,13 @@ Actix web is a simple, pragmatic and extremely fast web framework for Rust.
* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
* Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Supports [Actix actor framework](https://github.com/actix/actix)
* Supports Rust 1.40+
## Docs
* [API documentation (master)](https://actix.rs/actix-web/actix_web)
* [API documentation (docs.rs)](https://docs.rs/actix-web)
* [User guide](https://actix.rs)
## Example
@ -74,7 +81,7 @@ async fn main() -> std::io::Result<()> {
* [Stateful](https://github.com/actix/examples/tree/master/state/)
* [Multipart streams](https://github.com/actix/examples/tree/master/multipart/)
* [Simple websocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
* [Tera](https://github.com/actix/examples/tree/master/template_tera/)
* [Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
* [Diesel integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2](https://github.com/actix/examples/tree/master/r2d2/)


@ -1,15 +0,0 @@
# Changes
## [0.2.0] - 2019-12-20
* Release
## [0.2.0-alpha.3] - 2019-12-07
* Migrate to actix-web 2.0.0
* Bump `derive_more` crate version to 0.99.0
## [0.1.0] - 2019-06-15
* Move cors middleware to separate crate


@ -1,26 +0,0 @@
[package]
name = "actix-cors"
version = "0.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Cross-origin resource sharing (CORS) for Actix applications."
readme = "README.md"
keywords = ["web", "framework"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-cors/"
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_cors"
path = "src/lib.rs"
[dependencies]
actix-web = "2.0.0-rc"
actix-service = "1.0.1"
derive_more = "0.99.2"
futures = "0.3.1"
[dev-dependencies]
actix-rt = "1.0.0"


@ -1 +0,0 @@
../LICENSE-APACHE


@ -1 +0,0 @@
../LICENSE-MIT


@ -1,5 +1,7 @@
# Cors Middleware for actix web framework [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-cors)](https://crates.io/crates/actix-cors) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
**This crate moved to https://github.com/actix/actix-extras.**
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)

File diff suppressed because it is too large.


@ -1,5 +1,14 @@
# Changes
## [0.3.0-alpha.1] - 2020-05-23
* Update `actix-web` and `actix-http` dependencies to alpha
* Fix some typos in the docs
* Bump minimum supported Rust version to 1.40
* Support sending Content-Length when Content-Range is specified [#1384]
[#1384]: https://github.com/actix/actix-web/pull/1384
## [0.2.1] - 2019-12-22
* Use the same format for file URLs regardless of platforms


@ -1,6 +1,6 @@
[package]
name = "actix-files"
version = "0.2.1"
version = "0.3.0-alpha.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Static files support for actix web."
readme = "README.md"
@ -18,12 +18,13 @@ name = "actix_files"
path = "src/lib.rs"
[dependencies]
actix-web = { version = "2.0.0-rc", default-features = false }
actix-http = "1.0.1"
actix-web = { version = "3.0.0-alpha.3", default-features = false }
actix-http = "2.0.0-alpha.4"
actix-service = "1.0.1"
bitflags = "1"
bytes = "0.5.3"
futures = "0.3.1"
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
derive_more = "0.99.2"
log = "0.4"
mime = "0.3"
@ -33,4 +34,4 @@ v_htmlescape = "0.4"
[dev-dependencies]
actix-rt = "1.0.0"
actix-web = { version = "2.0.0-rc", features=["openssl"] }
actix-web = { version = "3.0.0-alpha.3", features = ["openssl"] }


@ -6,4 +6,4 @@
* [API Documentation](https://docs.rs/actix-files/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-files](https://crates.io/crates/actix-files)
* Minimum supported Rust version: 1.33 or later
* Minimum supported Rust version: 1.40 or later


@ -5,6 +5,7 @@ use derive_more::Display;
#[derive(Display, Debug, PartialEq)]
pub enum FilesError {
/// Path is not a directory
#[allow(dead_code)]
#[display(fmt = "Path is not a directory. Unable to serve static files")]
IsNotDirectory,


@ -24,8 +24,8 @@ use actix_web::http::header::{self, DispositionType};
use actix_web::http::Method;
use actix_web::{web, FromRequest, HttpRequest, HttpResponse};
use bytes::Bytes;
use futures::future::{ok, ready, Either, FutureExt, LocalBoxFuture, Ready};
use futures::Stream;
use futures_core::Stream;
use futures_util::future::{ok, ready, Either, FutureExt, LocalBoxFuture, Ready};
use mime;
use mime_guess::from_ext;
use percent_encoding::{utf8_percent_encode, CONTROLS};
@ -521,7 +521,7 @@ impl Service for FilesService {
Err(e) => return Either::Left(ok(req.error_response(e))),
};
// full filepath
// full file path
let path = match self.directory.join(&real_path.0).canonicalize() {
Ok(path) => path,
Err(e) => return self.handle_err(e, req),
@ -952,135 +952,92 @@ mod tests {
#[actix_rt::test]
async fn test_named_file_content_range_headers() {
let mut srv = test::init_service(
App::new().service(Files::new("/test", ".").index_file("tests/test.binary")),
)
.await;
let srv = test::start(|| {
App::new().service(Files::new("/", "."))
});
// Valid range header
let request = TestRequest::get()
.uri("/t%65st/tests/test.binary")
let response = srv
.get("/tests/test.binary")
.header(header::RANGE, "bytes=10-20")
.to_request();
let response = test::call_service(&mut srv, request).await;
let contentrange = response
.headers()
.get(header::CONTENT_RANGE)
.unwrap()
.to_str()
.send()
.await
.unwrap();
assert_eq!(contentrange, "bytes 10-20/100");
let content_range = response.headers().get(header::CONTENT_RANGE).unwrap();
assert_eq!(content_range.to_str().unwrap(), "bytes 10-20/100");
// Invalid range header
let request = TestRequest::get()
.uri("/t%65st/tests/test.binary")
let response = srv
.get("/tests/test.binary")
.header(header::RANGE, "bytes=10-5")
.to_request();
let response = test::call_service(&mut srv, request).await;
let contentrange = response
.headers()
.get(header::CONTENT_RANGE)
.unwrap()
.to_str()
.send()
.await
.unwrap();
assert_eq!(contentrange, "bytes */100");
let content_range = response.headers().get(header::CONTENT_RANGE).unwrap();
assert_eq!(content_range.to_str().unwrap(), "bytes */100");
}
#[actix_rt::test]
async fn test_named_file_content_length_headers() {
// use actix_web::body::{MessageBody, ResponseBody};
let mut srv = test::init_service(
App::new().service(Files::new("test", ".").index_file("tests/test.binary")),
)
.await;
let srv = test::start(|| {
App::new().service(Files::new("/", "."))
});
// Valid range header
let request = TestRequest::get()
.uri("/t%65st/tests/test.binary")
let response = srv
.get("/tests/test.binary")
.header(header::RANGE, "bytes=10-20")
.to_request();
let _response = test::call_service(&mut srv, request).await;
.send()
.await
.unwrap();
let content_length = response.headers().get(header::CONTENT_LENGTH).unwrap();
assert_eq!(content_length.to_str().unwrap(), "11");
// let contentlength = response
// .headers()
// .get(header::CONTENT_LENGTH)
// .unwrap()
// .to_str()
// .unwrap();
// assert_eq!(contentlength, "11");
// Invalid range header
let request = TestRequest::get()
.uri("/t%65st/tests/test.binary")
.header(header::RANGE, "bytes=10-8")
.to_request();
let response = test::call_service(&mut srv, request).await;
assert_eq!(response.status(), StatusCode::RANGE_NOT_SATISFIABLE);
// Valid range header, starting from 0
let response = srv
.get("/tests/test.binary")
.header(header::RANGE, "bytes=0-20")
.send()
.await
.unwrap();
let content_length = response.headers().get(header::CONTENT_LENGTH).unwrap();
assert_eq!(content_length.to_str().unwrap(), "21");
// Without range header
let request = TestRequest::get()
.uri("/t%65st/tests/test.binary")
// .no_default_headers()
.to_request();
let _response = test::call_service(&mut srv, request).await;
let mut response = srv.get("/tests/test.binary").send().await.unwrap();
let content_length = response.headers().get(header::CONTENT_LENGTH).unwrap();
assert_eq!(content_length.to_str().unwrap(), "100");
// let contentlength = response
// .headers()
// .get(header::CONTENT_LENGTH)
// .unwrap()
// .to_str()
// .unwrap();
// assert_eq!(contentlength, "100");
// Should be no transfer-encoding
let transfer_encoding = response.headers().get(header::TRANSFER_ENCODING);
assert!(transfer_encoding.is_none());
// chunked
let request = TestRequest::get()
.uri("/t%65st/tests/test.binary")
.to_request();
let response = test::call_service(&mut srv, request).await;
// with enabled compression
// {
// let te = response
// .headers()
// .get(header::TRANSFER_ENCODING)
// .unwrap()
// .to_str()
// .unwrap();
// assert_eq!(te, "chunked");
// }
let bytes = test::read_body(response).await;
// Check file contents
let bytes = response.body().await.unwrap();
let data = Bytes::from(fs::read("tests/test.binary").unwrap());
assert_eq!(bytes, data);
}
#[actix_rt::test]
async fn test_head_content_length_headers() {
let mut srv = test::init_service(
App::new().service(Files::new("test", ".").index_file("tests/test.binary")),
)
.await;
let srv = test::start(|| {
App::new().service(Files::new("/", "."))
});
// Valid range header
let request = TestRequest::default()
.method(Method::HEAD)
.uri("/t%65st/tests/test.binary")
.to_request();
let _response = test::call_service(&mut srv, request).await;
let response = srv
.head("/tests/test.binary")
.send()
.await
.unwrap();
// TODO: fix check
// let contentlength = response
// .headers()
// .get(header::CONTENT_LENGTH)
// .unwrap()
// .to_str()
// .unwrap();
// assert_eq!(contentlength, "100");
let content_length = response
.headers()
.get(header::CONTENT_LENGTH)
.unwrap()
.to_str()
.unwrap();
assert_eq!(content_length, "100");
}
#[actix_rt::test]


@ -18,7 +18,7 @@ use actix_web::http::header::{
};
use actix_web::http::{ContentEncoding, StatusCode};
use actix_web::{Error, HttpMessage, HttpRequest, HttpResponse, Responder};
use futures::future::{ready, Ready};
use futures_util::future::{ready, Ready};
use crate::range::HttpRange;
use crate::ChunkedReadFile;
@ -388,11 +388,12 @@ impl NamedFile {
fut: None,
counter: 0,
};
if offset != 0 || length != self.md.len() {
Ok(resp.status(StatusCode::PARTIAL_CONTENT).streaming(reader))
} else {
Ok(resp.body(SizedStream::new(length, reader)))
resp.status(StatusCode::PARTIAL_CONTENT);
}
Ok(resp.body(SizedStream::new(length, reader)))
}
}


@ -1,37 +0,0 @@
[package]
name = "actix-framed"
version = "0.3.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix framed app server"
readme = "README.md"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-framed/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT/Apache-2.0"
edition = "2018"
[lib]
name = "actix_framed"
path = "src/lib.rs"
[dependencies]
actix-codec = "0.2.0"
actix-service = "1.0.1"
actix-router = "0.2.1"
actix-rt = "1.0.0"
actix-http = "1.0.1"
bytes = "0.5.3"
futures = "0.3.1"
pin-project = "0.4.6"
log = "0.4"
[dev-dependencies]
actix-server = "1.0.0"
actix-connect = { version = "1.0.0", features=["openssl"] }
actix-http-test = { version = "1.0.0", features=["openssl"] }
actix-utils = "1.0.3"


@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,25 +0,0 @@
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.


@ -1,8 +1,3 @@
# Framed app for actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-framed)](https://crates.io/crates/actix-framed) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Framed app for actix web
## Documentation & community resources
* [API Documentation](https://docs.rs/actix-framed/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-framed](https://crates.io/crates/actix-framed)
* Minimum supported Rust version: 1.33 or later
**This crate has been deprecated and removed.**


@ -1,24 +0,0 @@
# Changes
## [0.3.0] - 2019-12-25
* Migrate to actix-http 1.0
## [0.2.1] - 2019-07-20
* Remove unneeded actix-utils dependency
## [0.2.0] - 2019-05-12
* Update dependencies
## [0.1.0] - 2019-04-16
* Update tests
## [0.1.0-alpha.1] - 2019-04-12
* Initial release


@ -1,221 +0,0 @@
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_http::h1::{Codec, SendResponse};
use actix_http::{Error, Request, Response};
use actix_router::{Path, Router, Url};
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use futures::future::{ok, FutureExt, LocalBoxFuture};
use crate::helpers::{BoxedHttpNewService, BoxedHttpService, HttpNewService};
use crate::request::FramedRequest;
use crate::state::State;
type BoxedResponse = LocalBoxFuture<'static, Result<(), Error>>;
pub trait HttpServiceFactory {
type Factory: ServiceFactory;
fn path(&self) -> &str;
fn create(self) -> Self::Factory;
}
/// Application builder
pub struct FramedApp<T, S = ()> {
state: State<S>,
services: Vec<(String, BoxedHttpNewService<FramedRequest<T, S>>)>,
}
impl<T: 'static> FramedApp<T, ()> {
pub fn new() -> Self {
FramedApp {
state: State::new(()),
services: Vec::new(),
}
}
}
impl<T: 'static, S: 'static> FramedApp<T, S> {
pub fn with(state: S) -> FramedApp<T, S> {
FramedApp {
services: Vec::new(),
state: State::new(state),
}
}
pub fn service<U>(mut self, factory: U) -> Self
where
U: HttpServiceFactory,
U::Factory: ServiceFactory<
Config = (),
Request = FramedRequest<T, S>,
Response = (),
Error = Error,
InitError = (),
> + 'static,
<U::Factory as ServiceFactory>::Future: 'static,
<U::Factory as ServiceFactory>::Service: Service<
Request = FramedRequest<T, S>,
Response = (),
Error = Error,
Future = LocalBoxFuture<'static, Result<(), Error>>,
>,
{
let path = factory.path().to_string();
self.services
.push((path, Box::new(HttpNewService::new(factory.create()))));
self
}
}
impl<T, S> IntoServiceFactory<FramedAppFactory<T, S>> for FramedApp<T, S>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
S: 'static,
{
fn into_factory(self) -> FramedAppFactory<T, S> {
FramedAppFactory {
state: self.state,
services: Rc::new(self.services),
}
}
}
#[derive(Clone)]
pub struct FramedAppFactory<T, S> {
state: State<S>,
services: Rc<Vec<(String, BoxedHttpNewService<FramedRequest<T, S>>)>>,
}
impl<T, S> ServiceFactory for FramedAppFactory<T, S>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
S: 'static,
{
type Config = ();
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type InitError = ();
type Service = FramedAppService<T, S>;
type Future = CreateService<T, S>;
fn new_service(&self, _: ()) -> Self::Future {
CreateService {
fut: self
.services
.iter()
.map(|(path, service)| {
CreateServiceItem::Future(
Some(path.clone()),
service.new_service(()),
)
})
.collect(),
state: self.state.clone(),
}
}
}
#[doc(hidden)]
pub struct CreateService<T, S> {
fut: Vec<CreateServiceItem<T, S>>,
state: State<S>,
}
enum CreateServiceItem<T, S> {
Future(
Option<String>,
LocalBoxFuture<'static, Result<BoxedHttpService<FramedRequest<T, S>>, ()>>,
),
Service(String, BoxedHttpService<FramedRequest<T, S>>),
}
impl<S: 'static, T: 'static> Future for CreateService<T, S>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<FramedAppService<T, S>, ()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let mut done = true;
// poll http services
for item in &mut self.fut {
let res = match item {
CreateServiceItem::Future(ref mut path, ref mut fut) => {
match Pin::new(fut).poll(cx) {
Poll::Ready(Ok(service)) => {
Some((path.take().unwrap(), service))
}
Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
Poll::Pending => {
done = false;
None
}
}
}
CreateServiceItem::Service(_, _) => continue,
};
if let Some((path, service)) = res {
*item = CreateServiceItem::Service(path, service);
}
}
if done {
let router = self
.fut
.drain(..)
.fold(Router::build(), |mut router, item| {
match item {
CreateServiceItem::Service(path, service) => {
router.path(&path, service);
}
CreateServiceItem::Future(_, _) => unreachable!(),
}
router
});
Poll::Ready(Ok(FramedAppService {
router: router.finish(),
state: self.state.clone(),
}))
} else {
Poll::Pending
}
}
}
pub struct FramedAppService<T, S> {
state: State<S>,
router: Router<BoxedHttpService<FramedRequest<T, S>>>,
}
impl<S: 'static, T: 'static> Service for FramedAppService<T, S>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type Future = BoxedResponse;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, (req, framed): (Request, Framed<T, Codec>)) -> Self::Future {
let mut path = Path::new(Url::new(req.uri().clone()));
if let Some((srv, _info)) = self.router.recognize_mut(&mut path) {
return srv.call(FramedRequest::new(req, framed, path, self.state.clone()));
}
SendResponse::new(framed, Response::NotFound().finish())
.then(|_| ok(()))
.boxed_local()
}
}


@ -1,98 +0,0 @@
use std::task::{Context, Poll};
use actix_http::Error;
use actix_service::{Service, ServiceFactory};
use futures::future::{FutureExt, LocalBoxFuture};
pub(crate) type BoxedHttpService<Req> = Box<
dyn Service<
Request = Req,
Response = (),
Error = Error,
Future = LocalBoxFuture<'static, Result<(), Error>>,
>,
>;
pub(crate) type BoxedHttpNewService<Req> = Box<
dyn ServiceFactory<
Config = (),
Request = Req,
Response = (),
Error = Error,
InitError = (),
Service = BoxedHttpService<Req>,
Future = LocalBoxFuture<'static, Result<BoxedHttpService<Req>, ()>>,
>,
>;
pub(crate) struct HttpNewService<T: ServiceFactory>(T);
impl<T> HttpNewService<T>
where
T: ServiceFactory<Response = (), Error = Error>,
T::Response: 'static,
T::Future: 'static,
T::Service: Service<Future = LocalBoxFuture<'static, Result<(), Error>>> + 'static,
<T::Service as Service>::Future: 'static,
{
pub fn new(service: T) -> Self {
HttpNewService(service)
}
}
impl<T> ServiceFactory for HttpNewService<T>
where
T: ServiceFactory<Config = (), Response = (), Error = Error>,
T::Request: 'static,
T::Future: 'static,
T::Service: Service<Future = LocalBoxFuture<'static, Result<(), Error>>> + 'static,
<T::Service as Service>::Future: 'static,
{
type Config = ();
type Request = T::Request;
type Response = ();
type Error = Error;
type InitError = ();
type Service = BoxedHttpService<T::Request>;
type Future = LocalBoxFuture<'static, Result<Self::Service, ()>>;
fn new_service(&self, _: ()) -> Self::Future {
let fut = self.0.new_service(());
async move {
fut.await.map_err(|_| ()).map(|service| {
let service: BoxedHttpService<_> =
Box::new(HttpServiceWrapper { service });
service
})
}
.boxed_local()
}
}
struct HttpServiceWrapper<T: Service> {
service: T,
}
impl<T> Service for HttpServiceWrapper<T>
where
T: Service<
Response = (),
Future = LocalBoxFuture<'static, Result<(), Error>>,
Error = Error,
>,
T::Request: 'static,
{
type Request = T::Request;
type Response = ();
type Error = Error;
type Future = LocalBoxFuture<'static, Result<(), Error>>;
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx)
}
fn call(&mut self, req: Self::Request) -> Self::Future {
self.service.call(req)
}
}


@ -1,17 +0,0 @@
#![allow(clippy::type_complexity, clippy::new_without_default, dead_code)]
mod app;
mod helpers;
mod request;
mod route;
mod service;
mod state;
pub mod test;
// re-export for convinience
pub use actix_http::{http, Error, HttpMessage, Response, ResponseError};
pub use self::app::{FramedApp, FramedAppService};
pub use self::request::FramedRequest;
pub use self::route::FramedRoute;
pub use self::service::{SendError, VerifyWebSockets};
pub use self::state::State;


@ -1,172 +0,0 @@
use std::cell::{Ref, RefMut};
use actix_codec::Framed;
use actix_http::http::{HeaderMap, Method, Uri, Version};
use actix_http::{h1::Codec, Extensions, Request, RequestHead};
use actix_router::{Path, Url};
use crate::state::State;
pub struct FramedRequest<Io, S = ()> {
req: Request,
framed: Framed<Io, Codec>,
state: State<S>,
pub(crate) path: Path<Url>,
}
impl<Io, S> FramedRequest<Io, S> {
pub fn new(
req: Request,
framed: Framed<Io, Codec>,
path: Path<Url>,
state: State<S>,
) -> Self {
Self {
req,
framed,
state,
path,
}
}
}
impl<Io, S> FramedRequest<Io, S> {
/// Split request into a parts
pub fn into_parts(self) -> (Request, Framed<Io, Codec>, State<S>) {
(self.req, self.framed, self.state)
}
/// This method returns reference to the request head
#[inline]
pub fn head(&self) -> &RequestHead {
self.req.head()
}
/// This method returns muttable reference to the request head.
/// panics if multiple references of http request exists.
#[inline]
pub fn head_mut(&mut self) -> &mut RequestHead {
self.req.head_mut()
}
/// Shared application state
#[inline]
pub fn state(&self) -> &S {
self.state.get_ref()
}
/// Request's uri.
#[inline]
pub fn uri(&self) -> &Uri {
&self.head().uri
}
/// Read the Request method.
#[inline]
pub fn method(&self) -> &Method {
&self.head().method
}
/// Read the Request Version.
#[inline]
pub fn version(&self) -> Version {
self.head().version
}
#[inline]
/// Returns request's headers.
pub fn headers(&self) -> &HeaderMap {
&self.head().headers
}
/// The target path of this Request.
#[inline]
pub fn path(&self) -> &str {
self.head().uri.path()
}
/// The query string in the URL.
///
/// E.g., id=10
#[inline]
pub fn query_string(&self) -> &str {
if let Some(query) = self.uri().query().as_ref() {
query
} else {
""
}
}
/// Get a reference to the Path parameters.
///
/// Params is a container for url parameters.
/// A variable segment is specified in the form `{identifier}`,
/// where the identifier can be used later in a request handler to
/// access the matched value for that segment.
#[inline]
pub fn match_info(&self) -> &Path<Url> {
&self.path
}
/// Request extensions
#[inline]
pub fn extensions(&self) -> Ref<Extensions> {
self.head().extensions()
}
/// Mutable reference to a the request's extensions
#[inline]
pub fn extensions_mut(&self) -> RefMut<Extensions> {
self.head().extensions_mut()
}
}
#[cfg(test)]
mod tests {
use std::convert::TryFrom;
use actix_http::http::{HeaderName, HeaderValue};
use actix_http::test::{TestBuffer, TestRequest};
use super::*;
#[test]
fn test_reqest() {
let buf = TestBuffer::empty();
let framed = Framed::new(buf, Codec::default());
let req = TestRequest::with_uri("/index.html?q=1")
.header("content-type", "test")
.finish();
let path = Path::new(Url::new(req.uri().clone()));
let mut freq = FramedRequest::new(req, framed, path, State::new(10u8));
assert_eq!(*freq.state(), 10);
assert_eq!(freq.version(), Version::HTTP_11);
assert_eq!(freq.method(), Method::GET);
assert_eq!(freq.path(), "/index.html");
assert_eq!(freq.query_string(), "q=1");
assert_eq!(
freq.headers()
.get("content-type")
.unwrap()
.to_str()
.unwrap(),
"test"
);
freq.head_mut().headers.insert(
HeaderName::try_from("x-hdr").unwrap(),
HeaderValue::from_static("test"),
);
assert_eq!(
freq.headers().get("x-hdr").unwrap().to_str().unwrap(),
"test"
);
freq.extensions_mut().insert(100usize);
assert_eq!(*freq.extensions().get::<usize>().unwrap(), 100usize);
let (_, _, state) = freq.into_parts();
assert_eq!(*state, 10);
}
}


@ -1,159 +0,0 @@
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_http::{http::Method, Error};
use actix_service::{Service, ServiceFactory};
use futures::future::{ok, FutureExt, LocalBoxFuture, Ready};
use log::error;
use crate::app::HttpServiceFactory;
use crate::request::FramedRequest;
/// Resource route definition
///
/// Route uses builder-like pattern for configuration.
/// If handler is not explicitly set, default *404 Not Found* handler is used.
pub struct FramedRoute<Io, S, F = (), R = (), E = ()> {
handler: F,
pattern: String,
methods: Vec<Method>,
state: PhantomData<(Io, S, R, E)>,
}
impl<Io, S> FramedRoute<Io, S> {
pub fn new(pattern: &str) -> Self {
FramedRoute {
handler: (),
pattern: pattern.to_string(),
methods: Vec::new(),
state: PhantomData,
}
}
pub fn get(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::GET)
}
pub fn post(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::POST)
}
pub fn put(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::PUT)
}
pub fn delete(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::DELETE)
}
pub fn method(mut self, method: Method) -> Self {
self.methods.push(method);
self
}
pub fn to<F, R, E>(self, handler: F) -> FramedRoute<Io, S, F, R, E>
where
F: FnMut(FramedRequest<Io, S>) -> R,
R: Future<Output = Result<(), E>> + 'static,
E: fmt::Debug,
{
FramedRoute {
handler,
pattern: self.pattern,
methods: self.methods,
state: PhantomData,
}
}
}
impl<Io, S, F, R, E> HttpServiceFactory for FramedRoute<Io, S, F, R, E>
where
Io: AsyncRead + AsyncWrite + 'static,
F: FnMut(FramedRequest<Io, S>) -> R + Clone,
R: Future<Output = Result<(), E>> + 'static,
E: fmt::Display,
{
type Factory = FramedRouteFactory<Io, S, F, R, E>;
fn path(&self) -> &str {
&self.pattern
}
fn create(self) -> Self::Factory {
FramedRouteFactory {
handler: self.handler,
methods: self.methods,
_t: PhantomData,
}
}
}
pub struct FramedRouteFactory<Io, S, F, R, E> {
handler: F,
methods: Vec<Method>,
_t: PhantomData<(Io, S, R, E)>,
}
impl<Io, S, F, R, E> ServiceFactory for FramedRouteFactory<Io, S, F, R, E>
where
Io: AsyncRead + AsyncWrite + 'static,
F: FnMut(FramedRequest<Io, S>) -> R + Clone,
R: Future<Output = Result<(), E>> + 'static,
E: fmt::Display,
{
type Config = ();
type Request = FramedRequest<Io, S>;
type Response = ();
type Error = Error;
type InitError = ();
type Service = FramedRouteService<Io, S, F, R, E>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(FramedRouteService {
handler: self.handler.clone(),
methods: self.methods.clone(),
_t: PhantomData,
})
}
}
pub struct FramedRouteService<Io, S, F, R, E> {
handler: F,
methods: Vec<Method>,
_t: PhantomData<(Io, S, R, E)>,
}
impl<Io, S, F, R, E> Service for FramedRouteService<Io, S, F, R, E>
where
Io: AsyncRead + AsyncWrite + 'static,
F: FnMut(FramedRequest<Io, S>) -> R + Clone,
R: Future<Output = Result<(), E>> + 'static,
E: fmt::Display,
{
type Request = FramedRequest<Io, S>;
type Response = ();
type Error = Error;
type Future = LocalBoxFuture<'static, Result<(), Error>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: FramedRequest<Io, S>) -> Self::Future {
let fut = (self.handler)(req);
async move {
let res = fut.await;
if let Err(e) = res {
error!("Error in request handler: {}", e);
}
Ok(())
}
.boxed_local()
}
}


@ -1,156 +0,0 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_http::body::BodySize;
use actix_http::error::ResponseError;
use actix_http::h1::{Codec, Message};
use actix_http::ws::{verify_handshake, HandshakeError};
use actix_http::{Request, Response};
use actix_service::{Service, ServiceFactory};
use futures::future::{err, ok, Either, Ready};
use futures::Future;
/// Service that verifies incoming request if it is valid websocket
/// upgrade request. In case of error returns `HandshakeError`
pub struct VerifyWebSockets<T, C> {
_t: PhantomData<(T, C)>,
}
impl<T, C> Default for VerifyWebSockets<T, C> {
fn default() -> Self {
VerifyWebSockets { _t: PhantomData }
}
}
impl<T, C> ServiceFactory for VerifyWebSockets<T, C> {
type Config = C;
type Request = (Request, Framed<T, Codec>);
type Response = (Request, Framed<T, Codec>);
type Error = (HandshakeError, Framed<T, Codec>);
type InitError = ();
type Service = VerifyWebSockets<T, C>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: C) -> Self::Future {
ok(VerifyWebSockets { _t: PhantomData })
}
}
impl<T, C> Service for VerifyWebSockets<T, C> {
type Request = (Request, Framed<T, Codec>);
type Response = (Request, Framed<T, Codec>);
type Error = (HandshakeError, Framed<T, Codec>);
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, (req, framed): (Request, Framed<T, Codec>)) -> Self::Future {
match verify_handshake(req.head()) {
Err(e) => err((e, framed)),
Ok(_) => ok((req, framed)),
}
}
}
/// Send http/1 error response
pub struct SendError<T, R, E, C>(PhantomData<(T, R, E, C)>);
impl<T, R, E, C> Default for SendError<T, R, E, C>
where
T: AsyncRead + AsyncWrite,
E: ResponseError,
{
fn default() -> Self {
SendError(PhantomData)
}
}
impl<T, R, E, C> ServiceFactory for SendError<T, R, E, C>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
R: 'static,
E: ResponseError + 'static,
{
type Config = C;
type Request = Result<R, (E, Framed<T, Codec>)>;
type Response = R;
type Error = (E, Framed<T, Codec>);
type InitError = ();
type Service = SendError<T, R, E, C>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: C) -> Self::Future {
ok(SendError(PhantomData))
}
}
impl<T, R, E, C> Service for SendError<T, R, E, C>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
R: 'static,
E: ResponseError + 'static,
{
type Request = Result<R, (E, Framed<T, Codec>)>;
type Response = R;
type Error = (E, Framed<T, Codec>);
type Future = Either<Ready<Result<R, (E, Framed<T, Codec>)>>, SendErrorFut<T, R, E>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Result<R, (E, Framed<T, Codec>)>) -> Self::Future {
match req {
Ok(r) => Either::Left(ok(r)),
Err((e, framed)) => {
let res = e.error_response().drop_body();
Either::Right(SendErrorFut {
framed: Some(framed),
res: Some((res, BodySize::Empty).into()),
err: Some(e),
_t: PhantomData,
})
}
}
}
}
#[pin_project::pin_project]
pub struct SendErrorFut<T, R, E> {
res: Option<Message<(Response<()>, BodySize)>>,
framed: Option<Framed<T, Codec>>,
err: Option<E>,
_t: PhantomData<R>,
}
impl<T, R, E> Future for SendErrorFut<T, R, E>
where
E: ResponseError,
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<R, (E, Framed<T, Codec>)>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
if let Some(res) = self.res.take() {
if self.framed.as_mut().unwrap().write(res).is_err() {
return Poll::Ready(Err((
self.err.take().unwrap(),
self.framed.take().unwrap(),
)));
}
}
match self.framed.as_mut().unwrap().flush(cx) {
Poll::Ready(Ok(_)) => {
Poll::Ready(Err((self.err.take().unwrap(), self.framed.take().unwrap())))
}
Poll::Ready(Err(_)) => {
Poll::Ready(Err((self.err.take().unwrap(), self.framed.take().unwrap())))
}
Poll::Pending => Poll::Pending,
}
}
}

View File

@ -1,29 +0,0 @@
use std::ops::Deref;
use std::sync::Arc;
/// Application state
pub struct State<S>(Arc<S>);
impl<S> State<S> {
pub fn new(state: S) -> State<S> {
State(Arc::new(state))
}
pub fn get_ref(&self) -> &S {
self.0.as_ref()
}
}
impl<S> Deref for State<S> {
type Target = S;
fn deref(&self) -> &S {
self.0.as_ref()
}
}
impl<S> Clone for State<S> {
fn clone(&self) -> State<S> {
State(self.0.clone())
}
}
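
A minimal usage sketch of the `State` wrapper above (assuming it is in scope, e.g. via `actix_framed::State`): the value lives in an `Arc`, so clones are cheap and `Deref` exposes the inner state directly.

```rust
// Hypothetical application state; any `S` works.
struct AppState {
    counter: u32,
}

fn demo() {
    let state = State::new(AppState { counter: 0 });
    let cloned = state.clone(); // clones the Arc, not the data

    assert_eq!(state.get_ref().counter, 0);
    assert_eq!(cloned.counter, 0); // field access via Deref<Target = AppState>
}
```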

View File

@ -1,155 +0,0 @@
//! Various helpers for Actix applications to use during testing.
use std::convert::TryFrom;
use std::future::Future;
use actix_codec::Framed;
use actix_http::h1::Codec;
use actix_http::http::header::{Header, HeaderName, IntoHeaderValue};
use actix_http::http::{Error as HttpError, Method, Uri, Version};
use actix_http::test::{TestBuffer, TestRequest as HttpTestRequest};
use actix_router::{Path, Url};
use crate::{FramedRequest, State};
/// Test `Request` builder.
pub struct TestRequest<S = ()> {
req: HttpTestRequest,
path: Path<Url>,
state: State<S>,
}
impl Default for TestRequest<()> {
fn default() -> TestRequest {
TestRequest {
req: HttpTestRequest::default(),
path: Path::new(Url::new(Uri::default())),
state: State::new(()),
}
}
}
impl TestRequest<()> {
/// Create TestRequest and set request uri
pub fn with_uri(path: &str) -> Self {
Self::get().uri(path)
}
/// Create TestRequest and set header
pub fn with_hdr<H: Header>(hdr: H) -> Self {
Self::default().set(hdr)
}
/// Create TestRequest and set header
pub fn with_header<K, V>(key: K, value: V) -> Self
where
HeaderName: TryFrom<K>,
<HeaderName as TryFrom<K>>::Error: Into<HttpError>,
V: IntoHeaderValue,
{
Self::default().header(key, value)
}
/// Create TestRequest and set method to `Method::GET`
pub fn get() -> Self {
Self::default().method(Method::GET)
}
/// Create TestRequest and set method to `Method::POST`
pub fn post() -> Self {
Self::default().method(Method::POST)
}
}
impl<S> TestRequest<S> {
/// Create TestRequest with application state
pub fn with_state(state: S) -> TestRequest<S> {
let req = TestRequest::get();
TestRequest {
state: State::new(state),
req: req.req,
path: req.path,
}
}
/// Set HTTP version of this request
pub fn version(mut self, ver: Version) -> Self {
self.req.version(ver);
self
}
/// Set HTTP method of this request
pub fn method(mut self, meth: Method) -> Self {
self.req.method(meth);
self
}
/// Set HTTP Uri of this request
pub fn uri(mut self, path: &str) -> Self {
self.req.uri(path);
self
}
/// Set a header
pub fn set<H: Header>(mut self, hdr: H) -> Self {
self.req.set(hdr);
self
}
/// Set a header
pub fn header<K, V>(mut self, key: K, value: V) -> Self
where
HeaderName: TryFrom<K>,
<HeaderName as TryFrom<K>>::Error: Into<HttpError>,
V: IntoHeaderValue,
{
self.req.header(key, value);
self
}
/// Set request path pattern parameter
pub fn param(mut self, name: &'static str, value: &'static str) -> Self {
self.path.add_static(name, value);
self
}
/// Complete request creation and generate a `FramedRequest` instance
pub fn finish(mut self) -> FramedRequest<TestBuffer, S> {
let req = self.req.finish();
self.path.get_mut().update(req.uri());
let framed = Framed::new(TestBuffer::empty(), Codec::default());
FramedRequest::new(req, framed, self.path, self.state)
}
/// Generate a `FramedRequest` instance and execute the async handler
pub async fn run<F, R, I, E>(self, f: F) -> Result<I, E>
where
F: FnOnce(FramedRequest<TestBuffer, S>) -> R,
R: Future<Output = Result<I, E>>,
{
f(self.finish()).await
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test() {
let req = TestRequest::with_uri("/index.html")
.header("x-test", "test")
.param("test", "123")
.finish();
assert_eq!(*req.state(), ());
assert_eq!(req.version(), Version::HTTP_11);
assert_eq!(req.method(), Method::GET);
assert_eq!(req.path(), "/index.html");
assert_eq!(req.query_string(), "");
assert_eq!(
req.headers().get("x-test").unwrap().to_str().unwrap(),
"test"
);
assert_eq!(&req.match_info()["test"], "123");
}
}

View File

@ -1,159 +0,0 @@
use actix_codec::{AsyncRead, AsyncWrite};
use actix_http::{body, http::StatusCode, ws, Error, HttpService, Response};
use actix_http_test::test_server;
use actix_service::{pipeline_factory, IntoServiceFactory, ServiceFactory};
use actix_utils::framed::Dispatcher;
use bytes::Bytes;
use futures::{future, SinkExt, StreamExt};
use actix_framed::{FramedApp, FramedRequest, FramedRoute, SendError, VerifyWebSockets};
async fn ws_service<T: AsyncRead + AsyncWrite>(
req: FramedRequest<T>,
) -> Result<(), Error> {
let (req, mut framed, _) = req.into_parts();
let res = ws::handshake(req.head()).unwrap().message_body(());
framed
.send((res, body::BodySize::None).into())
.await
.unwrap();
Dispatcher::new(framed.into_framed(ws::Codec::new()), service)
.await
.unwrap();
Ok(())
}
async fn service(msg: ws::Frame) -> Result<ws::Message, Error> {
let msg = match msg {
ws::Frame::Ping(msg) => ws::Message::Pong(msg),
ws::Frame::Text(text) => {
ws::Message::Text(String::from_utf8_lossy(&text).to_string())
}
ws::Frame::Binary(bin) => ws::Message::Binary(bin),
ws::Frame::Close(reason) => ws::Message::Close(reason),
_ => panic!(),
};
Ok(msg)
}
#[actix_rt::test]
async fn test_simple() {
let mut srv = test_server(|| {
HttpService::build()
.upgrade(
FramedApp::new().service(FramedRoute::get("/index.html").to(ws_service)),
)
.finish(|_| future::ok::<_, Error>(Response::NotFound()))
.tcp()
});
assert!(srv.ws_at("/test").await.is_err());
// client service
let mut framed = srv.ws_at("/index.html").await.unwrap();
framed
.send(ws::Message::Text("text".to_string()))
.await
.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Text(Bytes::from_static(b"text"))
);
framed
.send(ws::Message::Binary("text".into()))
.await
.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Binary(Bytes::from_static(b"text"))
);
framed.send(ws::Message::Ping("text".into())).await.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Pong("text".to_string().into())
);
framed
.send(ws::Message::Close(Some(ws::CloseCode::Normal.into())))
.await
.unwrap();
let (item, _) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Close(Some(ws::CloseCode::Normal.into()))
);
}
#[actix_rt::test]
async fn test_service() {
let mut srv = test_server(|| {
pipeline_factory(actix_http::h1::OneRequest::new().map_err(|_| ())).and_then(
pipeline_factory(
pipeline_factory(VerifyWebSockets::default())
.then(SendError::default())
.map_err(|_| ()),
)
.and_then(
FramedApp::new()
.service(FramedRoute::get("/index.html").to(ws_service))
.into_factory()
.map_err(|_| ()),
),
)
});
// non ws request
let res = srv.get("/index.html").send().await.unwrap();
assert_eq!(res.status(), StatusCode::BAD_REQUEST);
// not found
assert!(srv.ws_at("/test").await.is_err());
// client service
let mut framed = srv.ws_at("/index.html").await.unwrap();
framed
.send(ws::Message::Text("text".to_string()))
.await
.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Text(Bytes::from_static(b"text"))
);
framed
.send(ws::Message::Binary("text".into()))
.await
.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Binary(Bytes::from_static(b"text"))
);
framed.send(ws::Message::Ping("text".into())).await.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Pong("text".to_string().into())
);
framed
.send(ws::Message::Close(Some(ws::CloseCode::Normal.into())))
.await
.unwrap();
let (item, _) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Close(Some(ws::CloseCode::Normal.into()))
);
}

View File

@ -1,41 +0,0 @@
environment:
global:
PROJECT_NAME: actix-http
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test

View File

@ -1,10 +1,63 @@
# Changes
# [Unreleased]
## [2.0.0-alpha.4] - 2020-05-21
### Changed
* Update the `time` dependency to 0.2.5
* Bump minimum supported Rust version to 1.40
* `content_length` function is removed; set `Content-Length` by calling the `no_chunking` function [#1439]
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
* Update `base64` dependency to 0.12
### Fixed
* Support parsing of `SameSite=None` [#1503]
[#1439]: https://github.com/actix/actix-web/pull/1439
[#1503]: https://github.com/actix/actix-web/pull/1503
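A minimal migration sketch for the `BodySize` change noted above (assuming `actix_http::body::BodySize` is in scope): the `Sized64` variant is gone and `Sized` now carries a `u64`.

```rust
use actix_http::body::BodySize;

// Before: BodySize::Sized(len as usize) for small bodies and BodySize::Sized64(len)
// for large ones. After: a single variant covers both.
fn size_of(len: u64) -> BodySize {
    BodySize::Sized(len)
}
```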
## [2.0.0-alpha.3] - 2020-05-08
### Fixed
* Correct spelling of ConnectError::Unresolved [#1487]
* Fix a mistake in the encoding of websocket continuation messages wherein
Item::FirstText and Item::FirstBinary are each encoded as the other.
### Changed
* Implement `std::error::Error` for our custom errors [#1422]
* Remove `failure` support for `ResponseError` since that crate
will be deprecated in the near future.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1487]: https://github.com/actix/actix-web/pull/1487
## [2.0.0-alpha.2] - 2020-03-07
### Changed
* Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395]
* Change the default HTTP2 connection window size to 2MB and the initial (stream) window size to 1MB
to improve download speed for awc when downloading large objects. [#1394]
* `client::Connector` accepts `initial_window_size` and `initial_connection_window_size` HTTP2 configuration options. [#1394]
* `client::Connector` now allows setting `max_http_version` to limit the HTTP version used. [#1394]
[#1394]: https://github.com/actix/actix-web/pull/1394
[#1395]: https://github.com/actix/actix-web/pull/1395
## [2.0.0-alpha.1] - 2020-02-27
### Changed
* Update the `time` dependency to 0.2.7.
* Moved actor messages support from the actix crate; enabled with the `actors` feature.
* Breaking change: trait MessageBody requires `Unpin` and accepts `Pin<&mut Self>` instead of `&mut self` in `poll_next()`.
* MessageBody is not implemented for &'static [u8] anymore.
### Fixed

View File

@ -1,6 +1,6 @@
[package]
name = "actix-http"
version = "1.0.1"
version = "2.0.0-alpha.4"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix http primitives"
readme = "README.md"
@ -15,7 +15,7 @@ license = "MIT/Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "failure", "compress", "secure-cookies"]
features = ["openssl", "rustls", "compress", "secure-cookies", "actors"]
[lib]
name = "actix_http"
@ -33,42 +33,44 @@ rustls = ["actix-tls/rustls", "actix-connect/rustls"]
# enable compression support
compress = ["flate2", "brotli2"]
# failure integration. actix does not use failure anymore
failure = ["fail-ure"]
# support for secure cookies
secure-cookies = ["ring"]
# support for actix Actor messages
actors = ["actix"]
[dependencies]
actix-service = "1.0.1"
actix-service = "1.0.5"
actix-codec = "0.2.0"
actix-connect = "1.0.1"
actix-utils = "1.0.3"
actix-connect = "2.0.0-alpha.3"
actix-utils = "1.0.6"
actix-rt = "1.0.0"
actix-threadpool = "0.3.1"
actix-tls = { version = "1.0.0", optional = true }
actix-tls = { version = "2.0.0-alpha.1", optional = true }
actix = { version = "0.10.0-alpha.1", optional = true }
base64 = "0.11"
base64 = "0.12"
bitflags = "1.2"
bytes = "0.5.3"
copyless = "0.1.4"
derive_more = "0.99.2"
either = "1.5.3"
encoding_rs = "0.8"
futures-core = "0.3.1"
futures-util = "0.3.1"
futures-channel = "0.3.1"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
h2 = "0.2.1"
http = "0.2.0"
httparse = "1.3"
indexmap = "1.3"
itoa = "0.4"
lazy_static = "1.4"
language-tags = "0.2"
log = "0.4"
mime = "0.3"
percent-encoding = "2.1"
pin-project = "0.4.6"
pin-project = "0.4.17"
rand = "0.7"
regex = "1.3"
serde = "1.0"
@ -76,7 +78,7 @@ serde_json = "1.0"
sha-1 = "0.8"
slab = "0.4"
serde_urlencoded = "0.6.1"
time = { version = "0.2.5", default-features = false, features = ["std"] }
time = { version = "0.2.7", default-features = false, features = ["std"] }
# for secure cookie
ring = { version = "0.16.9", optional = true }
@ -85,16 +87,21 @@ ring = { version = "0.16.9", optional = true }
brotli2 = { version="0.3.2", optional = true }
flate2 = { version = "1.0.13", optional = true }
# optional deps
fail-ure = { version = "0.1.5", package="failure", optional = true }
[dev-dependencies]
actix-server = "1.0.0"
actix-connect = { version = "1.0.0", features=["openssl"] }
actix-http-test = { version = "1.0.0", features=["openssl"] }
actix-tls = { version = "1.0.0", features=["openssl"] }
futures = "0.3.1"
env_logger = "0.6"
actix-server = "1.0.1"
actix-connect = { version = "2.0.0-alpha.2", features = ["openssl"] }
actix-http-test = { version = "2.0.0-alpha.1", features = ["openssl"] }
actix-tls = { version = "2.0.0-alpha.1", features = ["openssl"] }
criterion = "0.3"
env_logger = "0.7"
serde_derive = "1.0"
open-ssl = { version="0.10", package = "openssl" }
rust-tls = { version="0.16", package = "rustls" }
rust-tls = { version="0.17", package = "rustls" }
[[bench]]
name = "content-length"
harness = false
[[bench]]
name = "status-line"
harness = false

View File

@ -8,25 +8,40 @@ Actix http
* [API Documentation](https://docs.rs/actix-http/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-http](https://crates.io/crates/actix-http)
* Minimum supported Rust version: 1.31 or later
* Minimum supported Rust version: 1.40 or later
## Example
```rust
// see examples/framed_hello.rs for complete list of used crates.
extern crate actix_http;
use actix_http::{h1, Response, ServiceConfig};
use std::{env, io};
fn main() {
Server::new().bind("framed_hello", "127.0.0.1:8080", || {
IntoFramed::new(|| h1::Codec::new(ServiceConfig::default())) // <- create h1 codec
.and_then(TakeItem::new().map_err(|_| ())) // <- read one request
.and_then(|(_req, _framed): (_, Framed<_, _>)| { // <- send response and close conn
SendResponse::send(_framed, Response::Ok().body("Hello world!"))
.map_err(|_| ())
.map(|_| ())
})
}).unwrap().run();
use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;
use http::header::HeaderValue;
use log::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|_req| {
info!("{:?}", _req);
let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!"));
future::ok::<_, ()>(res.body("Hello world!"))
})
.tcp()
})?
.run()
.await
}
```

View File

@ -0,0 +1,291 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
// benchmark different implementations of writing the Content-Length header
fn bench_write_content_length(c: &mut Criterion) {
let mut group = c.benchmark_group("write_content_length");
let sizes = [
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
4294967202,
];
for i in sizes.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("itoa", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_itoa::write_content_length(i, &mut b)
})
});
}
group.finish();
}
criterion_group!(benches, bench_write_content_length);
criterion_main!(benches);
mod _itoa {
use bytes::{BufMut, BytesMut};
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
let mut buf = itoa::Buffer::new();
bytes.put_slice(b"\r\ncontent-length: ");
bytes.put_slice(buf.format(n).as_bytes());
bytes.put_slice(b"\r\n");
}
}
mod _new {
use bytes::{BufMut, BytesMut};
const DIGITS_START: u8 = b'0';
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
bytes.put_slice(b"\r\ncontent-length: ");
if n < 10 {
bytes.put_u8(DIGITS_START + (n as u8));
} else if n < 100 {
let n = n as u8;
let d10 = n / 10;
let d1 = n % 10;
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1000 {
let n = n as u16;
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 10_000 {
let n = n as u16;
let d1000 = (n / 1000) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 100_000 {
let n = n as u32;
let d10000 = (n / 10000) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1_000_000 {
let n = n as u32;
let d100000 = (n / 100000) as u8;
let d10000 = ((n / 10000) % 10) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100000);
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else {
write_usize(n, bytes);
}
bytes.put_slice(b"\r\n");
}
fn write_usize(n: usize, bytes: &mut BytesMut) {
let mut n = n;
// 20 chars is max length of a usize (2^64)
// digits will be added to the buffer from lsd to msd
let mut buf = BytesMut::with_capacity(20);
while n > 9 {
// "pop" the least-significant digit
let lsd = (n % 10) as u8;
// remove the lsd from n
n = n / 10;
buf.put_u8(DIGITS_START + lsd);
}
// put msd to result buffer
bytes.put_u8(DIGITS_START + (n as u8));
// put, in reverse (msd to lsd), remaining digits to buffer
for i in (0..buf.len()).rev() {
bytes.put_u8(buf[i]);
}
}
}
mod _original {
use std::{mem, ptr, slice};
use bytes::{BufMut, BytesMut};
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
}
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(
lut_ptr.offset(d2),
buf_ptr.offset(curr + 2),
2,
);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
}
}
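
The safe rewrite above replaces pointer arithmetic with plain digit decomposition, so an equivalence check against the original is a cheap sanity test. A sketch, assuming the `_original`, `_new`, and `_itoa` modules are reachable from a test harness (this bench target sets `harness = false`, so such a check would normally live in a regular test file):

```rust
use bytes::BytesMut;

fn check_equivalence() {
    for &n in &[0usize, 1, 9, 10, 99, 1001, 65_535, 98_724_245, 4_294_967_202] {
        let mut a = BytesMut::with_capacity(35);
        let mut b = BytesMut::with_capacity(35);
        let mut c = BytesMut::with_capacity(35);

        _original::write_content_length(n, &mut a);
        _new::write_content_length(n, &mut b);
        _itoa::write_content_length(n, &mut c);

        // All three implementations should emit identical header bytes.
        assert_eq!(a, b);
        assert_eq!(a, c);
    }
}
```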

View File

@ -0,0 +1,222 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
use http::Version;
const CODES: &[u16] = &[201, 303, 404, 515];
fn bench_write_status_line_11(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.1");
let version = Version::HTTP_11;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_10(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.0");
let version = Version::HTTP_10;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_09(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v0.9");
let version = Version::HTTP_09;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
criterion_group!(
benches,
bench_write_status_line_11,
bench_write_status_line_10,
bench_write_status_line_09
);
criterion_main!(benches);
mod _naive {
use bytes::{BufMut, BytesMut};
use http::Version;
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(n.to_string().as_bytes());
}
}
mod _new {
use bytes::{BufMut, BytesMut};
use http::Version;
const DIGITS_START: u8 = b'0';
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
bytes.put_u8(b' ');
}
}
mod _original {
use std::ptr;
use bytes::{BufMut, BytesMut};
use http::Version;
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1 ";
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
}
bytes.put_slice(&buf);
if four {
bytes.put_u8(b' ');
}
}
}

View File

@ -3,7 +3,7 @@ use std::{env, io};
use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures::StreamExt;
use futures_util::StreamExt;
use http::header::HeaderValue;
use log::info;
@ -17,23 +17,18 @@ async fn main() -> io::Result<()> {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|mut req: Request| {
async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?);
}
info!("request body: {:?}", body);
Ok::<_, Error>(
Response::Ok()
.header(
"x-head",
HeaderValue::from_static("dummy value!"),
)
.body(body),
)
.finish(|mut req: Request| async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?);
}
info!("request body: {:?}", body);
Ok::<_, Error>(
Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!"))
.body(body),
)
})
.tcp()
})?

View File

@ -4,7 +4,7 @@ use actix_http::http::HeaderValue;
use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures::StreamExt;
use futures_util::StreamExt;
use log::info;
async fn handle_request(mut req: Request) -> Result<Response, Error> {

View File

@ -2,7 +2,7 @@ use std::{env, io};
use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;
use futures_util::future;
use http::header::HeaderValue;
use log::info;

View File

@ -6,7 +6,7 @@ use std::{fmt, mem};
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::ready;
use pin_project::{pin_project, project};
use pin_project::pin_project;
use crate::error::Error;
@ -15,8 +15,7 @@ use crate::error::Error;
pub enum BodySize {
None,
Empty,
Sized(usize),
Sized64(u64),
Sized(u64),
Stream,
}
@ -25,8 +24,7 @@ impl BodySize {
match self {
BodySize::None
| BodySize::Empty
| BodySize::Sized(0)
| BodySize::Sized64(0) => true,
| BodySize::Sized(0) => true,
_ => false,
}
}
@ -36,33 +34,46 @@ impl BodySize {
pub trait MessageBody {
fn size(&self) -> BodySize;
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>>;
downcast_get_type_id!();
}
downcast!(MessageBody);
impl MessageBody for () {
fn size(&self) -> BodySize {
BodySize::Empty
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Poll::Ready(None)
}
}
impl<T: MessageBody> MessageBody for Box<T> {
impl<T: MessageBody + Unpin> MessageBody for Box<T> {
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
self.as_mut().poll_next(cx)
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
#[pin_project]
#[pin_project(project = ResponseBodyProj)]
pub enum ResponseBody<B> {
Body(B),
Other(Body),
Body(#[pin] B),
Other(#[pin] Body),
}
impl ResponseBody<Body> {
@ -98,10 +109,13 @@ impl<B: MessageBody> MessageBody for ResponseBody<B> {
}
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
match self {
ResponseBody::Body(ref mut body) => body.poll_next(cx),
ResponseBody::Other(ref mut body) => body.poll_next(cx),
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
@ -109,19 +123,18 @@ impl<B: MessageBody> MessageBody for ResponseBody<B> {
impl<B: MessageBody> Stream for ResponseBody<B> {
type Item = Result<Bytes, Error>;
#[project]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
#[project]
match self.project() {
ResponseBody::Body(ref mut body) => body.poll_next(cx),
ResponseBody::Other(ref mut body) => body.poll_next(cx),
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
#[pin_project(project = BodyProj)]
/// Represents various types of http message body.
pub enum Body {
/// Empty response. `Content-Length` header is not set.
@ -131,7 +144,7 @@ pub enum Body {
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(Box<dyn MessageBody>),
Message(Box<dyn MessageBody + Unpin>),
}
impl Body {
@ -141,7 +154,7 @@ impl Body {
}
/// Create body from generic message body.
pub fn from_message<B: MessageBody + 'static>(body: B) -> Body {
pub fn from_message<B: MessageBody + Unpin + 'static>(body: B) -> Body {
Body::Message(Box::new(body))
}
}
@ -151,24 +164,27 @@ impl MessageBody for Body {
match self {
Body::None => BodySize::None,
Body::Empty => BodySize::Empty,
Body::Bytes(ref bin) => BodySize::Sized(bin.len()),
Body::Bytes(ref bin) => BodySize::Sized(bin.len() as u64),
Body::Message(ref body) => body.size(),
}
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
match self {
Body::None => Poll::Ready(None),
Body::Empty => Poll::Ready(None),
Body::Bytes(ref mut bin) => {
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
BodyProj::None => Poll::Ready(None),
BodyProj::Empty => Poll::Ready(None),
BodyProj::Bytes(ref mut bin) => {
let len = bin.len();
if len == 0 {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::replace(bin, Bytes::new()))))
Poll::Ready(Some(Ok(mem::take(bin))))
}
}
Body::Message(ref mut body) => body.poll_next(cx),
BodyProj::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx),
}
}
}
@ -254,7 +270,7 @@ impl From<serde_json::Value> for Body {
impl<S> From<SizedStream<S>> for Body
where
S: Stream<Item = Result<Bytes, Error>> + 'static,
S: Stream<Item = Result<Bytes, Error>> + Unpin + 'static,
{
fn from(s: SizedStream<S>) -> Body {
Body::from_message(s)
@ -263,7 +279,7 @@ where
impl<S, E> From<BodyStream<S, E>> for Body
where
S: Stream<Item = Result<Bytes, E>> + 'static,
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
fn from(s: BodyStream<S, E>) -> Body {
@ -273,87 +289,88 @@ where
impl MessageBody for Bytes {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::replace(self, Bytes::new()))))
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
}
impl MessageBody for BytesMut {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::replace(self, BytesMut::new()).freeze())))
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
}
impl MessageBody for &'static str {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(
mem::replace(self, "").as_ref(),
mem::take(self.get_mut()).as_ref(),
))))
}
}
}
impl MessageBody for &'static [u8] {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(mem::replace(self, b"")))))
}
}
}
impl MessageBody for Vec<u8> {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(mem::replace(self, Vec::new())))))
Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut())))))
}
}
}
impl MessageBody for String {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(
mem::replace(self, String::new()).into_bytes(),
mem::take(self.get_mut()).into_bytes(),
))))
}
}
@ -361,19 +378,21 @@ impl MessageBody for String {
/// Type represent streaming body.
/// Response does not contain `content-length` header and appropriate transfer encoding is used.
pub struct BodyStream<S, E> {
stream: Pin<Box<S>>,
#[pin_project]
pub struct BodyStream<S: Unpin, E> {
#[pin]
stream: S,
_t: PhantomData<E>,
}
impl<S, E> BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>>,
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
pub fn new(stream: S) -> Self {
BodyStream {
stream: Box::pin(stream),
stream,
_t: PhantomData,
}
}
@ -381,7 +400,7 @@ where
impl<S, E> MessageBody for BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>>,
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
fn size(&self) -> BodySize {
@ -393,10 +412,14 @@ where
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.stream.as_mut();
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.project().stream;
loop {
return Poll::Ready(match ready!(stream.as_mut().poll_next(cx)) {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt.map(|res| res.map_err(Into::into)),
});
@ -406,26 +429,28 @@ where
/// Type represent streaming body. This body implementation should be used
/// if total size of stream is known. Data get sent as is without using transfer encoding.
pub struct SizedStream<S> {
#[pin_project]
pub struct SizedStream<S: Unpin> {
size: u64,
stream: Pin<Box<S>>,
#[pin]
stream: S,
}
impl<S> SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream: Box::pin(stream) }
SizedStream { size, stream }
}
}
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
fn size(&self) -> BodySize {
BodySize::Sized64(self.size)
BodySize::Sized(self.size as u64)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
@ -433,10 +458,14 @@ where
/// Empty values are skipped to prevent [`SizedStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.stream.as_mut();
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream: Pin<&mut S> = self.project().stream;
loop {
return Poll::Ready(match ready!(stream.as_mut().poll_next(cx)) {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
});
@ -447,8 +476,9 @@ where
#[cfg(test)]
mod tests {
use super::*;
use futures::stream;
use futures_util::stream;
use futures_util::future::poll_fn;
use futures_util::pin_mut;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
@ -476,7 +506,10 @@ mod tests {
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| "test".poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
@ -490,13 +523,12 @@ mod tests {
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
let sb = Bytes::from(&b"test"[..]);
pin_mut!(sb);
assert_eq!((&b"test"[..]).size(), BodySize::Sized(4));
assert_eq!(sb.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| (&b"test"[..]).poll_next(cx))
.await
.unwrap()
.ok(),
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
@ -505,10 +537,12 @@ mod tests {
async fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
let test_vec = Vec::from("test");
pin_mut!(test_vec);
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
assert_eq!(test_vec.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| Vec::from("test").poll_next(cx))
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
@ -518,41 +552,44 @@ mod tests {
#[actix_rt::test]
async fn test_bytes() {
let mut b = Bytes::from("test");
let b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes_mut() {
let mut b = BytesMut::from("test");
let b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_string() {
let mut b = "test".to_owned();
let b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
@ -560,14 +597,17 @@ mod tests {
#[actix_rt::test]
async fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert!(poll_fn(|cx| ().poll_next(cx)).await.is_none());
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
.await
.is_none());
}
#[actix_rt::test]
async fn test_box() {
let mut val = Box::new(());
let val = Box::new(());
pin_mut!(val);
assert_eq!(val.size(), BodySize::Empty);
assert!(poll_fn(|cx| val.poll_next(cx)).await.is_none());
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
}
#[actix_rt::test]
@ -605,23 +645,54 @@ mod tests {
mod body_stream {
use super::*;
//use futures::task::noop_waker;
//use futures::stream::once;
#[actix_rt::test]
async fn skips_empty_chunks() {
let mut body = BodyStream::new(stream::iter(
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
));
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
/* This no longer compiles, as intended:
#[actix_rt::test]
async fn move_pinned_pointer() {
let (sender, receiver) = futures::channel::oneshot::channel();
let mut body_stream = Ok(BodyStream::new(once(async {
let x = Box::new(0i32);
let y = &x;
receiver.await.unwrap();
let _z = **y;
Ok::<_, ()>(Bytes::new())
})));
let waker = noop_waker();
let mut context = Context::from_waker(&waker);
pin_mut!(body_stream);
let _ = body_stream.as_mut().unwrap().poll_next(&mut context);
sender.send(()).unwrap();
let _ = std::mem::replace(&mut body_stream, Err([0; 32])).unwrap().poll_next(&mut context);
}*/
}
mod sized_stream {
@ -629,18 +700,39 @@ mod tests {
#[actix_rt::test]
async fn skips_empty_chunks() {
let mut body = SizedStream::new(
let body = SizedStream::new(
2,
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
);
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
}
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MessageBody = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push_str("!");
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}
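
For reference, a minimal sketch (not part of this diff) of a custom body under the new `Pin`-based `MessageBody` signature, assuming the alpha API shown above:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use actix_http::body::{Body, BodySize, MessageBody};
use actix_http::Error;
use bytes::Bytes;

/// Emits one fixed chunk, then signals end of stream.
struct Once(Option<Bytes>);

impl MessageBody for Once {
    fn size(&self) -> BodySize {
        // `Sized` now carries a `u64`, not a `usize`.
        BodySize::Sized(self.0.as_ref().map_or(0, |b| b.len() as u64))
    }

    fn poll_next(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Error>>> {
        // `Once` is `Unpin`, so `get_mut` is fine here.
        Poll::Ready(self.get_mut().0.take().map(Ok))
    }
}

fn into_body() -> Body {
    // `from_message` now requires `MessageBody + Unpin + 'static`.
    Body::from_message(Once(Some(Bytes::from_static(b"hello"))))
}
```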

View File

@ -0,0 +1,39 @@
use std::time::Duration;
// These values are taken from hyper/src/proto/h2/client.rs
const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2mb
const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
/// Connector configuration
#[derive(Clone)]
pub(crate) struct ConnectorConfig {
pub(crate) timeout: Duration,
pub(crate) conn_lifetime: Duration,
pub(crate) conn_keep_alive: Duration,
pub(crate) disconnect_timeout: Option<Duration>,
pub(crate) limit: usize,
pub(crate) conn_window_size: u32,
pub(crate) stream_window_size: u32,
}
impl Default for ConnectorConfig {
fn default() -> Self {
Self {
timeout: Duration::from_secs(1),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Some(Duration::from_millis(3000)),
limit: 100,
conn_window_size: DEFAULT_H2_CONN_WINDOW,
stream_window_size: DEFAULT_H2_STREAM_WINDOW,
}
}
}
impl ConnectorConfig {
pub(crate) fn no_disconnect_timeout(&self) -> Self {
let mut res = self.clone();
res.disconnect_timeout = None;
res
}
}

View File

@ -1,12 +1,13 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{Buf, Bytes};
use futures_util::future::{err, Either, Future, FutureExt, LocalBoxFuture, Ready};
use futures_util::future::{err, Either, FutureExt, LocalBoxFuture, Ready};
use h2::client::SendRequest;
use pin_project::{pin_project, project};
use pin_project::pin_project;
use crate::body::MessageBody;
use crate::h1::ClientCodec;
@ -204,7 +205,7 @@ where
}
}
#[pin_project]
#[pin_project(project = EitherIoProj)]
pub enum EitherIo<A, B> {
A(#[pin] A),
B(#[pin] B),
@ -215,16 +216,14 @@ where
A: AsyncRead,
B: AsyncRead,
{
#[project]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
#[project]
match self.project() {
EitherIo::A(val) => val.poll_read(cx, buf),
EitherIo::B(val) => val.poll_read(cx, buf),
EitherIoProj::A(val) => val.poll_read(cx, buf),
EitherIoProj::B(val) => val.poll_read(cx, buf),
}
}
@ -244,41 +243,34 @@ where
A: AsyncWrite,
B: AsyncWrite,
{
#[project]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
#[project]
match self.project() {
EitherIo::A(val) => val.poll_write(cx, buf),
EitherIo::B(val) => val.poll_write(cx, buf),
EitherIoProj::A(val) => val.poll_write(cx, buf),
EitherIoProj::B(val) => val.poll_write(cx, buf),
}
}
#[project]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
#[project]
match self.project() {
EitherIo::A(val) => val.poll_flush(cx),
EitherIo::B(val) => val.poll_flush(cx),
EitherIoProj::A(val) => val.poll_flush(cx),
EitherIoProj::B(val) => val.poll_flush(cx),
}
}
#[project]
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
#[project]
match self.project() {
EitherIo::A(val) => val.poll_shutdown(cx),
EitherIo::B(val) => val.poll_shutdown(cx),
EitherIoProj::A(val) => val.poll_shutdown(cx),
EitherIoProj::B(val) => val.poll_shutdown(cx),
}
}
#[project]
fn poll_write_buf<U: Buf>(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
@ -287,10 +279,9 @@ where
where
Self: Sized,
{
#[project]
match self.project() {
EitherIo::A(val) => val.poll_write_buf(cx, buf),
EitherIo::B(val) => val.poll_write_buf(cx, buf),
EitherIoProj::A(val) => val.poll_write_buf(cx, buf),
EitherIoProj::B(val) => val.poll_write_buf(cx, buf),
}
}
}

View File

@ -11,6 +11,7 @@ use actix_service::{apply_fn, Service};
use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri;
use super::config::ConnectorConfig;
use super::connection::Connection;
use super::error::ConnectError;
use super::pool::{ConnectionPool, Protocol};
@ -48,11 +49,7 @@ type SslConnector = ();
/// ```
pub struct Connector<T, U> {
connector: T,
timeout: Duration,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Duration,
limit: usize,
config: ConnectorConfig,
#[allow(dead_code)]
ssl: SslConnector,
_t: PhantomData<U>,
@ -71,42 +68,47 @@ impl Connector<(), ()> {
> + Clone,
TcpStream,
> {
let ssl = {
#[cfg(feature = "openssl")]
{
use actix_connect::ssl::openssl::SslMethod;
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
{
let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
let mut config = ClientConfig::new();
config.set_protocols(&protos);
config
.root_store
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{}
};
Connector {
ssl,
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
connector: default_connector(),
timeout: Duration::from_secs(1),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Duration::from_millis(3000),
limit: 100,
config: ConnectorConfig::default(),
_t: PhantomData,
}
}
// Build Ssl connector with openssl, based on supplied alpn protocols
#[cfg(feature = "openssl")]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
use actix_connect::ssl::openssl::SslMethod;
use bytes::{BufMut, BytesMut};
let mut alpn = BytesMut::with_capacity(20);
for proto in protocols.iter() {
alpn.put_u8(proto.len() as u8);
alpn.put(proto.as_slice());
}
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(&alpn)
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
// Build Ssl connector with rustls, based on supplied alpn protocols
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
let mut config = ClientConfig::new();
config.set_protocols(&protocols);
config
.root_store
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
// ssl turned off, provides empty ssl connector
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {}
}
impl<T, U> Connector<T, U> {
@ -122,11 +124,7 @@ impl<T, U> Connector<T, U> {
{
Connector {
connector,
timeout: self.timeout,
conn_lifetime: self.conn_lifetime,
conn_keep_alive: self.conn_keep_alive,
disconnect_timeout: self.disconnect_timeout,
limit: self.limit,
config: self.config,
ssl: self.ssl,
_t: PhantomData,
}
@ -146,7 +144,7 @@ where
/// Connection timeout, i.e. max time to connect to remote host including dns name resolution.
/// Set to 1 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self.config.timeout = timeout;
self
}
@ -163,12 +161,44 @@ where
self
}
/// Maximum supported HTTP major version.
/// Supported versions: HTTP/1.1, HTTP/2.
pub fn max_http_version(mut self, val: http::Version) -> Self {
let versions = match val {
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
_ => {
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
}
};
self.ssl = Connector::build_ssl(versions);
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 stream-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_window_size(mut self, size: u32) -> Self {
self.config.stream_window_size = size;
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 connection-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
self.config.conn_window_size = size;
self
}
/// Set total number of simultaneous connections per type of scheme.
///
/// If limit is 0, the connector has no limit.
/// The default limit size is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.limit = limit;
self.config.limit = limit;
self
}
@ -179,7 +209,7 @@ where
/// exceeds this period, the connection is closed.
/// Default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.conn_keep_alive = dur;
self.config.conn_keep_alive = dur;
self
}
@ -189,7 +219,7 @@ where
/// until it is closed regardless of keep-alive period.
/// Default lifetime period is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.conn_lifetime = dur;
self.config.conn_lifetime = dur;
self
}
@ -202,7 +232,7 @@ where
///
/// By default disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.disconnect_timeout = dur;
self.config.disconnect_timeout = Some(dur);
self
}
@ -216,7 +246,7 @@ where
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{
let connector = TimeoutService::new(
self.timeout,
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
@ -231,10 +261,7 @@ where
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
connector,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
self.config.no_disconnect_timeout(),
),
}
}
@ -248,7 +275,7 @@ where
use actix_service::{boxed::service, pipeline};
let ssl_service = TimeoutService::new(
self.timeout,
self.config.timeout,
pipeline(
apply_fn(self.connector.clone(), |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
@ -301,7 +328,7 @@ where
});
let tcp_service = TimeoutService::new(
self.timeout,
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
@ -316,18 +343,9 @@ where
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
tcp_service,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
),
ssl_pool: ConnectionPool::new(
ssl_service,
self.conn_lifetime,
self.conn_keep_alive,
Some(self.disconnect_timeout),
self.limit,
self.config.no_disconnect_timeout(),
),
ssl_pool: ConnectionPool::new(ssl_service, self.config),
}
}
}
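
Taken together, the `ConnectorConfig` plumbing above exposes a few new builder knobs on the client connector. A minimal usage sketch, assuming the usual `Connector::new()` constructor and `finish()` method (both outside this hunk):

```rust
use std::time::Duration;

use actix_http::client::Connector;
use http::Version;

fn build_connector() {
    let _connector = Connector::new()
        .timeout(Duration::from_secs(1))
        // Offer h2 and http/1.1 over ALPN.
        .max_http_version(Version::HTTP_2)
        // HTTP/2 flow-control windows: per-stream and per-connection.
        .initial_window_size(1024 * 1024)
        .initial_connection_window_size(2 * 1024 * 1024)
        .conn_keep_alive(Duration::from_secs(15))
        .limit(100)
        .finish();
}
```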

View File

@ -39,7 +39,7 @@ pub enum ConnectError {
H2(h2::Error),
/// Connecting took too long
#[display(fmt = "Timeout out while establishing connection")]
#[display(fmt = "Timeout while establishing connection")]
Timeout,
/// Connector has been disconnected
@ -48,20 +48,22 @@ pub enum ConnectError {
/// Unresolved host name
#[display(fmt = "Connector received `Connect` method with unresolved host")]
Unresolverd,
Unresolved,
/// Connection io error
#[display(fmt = "{}", _0)]
Io(io::Error),
}
impl std::error::Error for ConnectError {}
impl From<actix_connect::ConnectError> for ConnectError {
fn from(err: actix_connect::ConnectError) -> ConnectError {
match err {
actix_connect::ConnectError::Resolver(e) => ConnectError::Resolver(e),
actix_connect::ConnectError::NoRecords => ConnectError::NoRecords,
actix_connect::ConnectError::InvalidInput => panic!(),
actix_connect::ConnectError::Unresolverd => ConnectError::Unresolverd,
actix_connect::ConnectError::Unresolved => ConnectError::Unresolved,
actix_connect::ConnectError::Io(e) => ConnectError::Io(e),
}
}
@ -86,6 +88,8 @@ pub enum InvalidUrl {
HttpError(http::Error),
}
impl std::error::Error for InvalidUrl {}
/// A set of errors that can occur during request sending and response reading
#[derive(Debug, Display, From)]
pub enum SendRequestError {
@ -106,7 +110,7 @@ pub enum SendRequestError {
#[display(fmt = "{}", _0)]
H2(h2::Error),
/// Response took too long
#[display(fmt = "Timeout out while waiting for response")]
#[display(fmt = "Timeout while waiting for response")]
Timeout,
/// Tunnels are not supported for http2 connection
#[display(fmt = "Tunnels are not supported for http2 connection")]
@ -115,6 +119,8 @@ pub enum SendRequestError {
Body(Error),
}
impl std::error::Error for SendRequestError {}
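A hedged sketch (not from this changeset) of how a caller might branch on this error, using only the variants visible in this diff; it assumes `SendRequestError` is re-exported from the client module.

use actix_http::client::SendRequestError;

// Sketch only: non-exhaustive match over variants shown above.
fn describe(err: &SendRequestError) -> &'static str {
    match err {
        SendRequestError::Timeout => "timeout while waiting for response",
        SendRequestError::H2(_) => "HTTP/2 protocol error",
        SendRequestError::Body(_) => "error while streaming the request body",
        _ => "other send error",
    }
}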
/// Convert `SendRequestError` to a server `Response`
impl ResponseError for SendRequestError {
fn status_code(&self) -> StatusCode {
@ -139,6 +145,8 @@ pub enum FreezeRequestError {
Http(HttpError),
}
impl std::error::Error for FreezeRequestError {}
impl From<FreezeRequestError> for SendRequestError {
fn from(e: FreezeRequestError) -> Self {
match e {

View File

@ -8,7 +8,7 @@ use bytes::buf::BufMutExt;
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::future::poll_fn;
use futures_util::{SinkExt, StreamExt};
use futures_util::{pin_mut, SinkExt, StreamExt};
use crate::error::PayloadError;
use crate::h1;
@ -120,7 +120,7 @@ where
/// send request body to the peer
pub(crate) async fn send_body<I, B>(
mut body: B,
body: B,
framed: &mut Framed<I, h1::ClientCodec>,
) -> Result<(), SendRequestError>
where
@ -128,9 +128,10 @@ where
B: MessageBody,
{
let mut eof = false;
pin_mut!(body);
while !eof {
while !eof && !framed.is_write_buf_full() {
match poll_fn(|cx| body.poll_next(cx)).await {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(result) => {
framed.write(h1::Message::Chunk(Some(result?)))?;
}
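The change above stack-pins the generic body before polling it, since `MessageBody::poll_next` now takes `Pin<&mut Self>`. A minimal sketch of the same `pin_mut!`/`poll_fn` pattern on a plain stream, assuming the `futures` facade crate (not part of this changeset):

use futures::{executor::block_on, future::poll_fn, pin_mut, stream, Stream};

fn main() {
    block_on(async {
        let body = stream::iter(vec![1u8, 2, 3]);
        pin_mut!(body); // stack-pin: `poll_next` requires `Pin<&mut Self>`
        while let Some(chunk) = poll_fn(|cx| body.as_mut().poll_next(cx)).await {
            println!("chunk: {}", chunk);
        }
    });
}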

View File

@ -1,10 +1,15 @@
use std::convert::TryFrom;
use std::future::Future;
use std::time;
use actix_codec::{AsyncRead, AsyncWrite};
use bytes::Bytes;
use futures_util::future::poll_fn;
use h2::{client::SendRequest, SendStream};
use futures_util::pin_mut;
use h2::{
client::{Builder, Connection, SendRequest},
SendStream,
};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, TRANSFER_ENCODING};
use http::{request::Request, Method, Version};
@ -13,6 +18,7 @@ use crate::header::HeaderMap;
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::SendRequestError;
use super::pool::Acquired;
@ -58,10 +64,6 @@ where
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
BodySize::Sized64(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
// Extracting extra headers from RequestHeadType. HeaderMap::new() does not allocate.
@ -123,13 +125,14 @@ where
}
async fn send_body<B: MessageBody>(
mut body: B,
body: B,
mut send: SendStream<Bytes>,
) -> Result<(), SendRequestError> {
let mut buf = None;
pin_mut!(body);
loop {
if buf.is_none() {
match poll_fn(|cx| body.poll_next(cx)).await {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(Ok(b)) => {
send.reserve_capacity(b.len());
buf = Some(b);
@ -183,3 +186,18 @@ fn release<T: AsyncRead + AsyncWrite + Unpin + 'static>(
}
}
}
pub(crate) fn handshake<Io>(
io: Io,
config: &ConnectorConfig,
) -> impl Future<Output = Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
let mut builder = Builder::new();
builder
.initial_window_size(config.stream_window_size)
.initial_connection_window_size(config.conn_window_size)
.enable_push(false);
builder.handshake(io)
}
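The helper above is crate-private; for illustration only, here are the same settings expressed directly against the public `h2` client builder. The exact `h2`/`tokio` versions are assumed to match what this crate pulls in.

use bytes::Bytes;
use tokio::io::{AsyncRead, AsyncWrite};

// Sketch only: mirrors the crate-private `handshake` helper above.
async fn h2_connect<Io>(io: Io) -> Result<(), h2::Error>
where
    Io: AsyncRead + AsyncWrite + Unpin,
{
    let mut builder = h2::client::Builder::new();
    builder
        .initial_window_size(65_535)            // stream-level window, in octets
        .initial_connection_window_size(65_535) // connection-level window
        .enable_push(false);
    let (send_request, connection) = builder.handshake::<_, Bytes>(io).await?;
    // `connection` must be polled to drive the protocol; the pool spawns it onto the runtime.
    let _ = (send_request, connection);
    Ok(())
}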

View File

@ -1,6 +1,7 @@
//! Http client api
use http::Uri;
mod config;
mod connection;
mod connector;
mod error;

View File

@ -13,13 +13,16 @@ use actix_utils::{oneshot, task::LocalWaker};
use bytes::Bytes;
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture};
use fxhash::FxHashMap;
use h2::client::{handshake, Connection, SendRequest};
use h2::client::{Connection, SendRequest};
use http::uri::Authority;
use indexmap::IndexSet;
use pin_project::pin_project;
use slab::Slab;
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError;
use super::h2proto::handshake;
use super::Connect;
#[derive(Clone, Copy, PartialEq)]
@ -49,20 +52,11 @@ where
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) fn new(
connector: T,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
) -> Self {
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
ConnectionPool(
Rc::new(RefCell::new(connector)),
Rc::new(RefCell::new(Inner {
conn_lifetime,
conn_keep_alive,
disconnect_timeout,
limit,
config,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
@ -111,7 +105,7 @@ where
let key = if let Some(authority) = req.uri.authority() {
authority.clone().into()
} else {
return Err(ConnectError::Unresolverd);
return Err(ConnectError::Unresolved);
};
// acquire connection
@ -128,6 +122,8 @@ where
// open tcp connection
let (io, proto) = connector.call(req).await?;
let config = inner.borrow().config.clone();
let guard = OpenGuard::new(key, inner);
if proto == Protocol::Http1 {
@ -137,7 +133,7 @@ where
Some(guard.consume()),
))
} else {
let (snd, connection) = handshake(io).await?;
let (snd, connection) = handshake(io, &config).await?;
actix_rt::spawn(connection.map(|_| ()));
Ok(IoConnection::new(
ConnectionType::H2(snd),
@ -199,7 +195,7 @@ where
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release_waiter(&self.key, self.token);
inner.check_availibility();
inner.check_availability();
}
}
}
@ -236,7 +232,7 @@ where
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release();
inner.check_availibility();
inner.check_availability();
}
}
}
@ -254,10 +250,7 @@ struct AvailableConnection<Io> {
}
pub(crate) struct Inner<Io> {
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
config: ConnectorConfig,
acquired: usize,
available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<
@ -310,7 +303,7 @@ where
fn acquire(&mut self, key: &Key, cx: &mut Context<'_>) -> Acquire<Io> {
// check limits
if self.limit > 0 && self.acquired >= self.limit {
if self.config.limit > 0 && self.acquired >= self.config.limit {
return Acquire::NotAvailable;
}
@ -322,10 +315,10 @@ where
let now = Instant::now();
while let Some(conn) = connections.pop_back() {
// check if it still usable
if (now - conn.used) > self.conn_keep_alive
|| (now - conn.created) > self.conn_lifetime
if (now - conn.used) > self.config.conn_keep_alive
|| (now - conn.created) > self.config.conn_lifetime
{
if let Some(timeout) = self.disconnect_timeout {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
@ -337,7 +330,7 @@ where
match Pin::new(s).poll_read(cx, &mut buf) {
Poll::Pending => (),
Poll::Ready(Ok(n)) if n > 0 => {
if let Some(timeout) = self.disconnect_timeout {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(
io, timeout,
@ -366,21 +359,21 @@ where
created,
used: Instant::now(),
});
self.check_availibility();
self.check_availability();
}
fn release_close(&mut self, io: ConnectionType<Io>) {
self.acquired -= 1;
if let Some(timeout) = self.disconnect_timeout {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
self.check_availibility();
self.check_availability();
}
fn check_availibility(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.limit {
fn check_availability(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.config.limit {
self.waker.wake();
}
}
@ -422,6 +415,7 @@ where
}
}
#[pin_project]
struct ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
@ -439,7 +433,7 @@ where
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = unsafe { self.get_unchecked_mut() };
let this = self.project();
let mut inner = this.inner.as_ref().borrow_mut();
inner.waker.register(cx.waker());
@ -478,6 +472,7 @@ where
tx,
this.inner.clone(),
this.connector.call(connect),
inner.config.clone(),
);
}
}
@ -504,6 +499,7 @@ where
>,
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
config: ConnectorConfig,
}
impl<F, Io> OpenWaitingConnection<F, Io>
@ -516,6 +512,7 @@ where
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
inner: Rc<RefCell<Inner<Io>>>,
fut: F,
config: ConnectorConfig,
) {
actix_rt::spawn(OpenWaitingConnection {
key,
@ -523,6 +520,7 @@ where
h2: None,
rx: Some(rx),
inner: Some(inner),
config,
})
}
}
@ -536,7 +534,7 @@ where
if let Some(inner) = self.project().inner.take() {
let mut inner = inner.as_ref().borrow_mut();
inner.release();
inner.check_availibility();
inner.check_availability();
}
}
}
@ -592,7 +590,7 @@ where
)));
Poll::Ready(())
} else {
*this.h2 = Some(handshake(io).boxed_local());
*this.h2 = Some(handshake(io, this.config).boxed_local());
self.poll(cx)
}
}

View File

@ -114,7 +114,7 @@ impl ServiceConfig {
}
#[inline]
/// Return state of connection keep-alive funcitonality
/// Return state of connection keep-alive functionality
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
@ -211,7 +211,12 @@ impl Date {
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", OffsetDateTime::now().format("%a, %d %b %Y %H:%M:%S GMT")).unwrap();
write!(
self,
"{}",
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
}
}
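The `Date` header value is now rendered with `time` 0.2's UTC constructor; a one-line sketch of the same format string (illustrative, not part of this changeset):

// Sketch: rendering an HTTP-date the way `Date::update` does above (time 0.2 API).
fn main() {
    let http_date = time::OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT");
    println!("Date: {}", http_date);
}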
@ -282,7 +287,6 @@ impl DateService {
mod tests {
use super::*;
// Test modifying the date from within the closure
// passed to `set_date`
#[test]
@ -290,9 +294,7 @@ mod tests {
let service = DateService::new();
// Make sure that `check_date` doesn't try to spawn a task
service.0.update();
service.set_date(|_| {
service.0.reset()
});
service.set_date(|_| service.0.reset());
}
#[test]

View File

@ -63,7 +63,7 @@ impl CookieBuilder {
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .expires(time::OffsetDateTime::now())
/// .expires(time::OffsetDateTime::now_utc())
/// .finish();
///
/// assert!(c.expires().is_some());
@ -109,7 +109,8 @@ impl CookieBuilder {
pub fn max_age_time(mut self, value: Duration) -> CookieBuilder {
// Truncate any nanoseconds from the Duration, as they aren't represented within `Max-Age`
// and would cause two otherwise identical `Cookie` instances to not be equivalent to one another.
self.cookie.set_max_age(Duration::seconds(value.whole_seconds()));
self.cookie
.set_max_age(Duration::seconds(value.whole_seconds()));
self
}
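A hedged sketch of the truncation described in the comment above: two cookies whose `Max-Age` differs only below one second compare equal. It assumes the bundled cookie API and `time` 0.2, and is not part of this changeset.

use actix_http::cookie::Cookie;
use time::Duration;

fn main() {
    let a = Cookie::build("id", "1")
        .max_age_time(Duration::seconds(30) + Duration::nanoseconds(500))
        .finish();
    let b = Cookie::build("id", "1")
        .max_age_time(Duration::seconds(30))
        .finish();
    assert_eq!(a, b); // nanoseconds were truncated away
}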

View File

@ -1,5 +1,5 @@
use std::collections::HashSet;
use std::mem::replace;
use std::mem;
use time::{Duration, OffsetDateTime};
@ -13,7 +13,7 @@ use super::secure::{Key, PrivateJar, SignedJar};
///
/// A `CookieJar` provides storage for any number of cookies. Any changes made
/// to the jar are tracked; the changes can be retrieved via the
/// [delta](#method.delta) method which returns an interator over the changes.
/// [delta](#method.delta) method which returns an iterator over the changes.
///
/// # Usage
///
@ -221,7 +221,7 @@ impl CookieJar {
if self.original_cookies.contains(cookie.name()) {
cookie.set_value("");
cookie.set_max_age(Duration::zero());
cookie.set_expires(OffsetDateTime::now() - Duration::days(365));
cookie.set_expires(OffsetDateTime::now_utc() - Duration::days(365));
self.delta_cookies.replace(DeltaCookie::removed(cookie));
} else {
self.delta_cookies.remove(cookie.name());
@ -273,7 +273,7 @@ impl CookieJar {
)]
pub fn clear(&mut self) {
self.delta_cookies.clear();
for delta in replace(&mut self.original_cookies, HashSet::new()) {
for delta in mem::take(&mut self.original_cookies) {
self.remove(delta.cookie);
}
}
@ -533,8 +533,8 @@ mod test {
#[test]
#[cfg(feature = "secure-cookies")]
fn delta() {
use time::Duration;
use std::collections::HashMap;
use time::Duration;
let mut c = CookieJar::new();

View File

@ -47,7 +47,7 @@
//! ```
#![doc(html_root_url = "https://docs.rs/cookie/0.11")]
#![deny(missing_docs)]
#![warn(missing_docs)]
mod builder;
mod delta;
@ -103,7 +103,7 @@ enum CookieStr {
impl CookieStr {
/// Retrieves the string `self` corresponds to. If `self` is derived from
/// indexes, the corresponding subslice of `string` is returned. Otherwise,
/// indexes, the corresponding sub-slice of `string` is returned. Otherwise,
/// the concrete string is returned.
///
/// # Panics
@ -733,7 +733,7 @@ impl<'c> Cookie<'c> {
pub fn make_permanent(&mut self) {
let twenty_years = Duration::days(365 * 20);
self.set_max_age(twenty_years);
self.set_expires(OffsetDateTime::now() + twenty_years);
self.set_expires(OffsetDateTime::now_utc() + twenty_years);
}
fn fmt_parameters(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -990,7 +990,7 @@ impl<'a, 'b> PartialEq<Cookie<'b>> for Cookie<'a> {
#[cfg(test)]
mod tests {
use super::{Cookie, SameSite};
use time::{offset, PrimitiveDateTime};
use time::PrimitiveDateTime;
#[test]
fn format() {
@ -1015,7 +1015,9 @@ mod tests {
assert_eq!(&cookie.to_string(), "foo=bar; Domain=www.rust-lang.org");
let time_str = "Wed, 21 Oct 2015 07:28:00 GMT";
let expires = PrimitiveDateTime::parse(time_str, "%a, %d %b %Y %H:%M:%S").unwrap().using_offset(offset!(UTC));
let expires = PrimitiveDateTime::parse(time_str, "%a, %d %b %Y %H:%M:%S")
.unwrap()
.assume_utc();
let cookie = Cookie::build("foo", "bar").expires(expires).finish();
assert_eq!(
&cookie.to_string(),

View File

@ -6,7 +6,7 @@ use std::fmt;
use std::str::Utf8Error;
use percent_encoding::percent_decode;
use time::{Duration, offset};
use time::Duration;
use super::{Cookie, CookieStr, SameSite};
@ -172,6 +172,8 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
cookie.same_site = Some(SameSite::Strict);
} else if v.eq_ignore_ascii_case("lax") {
cookie.same_site = Some(SameSite::Lax);
} else if v.eq_ignore_ascii_case("none") {
cookie.same_site = Some(SameSite::None);
} else {
// We do nothing here, for now. When/if the `SameSite`
// attribute becomes standard, the spec says that we should
@ -188,7 +190,7 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
.or_else(|| time::parse(v, "%a, %d-%b-%Y %H:%M:%S").ok());
if let Some(time) = tm {
cookie.expires = Some(time.using_offset(offset!(UTC)))
cookie.expires = Some(time.assume_utc())
}
}
_ => {
@ -216,7 +218,7 @@ where
#[cfg(test)]
mod tests {
use super::{Cookie, SameSite};
use time::{offset, Duration, PrimitiveDateTime};
use time::{Duration, PrimitiveDateTime};
macro_rules! assert_eq_parse {
($string:expr, $expected:expr) => {
@ -261,6 +263,16 @@ mod tests {
assert_eq_parse!("foo=bar; SameSite=strict", expected);
assert_eq_parse!("foo=bar; SameSite=STrICT", expected);
assert_eq_parse!("foo=bar; SameSite=STRICT", expected);
let expected = Cookie::build("foo", "bar")
.same_site(SameSite::None)
.finish();
assert_eq_parse!("foo=bar; SameSite=None", expected);
assert_eq_parse!("foo=bar; SameSITE=None", expected);
assert_eq_parse!("foo=bar; SameSite=nOne", expected);
assert_eq_parse!("foo=bar; SameSite=NoNE", expected);
assert_eq_parse!("foo=bar; SameSite=NONE", expected);
}
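`SameSite=None` is now recognised (case-insensitively) instead of being silently ignored. A minimal round-trip sketch, assuming the bundled cookie API (not part of this changeset):

use actix_http::cookie::{Cookie, SameSite};

fn main() {
    let c = Cookie::parse("foo=bar; SameSite=None").unwrap();
    assert_eq!(c.same_site(), Some(SameSite::None));
}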
#[test]
@ -376,7 +388,9 @@ mod tests {
);
let time_str = "Wed, 21 Oct 2015 07:28:00 GMT";
let expires = PrimitiveDateTime::parse(time_str, "%a, %d %b %Y %H:%M:%S").unwrap().using_offset(offset!(UTC));
let expires = PrimitiveDateTime::parse(time_str, "%a, %d %b %Y %H:%M:%S")
.unwrap()
.assume_utc();
expected.set_expires(expires);
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
@ -385,13 +399,38 @@ mod tests {
);
unexpected.set_domain("foo.com");
let bad_expires = PrimitiveDateTime::parse(time_str, "%a, %d %b %Y %H:%S:%M").unwrap().using_offset(offset!(UTC));
let bad_expires = PrimitiveDateTime::parse(time_str, "%a, %d %b %Y %H:%S:%M")
.unwrap()
.assume_utc();
expected.set_expires(bad_expires);
assert_ne_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT",
unexpected
);
expected.set_expires(expires);
expected.set_same_site(SameSite::Lax);
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT; \
SameSite=Lax",
expected
);
expected.set_same_site(SameSite::Strict);
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT; \
SameSite=Strict",
expected
);
expected.set_same_site(SameSite::None);
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT; \
SameSite=None",
expected
);
}
#[test]
@ -414,8 +453,15 @@ mod tests {
#[test]
fn do_not_panic_on_large_max_ages() {
let max_duration = Duration::max_value();
let expected = Cookie::build("foo", "bar").max_age_time(max_duration).finish();
let overflow_duration = max_duration.checked_add(Duration::nanoseconds(1)).unwrap_or(max_duration);
assert_eq_parse!(format!(" foo=bar; Max-Age={:?}", overflow_duration.whole_seconds()), expected);
let expected = Cookie::build("foo", "bar")
.max_age_time(max_duration)
.finish();
let overflow_duration = max_duration
.checked_add(Duration::nanoseconds(1))
.unwrap_or(max_duration);
assert_eq_parse!(
format!(" foo=bar; Max-Age={:?}", overflow_duration.whole_seconds()),
expected
);
}
}

View File

@ -84,7 +84,7 @@ impl Key {
}
/// Generates signing/encryption keys from a secure, random source. Keys are
/// generated nondeterministically.
/// generated non-deterministically.
///
/// # Panics
///
@ -103,7 +103,7 @@ impl Key {
}
/// Attempts to generate signing/encryption keys from a secure, random
/// source. Keys are generated nondeterministically. If randomness cannot be
/// source. Keys are generated non-deterministically. If randomness cannot be
/// retrieved from the underlying operating system, returns `None`.
///
/// # Example

View File

@ -9,6 +9,7 @@ use brotli2::write::BrotliEncoder;
use bytes::Bytes;
use flate2::write::{GzEncoder, ZlibEncoder};
use futures_core::ready;
use pin_project::pin_project;
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
@ -19,8 +20,10 @@ use super::Writer;
const INPLACE: usize = 1024;
#[pin_project]
pub struct Encoder<B> {
eof: bool,
#[pin]
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<CpuFuture<ContentEncoder, io::Error>>,
@ -76,67 +79,88 @@ impl<B: MessageBody> Encoder<B> {
}
}
#[pin_project(project = EncoderBodyProj)]
enum EncoderBody<B> {
Bytes(Bytes),
Stream(B),
BoxedStream(Box<dyn MessageBody>),
Stream(#[pin] B),
BoxedStream(Box<dyn MessageBody + Unpin>),
}
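The body enum now pins its streaming variant and relies on pin-project's named projection. A small self-contained sketch of that pattern, not specific to this crate and not part of this changeset:

use std::pin::Pin;
use pin_project::pin_project;

#[pin_project(project = EitherProj)]
enum Either<A, B> {
    Left(#[pin] A), // projected as Pin<&mut A>
    Right(B),       // projected as &mut B
}

impl<A, B> Either<A, B> {
    fn which(self: Pin<&mut Self>) -> &'static str {
        match self.project() {
            EitherProj::Left(_pinned) => "left: field stays pinned",
            EitherProj::Right(_plain) => "right: plain mutable access",
        }
    }
}

fn main() {
    let mut value = Box::pin(Either::<u8, u8>::Left(1));
    println!("{}", value.as_mut().which());
}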
impl<B: MessageBody> MessageBody for EncoderBody<B> {
fn size(&self) -> BodySize {
match self {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
EncoderBodyProj::Bytes(b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::take(b))))
}
}
EncoderBodyProj::Stream(b) => b.poll_next(cx),
EncoderBodyProj::BoxedStream(ref mut b) => {
Pin::new(b.as_mut()).poll_next(cx)
}
}
}
}
impl<B: MessageBody> MessageBody for Encoder<B> {
fn size(&self) -> BodySize {
if self.encoder.is_none() {
match self.body {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
}
self.body.size()
} else {
BodySize::Stream
}
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut this = self.project();
loop {
if self.eof {
if *this.eof {
return Poll::Ready(None);
}
if let Some(ref mut fut) = self.fut {
if let Some(ref mut fut) = this.fut {
let mut encoder = match ready!(Pin::new(fut).poll(cx)) {
Ok(item) => item,
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
let chunk = encoder.take();
self.encoder = Some(encoder);
self.fut.take();
*this.encoder = Some(encoder);
this.fut.take();
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
}
let result = match self.body {
EncoderBody::Bytes(ref mut b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::replace(b, Bytes::new()))))
}
}
EncoderBody::Stream(ref mut b) => b.poll_next(cx),
EncoderBody::BoxedStream(ref mut b) => b.poll_next(cx),
};
let result = this.body.as_mut().poll_next(cx);
match result {
Poll::Ready(Some(Ok(chunk))) => {
if let Some(mut encoder) = self.encoder.take() {
if let Some(mut encoder) = this.encoder.take() {
if chunk.len() < INPLACE {
encoder.write(&chunk)?;
let chunk = encoder.take();
self.encoder = Some(encoder);
*this.encoder = Some(encoder);
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
self.fut = Some(run(move || {
*this.fut = Some(run(move || {
encoder.write(&chunk)?;
Ok(encoder)
}));
@ -146,12 +170,12 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
}
}
Poll::Ready(None) => {
if let Some(encoder) = self.encoder.take() {
if let Some(encoder) = this.encoder.take() {
let chunk = encoder.finish()?;
if chunk.is_empty() {
return Poll::Ready(None);
} else {
self.eof = true;
*this.eof = true;
return Poll::Ready(Some(Ok(chunk)));
}
} else {

View File

@ -1,5 +1,4 @@
//! Error and Result module
use std::any::TypeId;
use std::cell::RefCell;
use std::io::Write;
use std::str::Utf8Error;
@ -15,12 +14,11 @@ use derive_more::{Display, From};
pub use futures_channel::oneshot::Canceled;
use http::uri::InvalidUri;
use http::{header, Error as HttpError, StatusCode};
use httparse;
use serde::de::value::Error as DeError;
use serde_json::error::Error as JsonError;
use serde_urlencoded::ser::Error as FormError;
// re-export for convinience
// re-export for convenience
use crate::body::Body;
pub use crate::cookie::ParseError as CookieParseError;
use crate::helpers::Writer;
@ -36,7 +34,7 @@ pub type Result<T, E = Error> = result::Result<T, E>;
/// General purpose actix web error.
///
/// An actix web error is used to carry errors from `failure` or `std::error`
/// An actix web error is used to carry errors from `std::error`
/// through actix in a convenient way. It can be created through
/// converting errors with `into()`.
///
@ -60,12 +58,6 @@ impl Error {
}
}
/// A struct with a private constructor, for use with
/// `__private_get_type_id__`. Its single field is private,
/// ensuring that it can only be constructed from this module
#[doc(hidden)]
pub struct PrivateHelper(());
/// Error that can be converted to `Response`
pub trait ResponseError: fmt::Debug + fmt::Display {
/// Response's status code
@ -89,43 +81,10 @@ pub trait ResponseError: fmt::Debug + fmt::Display {
resp.set_body(Body::from(buf))
}
/// A helper method to get the type ID of the type
/// this trait is implemented on.
/// This method is unsafe to *implement*, since `downcast_ref` relies
/// on the returned `TypeId` to perform a cast.
///
/// Unfortunately, Rust has no notion of a trait method that is
/// unsafe to implement (marking it as `unsafe` makes it unsafe
/// to *call*). As a workaround, we require this method
/// to return a private type along with the `TypeId`. This
/// private type (`PrivateHelper`) has a private constructor,
/// making it impossible for safe code to construct outside of
/// this module. This ensures that safe code cannot violate
/// type-safety by implementing this method.
#[doc(hidden)]
fn __private_get_type_id__(&self) -> (TypeId, PrivateHelper)
where
Self: 'static,
{
(TypeId::of::<Self>(), PrivateHelper(()))
}
downcast_get_type_id!();
}
impl dyn ResponseError + 'static {
/// Downcasts a response error to a specific type.
pub fn downcast_ref<T: ResponseError + 'static>(&self) -> Option<&T> {
if self.__private_get_type_id__().0 == TypeId::of::<T>() {
// Safety: external crates cannot override the default
// implementation of `__private_get_type_id__`, since
// it requires returning a private type. We can therefore
// rely on the returned `TypeId`, which ensures that this
// case is correct.
unsafe { Some(&*(self as *const dyn ResponseError as *const T)) }
} else {
None
}
}
}
downcast!(ResponseError);
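The hand-rolled type-id plumbing is replaced by the crate's `downcast!` macro, which still generates a `downcast_ref` on the trait object. A hedged usage sketch follows (it assumes `ResponseError` and `PayloadError` are exported as in the current crate layout, and is not part of this changeset):

use actix_http::error::PayloadError;
use actix_http::ResponseError;

fn main() {
    let err: Box<dyn ResponseError> = Box::new(PayloadError::Overflow);
    if let Some(payload_err) = err.downcast_ref::<PayloadError>() {
        println!("payload error: {}", payload_err);
    }
}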
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -374,6 +333,8 @@ pub enum PayloadError {
Io(io::Error),
}
impl std::error::Error for PayloadError {}
impl From<h2::Error> for PayloadError {
fn from(err: h2::Error) -> Self {
PayloadError::Http2Payload(err)
@ -471,7 +432,7 @@ pub enum DispatchError {
Unknown,
}
/// A set of error that can occure during parsing content type
/// A set of errors that can occur while parsing a content type
#[derive(PartialEq, Debug, Display)]
pub enum ContentTypeError {
/// Can not parse content type
@ -482,6 +443,8 @@ pub enum ContentTypeError {
UnknownEncoding,
}
impl std::error::Error for ContentTypeError {}
/// Return `BadRequest` for `ContentTypeError`
impl ResponseError for ContentTypeError {
fn status_code(&self) -> StatusCode {
@ -987,9 +950,15 @@ where
InternalError::new(err, StatusCode::NETWORK_AUTHENTICATION_REQUIRED).into()
}
#[cfg(feature = "failure")]
/// Compatibility for `failure::Error`
impl ResponseError for fail_ure::Error {}
#[cfg(feature = "actors")]
/// `InternalServerError` for `actix::MailboxError`
/// This is supported on feature=`actors` only
impl ResponseError for actix::MailboxError {}
#[cfg(feature = "actors")]
/// `InternalServerError` for `actix::ResolverError`
/// This is supported on feature=`actors` only
impl ResponseError for actix::actors::resolver::ResolverError {}
#[cfg(test)]
mod tests {

View File

@ -6,6 +6,8 @@ use fxhash::FxHashMap;
#[derive(Default)]
/// A type map of request extensions.
pub struct Extensions {
/// Use FxHasher with a std HashMap for faster
/// lookups on the small `TypeId` (u64 equivalent) keys.
map: FxHashMap<TypeId, Box<dyn Any>>,
}
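The map is keyed by `TypeId`, so it stores at most one value per concrete type. A minimal usage sketch, assuming `Extensions` is exported from the crate root (not part of this changeset):

use actix_http::Extensions;

#[derive(Debug, PartialEq)]
struct Counter(u32);

fn main() {
    let mut ext = Extensions::new();
    ext.insert(Counter(1));
    ext.insert(Counter(2)); // same type: the previous value is overwritten
    assert_eq!(ext.get::<Counter>(), Some(&Counter(2)));
    assert_eq!(ext.remove::<Counter>(), Some(Counter(2)));
    assert!(ext.get::<Counter>().is_none());
}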
@ -28,33 +30,30 @@ impl Extensions {
/// Check if container contains entry
pub fn contains<T: 'static>(&self) -> bool {
self.map.get(&TypeId::of::<T>()).is_some()
self.map.contains_key(&TypeId::of::<T>())
}
/// Get a reference to a type previously inserted on this `Extensions`.
pub fn get<T: 'static>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref())
.and_then(|boxed| boxed.downcast_ref())
}
/// Get a mutable reference to a type previously inserted on this `Extensions`.
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut())
.and_then(|boxed| boxed.downcast_mut())
}
/// Remove a type from this `Extensions`.
///
/// If a extension of this type existed, it will be returned.
pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map.remove(&TypeId::of::<T>()).and_then(|boxed| {
(boxed as Box<dyn Any + 'static>)
.downcast()
.ok()
.map(|boxed| *boxed)
})
self.map
.remove(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
}
/// Clear the `Extensions` of all inserted extensions.
@ -70,22 +69,113 @@ impl fmt::Debug for Extensions {
}
}
#[test]
fn test_extensions() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
#[cfg(test)]
mod tests {
use super::*;
let mut extensions = Extensions::new();
#[test]
fn test_remove() {
let mut map = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
map.insert::<i8>(123);
assert!(map.get::<i8>().is_some());
assert_eq!(extensions.get(), Some(&5i32));
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
map.remove::<i8>();
assert!(map.get::<i8>().is_none());
}
assert_eq!(extensions.remove::<i32>(), Some(5i32));
assert!(extensions.get::<i32>().is_none());
#[test]
fn test_clear() {
let mut map = Extensions::new();
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
map.insert::<i8>(8);
map.insert::<i16>(16);
map.insert::<i32>(32);
assert!(map.contains::<i8>());
assert!(map.contains::<i16>());
assert!(map.contains::<i32>());
map.clear();
assert!(!map.contains::<i8>());
assert!(!map.contains::<i16>());
assert!(!map.contains::<i32>());
map.insert::<i8>(10);
assert_eq!(*map.get::<i8>().unwrap(), 10);
}
#[test]
fn test_integers() {
let mut map = Extensions::new();
map.insert::<i8>(8);
map.insert::<i16>(16);
map.insert::<i32>(32);
map.insert::<i64>(64);
map.insert::<i128>(128);
map.insert::<u8>(8);
map.insert::<u16>(16);
map.insert::<u32>(32);
map.insert::<u64>(64);
map.insert::<u128>(128);
assert!(map.get::<i8>().is_some());
assert!(map.get::<i16>().is_some());
assert!(map.get::<i32>().is_some());
assert!(map.get::<i64>().is_some());
assert!(map.get::<i128>().is_some());
assert!(map.get::<u8>().is_some());
assert!(map.get::<u16>().is_some());
assert!(map.get::<u32>().is_some());
assert!(map.get::<u64>().is_some());
assert!(map.get::<u128>().is_some());
}
#[test]
fn test_composition() {
struct Magi<T>(pub T);
struct Madoka {
pub god: bool,
}
struct Homura {
pub attempts: usize,
}
struct Mami {
pub guns: usize,
}
let mut map = Extensions::new();
map.insert(Magi(Madoka { god: false }));
map.insert(Magi(Homura { attempts: 0 }));
map.insert(Magi(Mami { guns: 999 }));
assert!(!map.get::<Magi<Madoka>>().unwrap().0.god);
assert_eq!(0, map.get::<Magi<Homura>>().unwrap().0.attempts);
assert_eq!(999, map.get::<Magi<Mami>>().unwrap().0.guns);
}
#[test]
fn test_extensions() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
let mut extensions = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
assert_eq!(extensions.get(), Some(&5i32));
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
assert_eq!(extensions.remove::<i32>(), Some(5i32));
assert!(extensions.get::<i32>().is_none());
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
}
}

View File

@ -8,7 +8,6 @@ use actix_codec::Decoder;
use bytes::{Buf, Bytes, BytesMut};
use http::header::{HeaderName, HeaderValue};
use http::{header, Method, StatusCode, Uri, Version};
use httparse;
use log::{debug, error, trace};
use crate::error::ParseError;
@ -19,7 +18,7 @@ use crate::request::Request;
const MAX_BUFFER_SIZE: usize = 131_072;
const MAX_HEADERS: usize = 96;
/// Incoming messagd decoder
/// Incoming message decoder
pub(crate) struct MessageDecoder<T: MessageType>(PhantomData<T>);
#[derive(Debug)]

View File

@ -10,6 +10,7 @@ use actix_service::Service;
use bitflags::bitflags;
use bytes::{Buf, BytesMut};
use log::{error, trace};
use pin_project::pin_project;
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::cloneable::CloneableService;
@ -41,6 +42,7 @@ bitflags! {
}
}
#[pin_project::pin_project]
/// Dispatcher for HTTP/1.1 protocol
pub struct Dispatcher<T, S, B, X, U>
where
@ -52,9 +54,11 @@ where
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
#[pin]
inner: DispatcherState<T, S, B, X, U>,
}
#[pin_project(project = DispatcherStateProj)]
enum DispatcherState<T, S, B, X, U>
where
S: Service<Request = Request>,
@ -65,11 +69,11 @@ where
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
Normal(InnerDispatcher<T, S, B, X, U>),
Upgrade(U::Future),
None,
Normal(#[pin] InnerDispatcher<T, S, B, X, U>),
Upgrade(Pin<Box<U::Future>>),
}
#[pin_project(project = InnerDispatcherProj)]
struct InnerDispatcher<T, S, B, X, U>
where
S: Service<Request = Request>,
@ -88,6 +92,7 @@ where
peer_addr: Option<net::SocketAddr>,
error: Option<DispatchError>,
#[pin]
state: State<S, B, X>,
payload: Option<PayloadSender>,
messages: VecDeque<DispatcherMessage>,
@ -95,7 +100,7 @@ where
ka_expire: Instant,
ka_timer: Option<Delay>,
io: T,
io: Option<T>,
read_buf: BytesMut,
write_buf: BytesMut,
codec: Codec,
@ -107,6 +112,7 @@ enum DispatcherMessage {
Error(Response<()>),
}
#[pin_project(project = StateProj)]
enum State<S, B, X>
where
S: Service<Request = Request>,
@ -114,9 +120,9 @@ where
B: MessageBody,
{
None,
ExpectCall(X::Future),
ServiceCall(S::Future),
SendPayload(ResponseBody<B>),
ExpectCall(Pin<Box<X::Future>>),
ServiceCall(Pin<Box<S::Future>>),
SendPayload(#[pin] ResponseBody<B>),
}
impl<S, B, X> State<S, B, X>
@ -141,7 +147,6 @@ where
}
}
}
enum PollResponse {
Upgrade(Request),
DoNothing,
@ -236,7 +241,7 @@ where
state: State::None,
error: None,
messages: VecDeque::new(),
io,
io: Some(io),
codec,
read_buf,
service,
@ -278,29 +283,33 @@ where
}
// if checked is set to true, delay disconnect until all tasks have finished.
fn client_disconnected(&mut self) {
self.flags
fn client_disconnected(self: Pin<&mut Self>) {
let this = self.project();
this.flags
.insert(Flags::READ_DISCONNECT | Flags::WRITE_DISCONNECT);
if let Some(mut payload) = self.payload.take() {
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
}
/// Flush stream
///
/// true - got whouldblock
/// false - didnt get whouldblock
fn poll_flush(&mut self, cx: &mut Context<'_>) -> Result<bool, DispatchError> {
/// true - got WouldBlock
/// false - didn't get WouldBlock
fn poll_flush(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<bool, DispatchError> {
if self.write_buf.is_empty() {
return Ok(false);
}
let len = self.write_buf.len();
let mut written = 0;
let InnerDispatcherProj { io, write_buf, .. } = self.project();
let mut io = Pin::new(io.as_mut().unwrap());
while written < len {
match unsafe { Pin::new_unchecked(&mut self.io) }
.poll_write(cx, &self.write_buf[written..])
{
match io.as_mut().poll_write(cx, &write_buf[written..]) {
Poll::Ready(Ok(0)) => {
return Err(DispatchError::Io(io::Error::new(
io::ErrorKind::WriteZero,
@ -312,112 +321,117 @@ where
}
Poll::Pending => {
if written > 0 {
self.write_buf.advance(written);
write_buf.advance(written);
}
return Ok(true);
}
Poll::Ready(Err(err)) => return Err(DispatchError::Io(err)),
}
}
if written == self.write_buf.len() {
unsafe { self.write_buf.set_len(0) }
if written == write_buf.len() {
unsafe { write_buf.set_len(0) }
} else {
self.write_buf.advance(written);
write_buf.advance(written);
}
Ok(false)
}
fn send_response(
&mut self,
self: Pin<&mut Self>,
message: Response<()>,
body: ResponseBody<B>,
) -> Result<State<S, B, X>, DispatchError> {
self.codec
.encode(Message::Item((message, body.size())), &mut self.write_buf)
let mut this = self.project();
this.codec
.encode(Message::Item((message, body.size())), &mut this.write_buf)
.map_err(|err| {
if let Some(mut payload) = self.payload.take() {
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
DispatchError::Io(err)
})?;
self.flags.set(Flags::KEEPALIVE, self.codec.keepalive());
this.flags.set(Flags::KEEPALIVE, this.codec.keepalive());
match body.size() {
BodySize::None | BodySize::Empty => Ok(State::None),
_ => Ok(State::SendPayload(body)),
}
}
fn send_continue(&mut self) {
self.write_buf
fn send_continue(self: Pin<&mut Self>) {
self.project()
.write_buf
.extend_from_slice(b"HTTP/1.1 100 Continue\r\n\r\n");
}
fn poll_response(
&mut self,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<PollResponse, DispatchError> {
loop {
let state = match self.state {
State::None => match self.messages.pop_front() {
let mut this = self.as_mut().project();
let state = match this.state.project() {
StateProj::None => match this.messages.pop_front() {
Some(DispatcherMessage::Item(req)) => {
Some(self.handle_request(req, cx)?)
}
Some(DispatcherMessage::Error(res)) => {
Some(self.send_response(res, ResponseBody::Other(Body::Empty))?)
Some(self.as_mut().handle_request(req, cx)?)
}
Some(DispatcherMessage::Error(res)) => Some(
self.as_mut()
.send_response(res, ResponseBody::Other(Body::Empty))?,
),
Some(DispatcherMessage::Upgrade(req)) => {
return Ok(PollResponse::Upgrade(req));
}
None => None,
},
State::ExpectCall(ref mut fut) => {
match unsafe { Pin::new_unchecked(fut) }.poll(cx) {
Poll::Ready(Ok(req)) => {
self.send_continue();
self.state = State::ServiceCall(self.service.call(req));
continue;
}
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.send_response(res, body.into_body())?)
}
Poll::Pending => None,
StateProj::ExpectCall(fut) => match fut.as_mut().poll(cx) {
Poll::Ready(Ok(req)) => {
self.as_mut().send_continue();
this = self.as_mut().project();
this.state
.set(State::ServiceCall(Box::pin(this.service.call(req))));
continue;
}
}
State::ServiceCall(ref mut fut) => {
match unsafe { Pin::new_unchecked(fut) }.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
self.state = self.send_response(res, body)?;
continue;
}
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.send_response(res, body.into_body())?)
}
Poll::Pending => None,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.as_mut().send_response(res, body.into_body())?)
}
}
State::SendPayload(ref mut stream) => {
Poll::Pending => None,
},
StateProj::ServiceCall(fut) => match fut.as_mut().poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
let state = self.as_mut().send_response(res, body)?;
this = self.as_mut().project();
this.state.set(state);
continue;
}
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.as_mut().send_response(res, body.into_body())?)
}
Poll::Pending => None,
},
StateProj::SendPayload(mut stream) => {
loop {
if self.write_buf.len() < HW_BUFFER_SIZE {
match stream.poll_next(cx) {
if this.write_buf.len() < HW_BUFFER_SIZE {
match stream.as_mut().poll_next(cx) {
Poll::Ready(Some(Ok(item))) => {
self.codec.encode(
this.codec.encode(
Message::Chunk(Some(item)),
&mut self.write_buf,
&mut this.write_buf,
)?;
continue;
}
Poll::Ready(None) => {
self.codec.encode(
this.codec.encode(
Message::Chunk(None),
&mut self.write_buf,
&mut this.write_buf,
)?;
self.state = State::None;
this = self.as_mut().project();
this.state.set(State::None);
}
Poll::Ready(Some(Err(_))) => {
return Err(DispatchError::Unknown)
@ -433,9 +447,11 @@ where
}
};
this = self.as_mut().project();
// set new state
if let Some(state) = state {
self.state = state;
this.state.set(state);
if !self.state.is_empty() {
continue;
}
@ -443,7 +459,7 @@ where
// if read-backpressure is enabled and we consumed some data.
// we may read more data and retry
if self.state.is_call() {
if self.poll_request(cx)? {
if self.as_mut().poll_request(cx)? {
continue;
}
} else if !self.messages.is_empty() {
@ -457,16 +473,16 @@ where
}
fn handle_request(
&mut self,
mut self: Pin<&mut Self>,
req: Request,
cx: &mut Context<'_>,
) -> Result<State<S, B, X>, DispatchError> {
// Handle `EXPECT: 100-Continue` header
let req = if req.head().expect() {
let mut task = self.expect.call(req);
match unsafe { Pin::new_unchecked(&mut task) }.poll(cx) {
let mut task = Box::pin(self.as_mut().project().expect.call(req));
match task.as_mut().poll(cx) {
Poll::Ready(Ok(req)) => {
self.send_continue();
self.as_mut().send_continue();
req
}
Poll::Pending => return Ok(State::ExpectCall(task)),
@ -482,8 +498,8 @@ where
};
// Call service
let mut task = self.service.call(req);
match unsafe { Pin::new_unchecked(&mut task) }.poll(cx) {
let mut task = Box::pin(self.as_mut().project().service.call(req));
match task.as_mut().poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
self.send_response(res, body)
@ -499,7 +515,7 @@ where
/// Process one incoming request
pub(self) fn poll_request(
&mut self,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<bool, DispatchError> {
// limit the amount of non-processed requests
@ -508,24 +524,25 @@ where
}
let mut updated = false;
let mut this = self.as_mut().project();
loop {
match self.codec.decode(&mut self.read_buf) {
match this.codec.decode(&mut this.read_buf) {
Ok(Some(msg)) => {
updated = true;
self.flags.insert(Flags::STARTED);
this.flags.insert(Flags::STARTED);
match msg {
Message::Item(mut req) => {
let pl = self.codec.message_type();
req.head_mut().peer_addr = self.peer_addr;
let pl = this.codec.message_type();
req.head_mut().peer_addr = *this.peer_addr;
// set on_connect data
if let Some(ref on_connect) = self.on_connect {
if let Some(ref on_connect) = this.on_connect {
on_connect.set(&mut req.extensions_mut());
}
if pl == MessageType::Stream && self.upgrade.is_some() {
self.messages.push_back(DispatcherMessage::Upgrade(req));
if pl == MessageType::Stream && this.upgrade.is_some() {
this.messages.push_back(DispatcherMessage::Upgrade(req));
break;
}
if pl == MessageType::Payload || pl == MessageType::Stream {
@ -533,41 +550,43 @@ where
let (req1, _) =
req.replace_payload(crate::Payload::H1(pl));
req = req1;
self.payload = Some(ps);
*this.payload = Some(ps);
}
// handle request early
if self.state.is_empty() {
self.state = self.handle_request(req, cx)?;
if this.state.is_empty() {
let state = self.as_mut().handle_request(req, cx)?;
this = self.as_mut().project();
this.state.set(state);
} else {
self.messages.push_back(DispatcherMessage::Item(req));
this.messages.push_back(DispatcherMessage::Item(req));
}
}
Message::Chunk(Some(chunk)) => {
if let Some(ref mut payload) = self.payload {
if let Some(ref mut payload) = this.payload {
payload.feed_data(chunk);
} else {
error!(
"Internal server error: unexpected payload chunk"
);
self.flags.insert(Flags::READ_DISCONNECT);
self.messages.push_back(DispatcherMessage::Error(
this.flags.insert(Flags::READ_DISCONNECT);
this.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
self.error = Some(DispatchError::InternalError);
*this.error = Some(DispatchError::InternalError);
break;
}
}
Message::Chunk(None) => {
if let Some(mut payload) = self.payload.take() {
if let Some(mut payload) = this.payload.take() {
payload.feed_eof();
} else {
error!("Internal server error: unexpected eof");
self.flags.insert(Flags::READ_DISCONNECT);
self.messages.push_back(DispatcherMessage::Error(
this.flags.insert(Flags::READ_DISCONNECT);
this.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
self.error = Some(DispatchError::InternalError);
*this.error = Some(DispatchError::InternalError);
break;
}
}
@ -575,44 +594,49 @@ where
}
Ok(None) => break,
Err(ParseError::Io(e)) => {
self.client_disconnected();
self.error = Some(DispatchError::Io(e));
self.as_mut().client_disconnected();
this = self.as_mut().project();
*this.error = Some(DispatchError::Io(e));
break;
}
Err(e) => {
if let Some(mut payload) = self.payload.take() {
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::EncodingCorrupted);
}
// Malformed requests should be responded with 400
self.messages.push_back(DispatcherMessage::Error(
this.messages.push_back(DispatcherMessage::Error(
Response::BadRequest().finish().drop_body(),
));
self.flags.insert(Flags::READ_DISCONNECT);
self.error = Some(e.into());
this.flags.insert(Flags::READ_DISCONNECT);
*this.error = Some(e.into());
break;
}
}
}
if updated && self.ka_timer.is_some() {
if let Some(expire) = self.codec.config().keep_alive_expire() {
self.ka_expire = expire;
if updated && this.ka_timer.is_some() {
if let Some(expire) = this.codec.config().keep_alive_expire() {
*this.ka_expire = expire;
}
}
Ok(updated)
}
/// keep-alive timer
fn poll_keepalive(&mut self, cx: &mut Context<'_>) -> Result<(), DispatchError> {
if self.ka_timer.is_none() {
fn poll_keepalive(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<(), DispatchError> {
let mut this = self.as_mut().project();
if this.ka_timer.is_none() {
// shutdown timeout
if self.flags.contains(Flags::SHUTDOWN) {
if let Some(interval) = self.codec.config().client_disconnect_timer() {
self.ka_timer = Some(delay_until(interval));
if this.flags.contains(Flags::SHUTDOWN) {
if let Some(interval) = this.codec.config().client_disconnect_timer() {
*this.ka_timer = Some(delay_until(interval));
} else {
self.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = self.payload.take() {
this.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
return Ok(());
@ -622,55 +646,56 @@ where
}
}
match Pin::new(&mut self.ka_timer.as_mut().unwrap()).poll(cx) {
match Pin::new(&mut this.ka_timer.as_mut().unwrap()).poll(cx) {
Poll::Ready(()) => {
// if we get timeout during shutdown, drop connection
if self.flags.contains(Flags::SHUTDOWN) {
if this.flags.contains(Flags::SHUTDOWN) {
return Err(DispatchError::DisconnectTimeout);
} else if self.ka_timer.as_mut().unwrap().deadline() >= self.ka_expire {
} else if this.ka_timer.as_mut().unwrap().deadline() >= *this.ka_expire {
// check for any outstanding tasks
if self.state.is_empty() && self.write_buf.is_empty() {
if self.flags.contains(Flags::STARTED) {
if this.state.is_empty() && this.write_buf.is_empty() {
if this.flags.contains(Flags::STARTED) {
trace!("Keep-alive timeout, close connection");
self.flags.insert(Flags::SHUTDOWN);
this.flags.insert(Flags::SHUTDOWN);
// start shutdown timer
if let Some(deadline) =
self.codec.config().client_disconnect_timer()
this.codec.config().client_disconnect_timer()
{
if let Some(mut timer) = self.ka_timer.as_mut() {
if let Some(mut timer) = this.ka_timer.as_mut() {
timer.reset(deadline);
let _ = Pin::new(&mut timer).poll(cx);
}
} else {
// no shutdown timeout, drop socket
self.flags.insert(Flags::WRITE_DISCONNECT);
this.flags.insert(Flags::WRITE_DISCONNECT);
return Ok(());
}
} else {
// timeout on first request (slow request) return 408
if !self.flags.contains(Flags::STARTED) {
if !this.flags.contains(Flags::STARTED) {
trace!("Slow request timeout");
let _ = self.send_response(
let _ = self.as_mut().send_response(
Response::RequestTimeout().finish().drop_body(),
ResponseBody::Other(Body::Empty),
);
this = self.as_mut().project();
} else {
trace!("Keep-alive connection timeout");
}
self.flags.insert(Flags::STARTED | Flags::SHUTDOWN);
self.state = State::None;
this.flags.insert(Flags::STARTED | Flags::SHUTDOWN);
this.state.set(State::None);
}
} else if let Some(deadline) =
self.codec.config().keep_alive_expire()
this.codec.config().keep_alive_expire()
{
if let Some(mut timer) = self.ka_timer.as_mut() {
if let Some(mut timer) = this.ka_timer.as_mut() {
timer.reset(deadline);
let _ = Pin::new(&mut timer).poll(cx);
}
}
} else if let Some(mut timer) = self.ka_timer.as_mut() {
timer.reset(self.ka_expire);
} else if let Some(mut timer) = this.ka_timer.as_mut() {
timer.reset(*this.ka_expire);
let _ = Pin::new(&mut timer).poll(cx);
}
}
@ -681,20 +706,6 @@ where
}
}
impl<T, S, B, X, U> Unpin for Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
}
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
@ -711,20 +722,25 @@ where
#[inline]
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.as_mut().inner {
DispatcherState::Normal(ref mut inner) => {
inner.poll_keepalive(cx)?;
let this = self.as_mut().project();
match this.inner.project() {
DispatcherStateProj::Normal(mut inner) => {
inner.as_mut().poll_keepalive(cx)?;
if inner.flags.contains(Flags::SHUTDOWN) {
if inner.flags.contains(Flags::WRITE_DISCONNECT) {
Poll::Ready(Ok(()))
} else {
// flush buffer
inner.poll_flush(cx)?;
if !inner.write_buf.is_empty() {
inner.as_mut().poll_flush(cx)?;
if !inner.write_buf.is_empty() || inner.io.is_none() {
Poll::Pending
} else {
match Pin::new(&mut inner.io).poll_shutdown(cx) {
match Pin::new(inner.project().io)
.as_pin_mut()
.unwrap()
.poll_shutdown(cx)
{
Poll::Ready(res) => {
Poll::Ready(res.map_err(DispatchError::from))
}
@ -736,53 +752,58 @@ where
// read socket into a buf
let should_disconnect =
if !inner.flags.contains(Flags::READ_DISCONNECT) {
read_available(cx, &mut inner.io, &mut inner.read_buf)?
let mut inner_p = inner.as_mut().project();
read_available(
cx,
inner_p.io.as_mut().unwrap(),
&mut inner_p.read_buf,
)?
} else {
None
};
inner.poll_request(cx)?;
inner.as_mut().poll_request(cx)?;
if let Some(true) = should_disconnect {
inner.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = inner.payload.take() {
let inner_p = inner.as_mut().project();
inner_p.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = inner_p.payload.take() {
payload.feed_eof();
}
};
loop {
let inner_p = inner.as_mut().project();
let remaining =
inner.write_buf.capacity() - inner.write_buf.len();
inner_p.write_buf.capacity() - inner_p.write_buf.len();
if remaining < LW_BUFFER_SIZE {
inner.write_buf.reserve(HW_BUFFER_SIZE - remaining);
inner_p.write_buf.reserve(HW_BUFFER_SIZE - remaining);
}
let result = inner.poll_response(cx)?;
let result = inner.as_mut().poll_response(cx)?;
let drain = result == PollResponse::DrainWriteBuf;
// switch to upgrade handler
if let PollResponse::Upgrade(req) = result {
if let DispatcherState::Normal(inner) =
std::mem::replace(&mut self.inner, DispatcherState::None)
{
let mut parts = FramedParts::with_read_buf(
inner.io,
inner.codec,
inner.read_buf,
);
parts.write_buf = inner.write_buf;
let framed = Framed::from_parts(parts);
self.inner = DispatcherState::Upgrade(
inner.upgrade.unwrap().call((req, framed)),
);
return self.poll(cx);
} else {
panic!()
}
let inner_p = inner.as_mut().project();
let mut parts = FramedParts::with_read_buf(
inner_p.io.take().unwrap(),
std::mem::take(inner_p.codec),
std::mem::take(inner_p.read_buf),
);
parts.write_buf = std::mem::take(inner_p.write_buf);
let framed = Framed::from_parts(parts);
let upgrade =
inner_p.upgrade.take().unwrap().call((req, framed));
self.as_mut()
.project()
.inner
.set(DispatcherState::Upgrade(Box::pin(upgrade)));
return self.poll(cx);
}
// we didnt get WouldBlock from write operation,
// we didn't get WouldBlock from write operation,
// so data gets written to the kernel completely (macOS)
// and we have to write again, otherwise the response can get stuck
if inner.poll_flush(cx)? || !drain {
if inner.as_mut().poll_flush(cx)? || !drain {
break;
}
}
@ -794,25 +815,26 @@ where
let is_empty = inner.state.is_empty();
let inner_p = inner.as_mut().project();
// read half is closed and we are not processing any responses
if inner.flags.contains(Flags::READ_DISCONNECT) && is_empty {
inner.flags.insert(Flags::SHUTDOWN);
if inner_p.flags.contains(Flags::READ_DISCONNECT) && is_empty {
inner_p.flags.insert(Flags::SHUTDOWN);
}
// keep-alive and stream errors
if is_empty && inner.write_buf.is_empty() {
if let Some(err) = inner.error.take() {
if is_empty && inner_p.write_buf.is_empty() {
if let Some(err) = inner_p.error.take() {
Poll::Ready(Err(err))
}
// disconnect if keep-alive is not enabled
else if inner.flags.contains(Flags::STARTED)
&& !inner.flags.intersects(Flags::KEEPALIVE)
else if inner_p.flags.contains(Flags::STARTED)
&& !inner_p.flags.intersects(Flags::KEEPALIVE)
{
inner.flags.insert(Flags::SHUTDOWN);
inner_p.flags.insert(Flags::SHUTDOWN);
self.poll(cx)
}
// disconnect if shutdown
else if inner.flags.contains(Flags::SHUTDOWN) {
else if inner_p.flags.contains(Flags::SHUTDOWN) {
self.poll(cx)
} else {
Poll::Pending
@ -822,13 +844,10 @@ where
}
}
}
DispatcherState::Upgrade(ref mut fut) => {
unsafe { Pin::new_unchecked(fut) }.poll(cx).map_err(|e| {
error!("Upgrade handler error: {}", e);
DispatchError::Upgrade
})
}
DispatcherState::None => panic!(),
DispatcherStateProj::Upgrade(fut) => fut.as_mut().poll(cx).map_err(|e| {
error!("Upgrade handler error: {}", e);
DispatchError::Upgrade
}),
}
}
}
@ -918,9 +937,12 @@ mod tests {
Poll::Ready(res) => assert!(res.is_err()),
}
if let DispatcherState::Normal(ref inner) = h1.inner {
if let DispatcherState::Normal(ref mut inner) = h1.inner {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(&inner.io.write_buf[..26], b"HTTP/1.1 400 Bad Request\r\n");
assert_eq!(
&inner.io.take().unwrap().write_buf[..26],
b"HTTP/1.1 400 Bad Request\r\n"
);
}
})
.await;

View File

@ -4,7 +4,7 @@ use std::ptr::copy_nonoverlapping;
use std::slice::from_raw_parts_mut;
use std::{cmp, io};
use bytes::{buf::BufMutExt, BufMut, BytesMut};
use bytes::{BufMut, BytesMut};
use crate::body::BodySize;
use crate::config::ServiceConfig;
@ -95,15 +95,6 @@ pub(crate) trait MessageType: Sized {
}
}
BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::Sized64(len) => {
if camel_case {
dst.put_slice(b"\r\nContent-Length: ");
} else {
dst.put_slice(b"\r\ncontent-length: ");
}
#[allow(clippy::write_with_newline)]
write!(dst.writer(), "{}\r\n", len)?;
}
BodySize::None => dst.put_slice(b"\r\n"),
}
@ -338,8 +329,7 @@ impl<T: MessageType> MessageEncoder<T> {
if !head {
self.te = match length {
BodySize::Empty => TransferEncoding::empty(),
BodySize::Sized(len) => TransferEncoding::length(len as u64),
BodySize::Sized64(len) => TransferEncoding::length(len),
BodySize::Sized(len) => TransferEncoding::length(len),
BodySize::Stream => {
if message.chunked() && !stream {
TransferEncoding::chunked()
@ -582,19 +572,6 @@ mod tests {
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Sized64(100),
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Content-Length: 100\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
let mut head = RequestHead::default();
head.set_camel_case_headers(false);
head.headers.insert(DATE, HeaderValue::from_static("date"));

View File

@ -13,6 +13,7 @@ use crate::response::Response;
#[pin_project::pin_project]
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
#[pin]
body: Option<ResponseBody<B>>,
framed: Option<Framed<T, Codec>>,
}
@ -35,24 +36,27 @@ where
impl<T, B> Future for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite,
B: MessageBody,
B: MessageBody + Unpin,
{
type Output = Result<Framed<T, Codec>, Error>;
// TODO: rethink if we need loops in polls
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
let mut this = self.project();
let mut body_done = this.body.is_none();
loop {
let mut body_ready = this.body.is_some();
let mut body_ready = !body_done;
let framed = this.framed.as_mut().unwrap();
// send body
if this.res.is_none() && this.body.is_some() {
while body_ready && this.body.is_some() && !framed.is_write_buf_full() {
match this.body.as_mut().unwrap().poll_next(cx)? {
if this.res.is_none() && body_ready {
while body_ready && !body_done && !framed.is_write_buf_full() {
match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? {
Poll::Ready(item) => {
// body is done
if item.is_none() {
// body is done when item is None
body_done = item.is_none();
if body_done {
let _ = this.body.take();
}
framed.write(Message::Chunk(item))?;
@ -82,7 +86,7 @@ where
continue;
}
if this.body.is_some() {
if !body_done {
if body_ready {
continue;
} else {

View File

@ -165,10 +165,10 @@ struct ServiceResponse<F, I, E, B> {
_t: PhantomData<(I, E)>,
}
#[pin_project::pin_project]
#[pin_project::pin_project(project = ServiceResponseStateProj)]
enum ServiceResponseState<F, B> {
ServiceCall(#[pin] F, Option<SendResponse<Bytes>>),
SendPayload(SendStream<Bytes>, ResponseBody<B>),
SendPayload(SendStream<Bytes>, #[pin] ResponseBody<B>),
}
impl<F, I, E, B> ServiceResponse<F, I, E, B>
@ -210,10 +210,6 @@ where
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
BodySize::Sized64(len) => res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
// copy headers
@ -249,70 +245,65 @@ where
{
type Output = ();
#[pin_project::project]
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
#[project]
match this.state.project() {
ServiceResponseState::ServiceCall(call, send) => {
match call.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
ServiceResponseStateProj::ServiceCall(call, send) => match call.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res =
self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state.set(ServiceResponseState::SendPayload(stream, body));
self.poll(cx)
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Ok(stream) => stream,
};
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res =
self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state.set(ServiceResponseState::SendPayload(
stream,
body.into_body(),
));
self.poll(cx)
}
if size.is_eof() {
Poll::Ready(())
} else {
this.state
.set(ServiceResponseState::SendPayload(stream, body));
self.poll(cx)
}
}
}
ServiceResponseState::SendPayload(ref mut stream, ref mut body) => loop {
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state.set(ServiceResponseState::SendPayload(
stream,
body.into_body(),
));
self.poll(cx)
}
}
},
ServiceResponseStateProj::SendPayload(ref mut stream, ref mut body) => loop {
loop {
if let Some(ref mut buffer) = this.buffer {
match stream.poll_capacity(cx) {
@ -338,7 +329,7 @@ where
}
}
} else {
match body.poll_next(cx) {
match body.as_mut().poll_next(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => {
if let Err(e) = stream.send_data(Bytes::new(), true) {

View File

@ -83,13 +83,11 @@ where
Error = DispatchError,
InitError = S::InitError,
> {
pipeline_factory(fn_factory(|| {
async {
Ok::<_, S::InitError>(fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ok::<_, DispatchError>((io, peer_addr))
}))
}
pipeline_factory(fn_factory(|| async {
Ok::<_, S::InitError>(fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ok::<_, DispatchError>((io, peer_addr))
}))
}))
.and_then(self)
}

View File

@ -63,7 +63,7 @@ header! {
(AcceptCharset, ACCEPT_CHARSET) => (QualityItem<Charset>)+
test_accept_charset {
/// Test case from RFC
// Test case from RFC
test_header!(test1, vec![b"iso-8859-5, unicode-1-1;q=0.8"]);
}
}

View File

@ -90,40 +90,40 @@ pub enum DispositionParam {
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *token "=" value*. Recipients should
/// ignore unrecognizable parameters.
Unknown(String, String),
/// An unrecognized extended paramater as defined in
/// An unrecognized extended parameter as defined in
/// [RFC5987](https://tools.ietf.org/html/rfc5987) as *ext-parameter*, in
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *ext-token "=" ext-value*. The single
/// trailling asterisk is not included. Recipients should ignore unrecognizable parameters.
/// trailing asterisk is not included. Recipients should ignore unrecognizable parameters.
UnknownExt(String, ExtendedValue),
}
impl DispositionParam {
/// Returns `true` if the paramater is [`Name`](DispositionParam::Name).
/// Returns `true` if the parameter is [`Name`](DispositionParam::Name).
#[inline]
pub fn is_name(&self) -> bool {
self.as_name().is_some()
}
/// Returns `true` if the paramater is [`Filename`](DispositionParam::Filename).
/// Returns `true` if the parameter is [`Filename`](DispositionParam::Filename).
#[inline]
pub fn is_filename(&self) -> bool {
self.as_filename().is_some()
}
/// Returns `true` if the paramater is [`FilenameExt`](DispositionParam::FilenameExt).
/// Returns `true` if the parameter is [`FilenameExt`](DispositionParam::FilenameExt).
#[inline]
pub fn is_filename_ext(&self) -> bool {
self.as_filename_ext().is_some()
}
/// Returns `true` if the paramater is [`Unknown`](DispositionParam::Unknown) and the `name`
/// Returns `true` if the parameter is [`Unknown`](DispositionParam::Unknown) and the `name`
#[inline]
/// matches.
pub fn is_unknown<T: AsRef<str>>(&self, name: T) -> bool {
self.as_unknown(name).is_some()
}
/// Returns `true` if the paramater is [`UnknownExt`](DispositionParam::UnknownExt) and the
/// Returns `true` if the parameter is [`UnknownExt`](DispositionParam::UnknownExt) and the
/// `name` matches.
#[inline]
pub fn is_unknown_ext<T: AsRef<str>>(&self, name: T) -> bool {
@ -373,7 +373,7 @@ impl ContentDisposition {
let param = if param_name.eq_ignore_ascii_case("name") {
DispositionParam::Name(value)
} else if param_name.eq_ignore_ascii_case("filename") {
// See also comments in test_from_raw_uncessary_percent_decode.
// See also comments in test_from_raw_unnecessary_percent_decode.
DispositionParam::Filename(value)
} else {
DispositionParam::Unknown(param_name.to_owned(), value)
@ -423,7 +423,7 @@ impl ContentDisposition {
/// Return the value of *name* if exists.
pub fn get_name(&self) -> Option<&str> {
self.parameters.iter().filter_map(|p| p.as_name()).nth(0)
self.parameters.iter().filter_map(|p| p.as_name()).next()
}
/// Return the value of *filename* if exists.
@ -431,7 +431,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_filename())
.nth(0)
.next()
}
/// Return the value of *filename\** if exists.
@ -439,7 +439,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_filename_ext())
.nth(0)
.next()
}
/// Return the value of the parameter which the `name` matches.
@ -448,7 +448,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_unknown(name))
.nth(0)
.next()
}
/// Return the value of the extended parameter which the `name` matches.
@ -457,7 +457,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_unknown_ext(name))
.nth(0)
.next()
}
}
@ -529,7 +529,7 @@ impl fmt::Display for DispositionParam {
// ; to use within parameter values
//
//
// See also comments in test_from_raw_uncessary_percent_decode.
// See also comments in test_from_raw_unnecessary_percent_decode.
lazy_static! {
static ref RE: Regex = Regex::new("[\x00-\x08\x10-\x1F\x7F\"\\\\]").unwrap();
}
@ -676,7 +676,7 @@ mod tests {
fn test_from_raw_unordered() {
let a = HeaderValue::from_static(
"form-data; dummy=3; filename=\"sample.png\" ; name=upload;",
// Actually, a trailling semolocon is not compliant. But it is fine to accept.
// Actually, a trailing semicolon is not compliant. But it is fine to accept.
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
@ -833,7 +833,7 @@ mod tests {
}
#[test]
fn test_from_raw_uncessary_percent_decode() {
fn test_from_raw_unnecessary_percent_decode() {
// In fact, RFC7578 (multipart/form-data) Section 2 and 4.2 suggests that filename with
// non-ASCII characters MAY be percent-encoded.
// On the contrary, RFC6266 or other RFCs related to Content-Disposition response header
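For reference, a short usage sketch of the `ContentDisposition` API touched above; the import paths are my approximation of the crate's public `header` module and the header value mirrors the tests in this file, so treat this as an illustration rather than the crate's own example:

use actix_http::http::header::{ContentDisposition, DispositionType, HeaderValue};

fn main() {
    let raw = HeaderValue::from_static("form-data; name=upload; filename=\"sample.png\"");
    let cd = ContentDisposition::from_raw(&raw).unwrap();

    assert_eq!(cd.disposition, DispositionType::FormData);
    assert_eq!(cd.get_name(), Some("upload"));
    assert_eq!(cd.get_filename(), Some("sample.png"));
}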

View File

@ -7,7 +7,7 @@ use header::{Header, Raw};
/// `Range` header, defined in [RFC7233](https://tools.ietf.org/html/rfc7233#section-3.1)
///
/// The "Range" header field on a GET request modifies the method
/// semantics to request transfer of only one or more subranges of the
/// semantics to request transfer of only one or more sub-ranges of the
/// selected representation data, rather than the entire selected
/// representation data.
///
@ -183,13 +183,13 @@ impl fmt::Display for Range {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Range::Bytes(ref ranges) => {
try!(write!(f, "bytes="));
write!(f, "bytes=")?;
for (i, range) in ranges.iter().enumerate() {
if i != 0 {
try!(f.write_str(","));
f.write_str(",")?;
}
try!(Display::fmt(range, f));
Display::fmt(range, f)?;
}
Ok(())
}
@ -214,9 +214,9 @@ impl FromStr for Range {
}
Ok(Range::Bytes(ranges))
}
(Some(unit), Some(range_str)) if unit != "" && range_str != "" => Ok(
Range::Unregistered(unit.to_owned(), range_str.to_owned()),
),
(Some(unit), Some(range_str)) if unit != "" && range_str != "" => {
Ok(Range::Unregistered(unit.to_owned(), range_str.to_owned()))
}
_ => Err(::Error::Header),
}
}
@ -229,7 +229,8 @@ impl FromStr for ByteRangeSpec {
let mut parts = s.splitn(2, '-');
match (parts.next(), parts.next()) {
(Some(""), Some(end)) => end.parse()
(Some(""), Some(end)) => end
.parse()
.or(Err(::Error::Header))
.map(ByteRangeSpec::Last),
(Some(start), Some("")) => start
@ -272,163 +273,138 @@ impl Header for Range {
}
}
#[test]
fn test_parse_bytes_range_valid() {
let r: Range = Header::parse_header(&"bytes=1-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100,-".into()).unwrap();
let r3 = Range::bytes(1, 100);
assert_eq!(r, r2);
assert_eq!(r2, r3);
#[cfg(test)]
mod tests {
use super::*;
let r: Range = Header::parse_header(&"bytes=1-100,200-".into()).unwrap();
let r2: Range =
Header::parse_header(&"bytes= 1-100 , 101-xxx, 200- ".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::AllFrom(200),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
#[test]
fn test_parse_bytes_range_valid() {
let r: Range = Header::parse_header(&"bytes=1-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100,-".into()).unwrap();
let r3 = Range::bytes(1, 100);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"bytes=1-100,-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100, ,,-100".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::Last(100),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"bytes=1-100,200-".into()).unwrap();
let r2: Range =
Header::parse_header(&"bytes= 1-100 , 101-xxx, 200- ".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::AllFrom(200),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_unregistered_range_valid() {
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=abcd".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "abcd".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=xxx-yyy".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "xxx-yyy".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_invalid() {
let r: ::Result<Range> = Header::parse_header(&"bytes=1-a,-".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-2-3".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"abc".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-100=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"custom=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"=1-100".into());
assert_eq!(r.ok(), None);
}
#[test]
fn test_fmt() {
use header::Headers;
let mut headers = Headers::new();
headers.set(Range::Bytes(vec![
ByteRangeSpec::FromTo(0, 1000),
ByteRangeSpec::AllFrom(2000),
]));
assert_eq!(&headers.to_string(), "Range: bytes=0-1000,2000-\r\n");
headers.clear();
headers.set(Range::Bytes(vec![]));
assert_eq!(&headers.to_string(), "Range: bytes=\r\n");
headers.clear();
headers.set(Range::Unregistered(
"custom".to_owned(),
"1-xxx".to_owned(),
));
assert_eq!(&headers.to_string(), "Range: custom=1-xxx\r\n");
}
#[test]
fn test_byte_range_spec_to_satisfiable_range() {
assert_eq!(
Some((0, 0)),
ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 2).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 5).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::FromTo(3, 3).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::FromTo(2, 1).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(0)
);
assert_eq!(
Some((0, 2)),
ByteRangeSpec::AllFrom(0).to_satisfiable_range(3)
);
assert_eq!(
Some((2, 2)),
ByteRangeSpec::AllFrom(2).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::AllFrom(3).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::AllFrom(5).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::AllFrom(0).to_satisfiable_range(0)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::Last(2).to_satisfiable_range(3)
);
assert_eq!(
Some((2, 2)),
ByteRangeSpec::Last(1).to_satisfiable_range(3)
);
assert_eq!(
Some((0, 2)),
ByteRangeSpec::Last(5).to_satisfiable_range(3)
);
assert_eq!(None, ByteRangeSpec::Last(0).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::Last(2).to_satisfiable_range(0));
let r: Range = Header::parse_header(&"bytes=1-100,-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100, ,,-100".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::Last(100),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_unregistered_range_valid() {
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=abcd".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "abcd".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=xxx-yyy".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "xxx-yyy".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_invalid() {
let r: ::Result<Range> = Header::parse_header(&"bytes=1-a,-".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-2-3".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"abc".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-100=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"custom=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"=1-100".into());
assert_eq!(r.ok(), None);
}
#[test]
fn test_fmt() {
use header::Headers;
let mut headers = Headers::new();
headers.set(Range::Bytes(vec![
ByteRangeSpec::FromTo(0, 1000),
ByteRangeSpec::AllFrom(2000),
]));
assert_eq!(&headers.to_string(), "Range: bytes=0-1000,2000-\r\n");
headers.clear();
headers.set(Range::Bytes(vec![]));
assert_eq!(&headers.to_string(), "Range: bytes=\r\n");
headers.clear();
headers.set(Range::Unregistered("custom".to_owned(), "1-xxx".to_owned()));
assert_eq!(&headers.to_string(), "Range: custom=1-xxx\r\n");
}
#[test]
fn test_byte_range_spec_to_satisfiable_range() {
assert_eq!(
Some((0, 0)),
ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 2).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 5).to_satisfiable_range(3)
);
assert_eq!(None, ByteRangeSpec::FromTo(3, 3).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::FromTo(2, 1).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(0));
assert_eq!(
Some((0, 2)),
ByteRangeSpec::AllFrom(0).to_satisfiable_range(3)
);
assert_eq!(
Some((2, 2)),
ByteRangeSpec::AllFrom(2).to_satisfiable_range(3)
);
assert_eq!(None, ByteRangeSpec::AllFrom(3).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::AllFrom(5).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::AllFrom(0).to_satisfiable_range(0));
assert_eq!(Some((1, 2)), ByteRangeSpec::Last(2).to_satisfiable_range(3));
assert_eq!(Some((2, 2)), ByteRangeSpec::Last(1).to_satisfiable_range(3));
assert_eq!(Some((0, 2)), ByteRangeSpec::Last(5).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::Last(0).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::Last(2).to_satisfiable_range(0));
}
}

View File

@ -7,7 +7,7 @@ use http::header::{HeaderName, HeaderValue};
/// A set of HTTP headers
///
/// `HeaderMap` is an multimap of [`HeaderName`] to values.
/// `HeaderMap` is an multi-map of [`HeaderName`] to values.
///
/// [`HeaderName`]: struct.HeaderName.html
#[derive(Debug, Clone)]

View File

@ -137,17 +137,22 @@ impl FromStr for Charset {
}
}
#[test]
fn test_parse() {
assert_eq!(Us_Ascii, "us-ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-Ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-ASCII".parse().unwrap());
assert_eq!(Shift_Jis, "Shift-JIS".parse().unwrap());
assert_eq!(Ext("ABCD".to_owned()), "abcd".parse().unwrap());
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_display() {
assert_eq!("US-ASCII", format!("{}", Us_Ascii));
assert_eq!("ABCD", format!("{}", Ext("ABCD".to_owned())));
#[test]
fn test_parse() {
assert_eq!(Us_Ascii, "us-ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-Ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-ASCII".parse().unwrap());
assert_eq!(Shift_Jis, "Shift-JIS".parse().unwrap());
assert_eq!(Ext("ABCD".to_owned()), "abcd".parse().unwrap());
}
#[test]
fn test_display() {
assert_eq!("US-ASCII", format!("{}", Us_Ascii));
assert_eq!("ABCD", format!("{}", Ext("ABCD".to_owned())));
}
}

View File

@ -5,7 +5,7 @@ use std::time::{SystemTime, UNIX_EPOCH};
use bytes::{buf::BufMutExt, BytesMut};
use http::header::{HeaderValue, InvalidHeaderValue};
use time::{PrimitiveDateTime, OffsetDateTime, offset};
use time::{offset, OffsetDateTime, PrimitiveDateTime};
use crate::error::ParseError;
use crate::header::IntoHeaderValue;
@ -20,8 +20,8 @@ impl FromStr for HttpDate {
fn from_str(s: &str) -> Result<HttpDate, ParseError> {
match time_parser::parse_http_date(s) {
Some(t) => Ok(HttpDate(t.using_offset(offset!(UTC)))),
None => Err(ParseError::Header)
Some(t) => Ok(HttpDate(t.assume_utc())),
None => Err(ParseError::Header),
}
}
}
@ -40,7 +40,7 @@ impl From<OffsetDateTime> for HttpDate {
impl From<SystemTime> for HttpDate {
fn from(sys: SystemTime) -> HttpDate {
HttpDate(PrimitiveDateTime::from(sys).using_offset(offset!(UTC)))
HttpDate(PrimitiveDateTime::from(sys).assume_utc())
}
}
@ -49,7 +49,14 @@ impl IntoHeaderValue for HttpDate {
fn try_into(self) -> Result<HeaderValue, Self::Error> {
let mut wrt = BytesMut::with_capacity(29).writer();
write!(wrt, "{}", self.0.to_offset(offset!(UTC)).format("%a, %d %b %Y %H:%M:%S GMT")).unwrap();
write!(
wrt,
"{}",
self.0
.to_offset(offset!(UTC))
.format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
HeaderValue::from_maybe_shared(wrt.get_mut().split().freeze())
}
}
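A small sketch of the `time` 0.2 calls used above (the `date!`/`time!`/`offset!` macros, `assume_utc`, and the strftime-style `format`), applied to the classic RFC 7231 example date; an illustration under those assumptions, not code from this commit:

use time::{date, offset, time, PrimitiveDateTime};

fn main() {
    // Build a naive timestamp, pin it to UTC, then render it as an IMF-fixdate.
    let dt = PrimitiveDateTime::new(date!(1994 - 11 - 06), time!(8:49:37)).assume_utc();
    let formatted = dt
        .to_offset(offset!(UTC))
        .format("%a, %d %b %Y %H:%M:%S GMT");
    assert_eq!(formatted, "Sun, 06 Nov 1994 08:49:37 GMT");
}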
@ -66,14 +73,13 @@ impl From<HttpDate> for SystemTime {
#[cfg(test)]
mod tests {
use super::HttpDate;
use time::{PrimitiveDateTime, date, time, offset};
use time::{date, time, PrimitiveDateTime};
#[test]
fn test_date() {
let nov_07 = HttpDate(PrimitiveDateTime::new(
date!(1994-11-07),
time!(8:48:37)
).using_offset(offset!(UTC)));
let nov_07 = HttpDate(
PrimitiveDateTime::new(date!(1994 - 11 - 07), time!(8:48:37)).assume_utc(),
);
assert_eq!(
"Sun, 07 Nov 1994 08:48:37 GMT".parse::<HttpDate>().unwrap(),

View File

@ -1,173 +1,46 @@
use std::{io, mem, ptr, slice};
use std::io;
use bytes::{BufMut, BytesMut};
use http::Version;
use crate::extensions::Extensions;
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
const DIGITS_START: u8 = b'0';
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = [
b'H', b'T', b'T', b'P', b'/', b'1', b'.', b'1', b' ', b' ', b' ', b' ', b' ',
];
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(&buf);
if four {
bytes.put_u8(b' ');
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
// trailing space before reason
bytes.put_u8(b' ');
}
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r', b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
}
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
}
pub fn write_content_length(n: u64, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
let mut buf = itoa::Buffer::new();
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
bytes.put_slice(b"\r\ncontent-length: ");
bytes.put_slice(buf.format(n).as_bytes());
bytes.put_slice(b"\r\n");
}
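The rewrite above replaces the hand-rolled digit lookup table and pointer arithmetic with `itoa`. A tiny standalone sketch of that approach, assuming the `itoa` and `bytes` crates; illustration only:

use bytes::{BufMut, BytesMut};

fn content_length_header(n: u64) -> BytesMut {
    let mut bytes = BytesMut::with_capacity(32);
    // `itoa::Buffer` formats the integer into a stack buffer and returns a &str,
    // so no unsafe copying from a digit lookup table is needed.
    let mut buf = itoa::Buffer::new();
    bytes.put_slice(b"\r\ncontent-length: ");
    bytes.put_slice(buf.format(n).as_bytes());
    bytes.put_slice(b"\r\n");
    bytes
}

fn main() {
    assert_eq!(
        &content_length_header(59094718)[..],
        &b"\r\ncontent-length: 59094718\r\n"[..]
    );
}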
pub(crate) struct Writer<'a>(pub &'a mut BytesMut);
@ -196,8 +69,28 @@ impl<T: Clone + 'static> DataFactory for Data<T> {
#[cfg(test)]
mod tests {
use std::str::from_utf8;
use super::*;
#[test]
fn test_status_line() {
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_status_line(Version::HTTP_11, 200, &mut bytes);
assert_eq!(from_utf8(&bytes.split().freeze()).unwrap(), "HTTP/1.1 200 ");
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_status_line(Version::HTTP_09, 404, &mut bytes);
assert_eq!(from_utf8(&bytes.split().freeze()).unwrap(), "HTTP/0.9 404 ");
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_status_line(Version::HTTP_09, 515, &mut bytes);
assert_eq!(from_utf8(&bytes.split().freeze()).unwrap(), "HTTP/0.9 515 ");
}
#[test]
fn test_write_content_length() {
let mut bytes = BytesMut::new();
@ -231,5 +124,48 @@ mod tests {
bytes.reserve(50);
write_content_length(5909, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 5909\r\n"[..]);
bytes.reserve(50);
write_content_length(9999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9999\r\n"[..]);
bytes.reserve(50);
write_content_length(10001, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10001\r\n"[..]);
bytes.reserve(50);
write_content_length(59094, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 59094\r\n"[..]);
bytes.reserve(50);
write_content_length(99999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99999\r\n"[..]);
bytes.reserve(50);
write_content_length(590947, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 590947\r\n"[..]
);
bytes.reserve(50);
write_content_length(999999, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 999999\r\n"[..]
);
bytes.reserve(50);
write_content_length(5909471, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 5909471\r\n"[..]
);
bytes.reserve(50);
write_content_length(59094718, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 59094718\r\n"[..]
);
bytes.reserve(50);
write_content_length(4294973728, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 4294973728\r\n"[..]
);
}
}

View File

@ -1,5 +1,5 @@
//! Basic http primitives for actix-net framework.
#![deny(rust_2018_idioms, warnings)]
#![warn(rust_2018_idioms, warnings)]
#![allow(
clippy::type_complexity,
clippy::too_many_arguments,
@ -10,6 +10,9 @@
#[macro_use]
extern crate log;
#[macro_use]
mod macros;
pub mod body;
mod builder;
pub mod client;

95
actix-http/src/macros.rs Normal file
View File

@ -0,0 +1,95 @@
#[macro_export]
macro_rules! downcast_get_type_id {
() => {
/// A helper method to get the type ID of the type
/// this trait is implemented on.
/// This method is unsafe to *implement*, since `downcast_ref` relies
/// on the returned `TypeId` to perform a cast.
///
/// Unfortunately, Rust has no notion of a trait method that is
/// unsafe to implement (marking it as `unsafe` makes it unsafe
/// to *call*). As a workaround, we require this method
/// to return a private type along with the `TypeId`. This
/// private type (`PrivateHelper`) has a private constructor,
/// making it impossible for safe code to construct outside of
/// this module. This ensures that safe code cannot violate
/// type-safety by implementing this method.
#[doc(hidden)]
fn __private_get_type_id__(&self) -> (std::any::TypeId, PrivateHelper)
where
Self: 'static,
{
(std::any::TypeId::of::<Self>(), PrivateHelper(()))
}
};
}
//Generate implementation for dyn $name
#[macro_export]
macro_rules! downcast {
($name:ident) => {
/// A struct with a private constructor, for use with
/// `__private_get_type_id__`. Its single field is private,
/// ensuring that it can only be constructed from this module
#[doc(hidden)]
pub struct PrivateHelper(());
impl dyn $name + 'static {
/// Downcasts generic body to a specific type.
pub fn downcast_ref<T: $name + 'static>(&self) -> Option<&T> {
if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() {
// Safety: external crates cannot override the default
// implementation of `__private_get_type_id__`, since
// it requires returning a private type. We can therefore
// rely on the returned `TypeId`, which ensures that this
// case is correct.
unsafe { Some(&*(self as *const dyn $name as *const T)) }
} else {
None
}
}
/// Downcasts a generic body to a mutable specific type.
pub fn downcast_mut<T: $name + 'static>(&mut self) -> Option<&mut T> {
if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() {
// Safety: external crates cannot override the default
// implementation of `__private_get_type_id__`, since
// it requires returning a private type. We can therefore
// rely on the returned `TypeId`, which ensures that this
// case is correct.
unsafe {
Some(&mut *(self as *const dyn $name as *const T as *mut T))
}
} else {
None
}
}
}
};
}
#[cfg(test)]
mod tests {
trait MB {
downcast_get_type_id!();
}
downcast!(MB);
impl MB for String {}
impl MB for () {}
#[actix_rt::test]
async fn test_any_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MB = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push_str("!");
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}

View File

@ -99,13 +99,13 @@ impl RequestHead {
}
/// Is to uppercase headers with Camel-Case.
/// Befault is `false`
/// Default is `false`
#[inline]
pub fn camel_case_headers(&self) -> bool {
self.flags.contains(Flags::CAMEL_CASE)
}
/// Set `true` to send headers which are uppercased with Camel-Case.
/// Set `true` to send headers which are formatted as Camel-Case.
#[inline]
pub fn set_camel_case_headers(&mut self, val: bool) {
if val {

View File

@ -9,7 +9,6 @@ use std::{fmt, str};
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use serde::Serialize;
use serde_json;
use crate::body::{Body, BodyStream, MessageBody, ResponseBody};
use crate::cookie::{Cookie, CookieJar};
@ -473,7 +472,9 @@ impl ResponseBuilder {
/// Disable chunked transfer encoding for HTTP/1.1 streaming responses.
#[inline]
pub fn no_chunking(&mut self) -> &mut Self {
pub fn no_chunking(&mut self, len: u64) -> &mut Self {
self.header(header::CONTENT_LENGTH, len);
if let Some(parts) = parts(&mut self.head, &self.err) {
parts.no_chunking(true);
}
@ -498,12 +499,6 @@ impl ResponseBuilder {
self
}
/// Set content length
#[inline]
pub fn content_length(&mut self, len: u64) -> &mut Self {
self.header(header::CONTENT_LENGTH, len)
}
/// Set a cookie
///
/// ```rust
@ -637,7 +632,7 @@ impl ResponseBuilder {
/// `ResponseBuilder` can not be used after this call.
pub fn streaming<S, E>(&mut self, stream: S) -> Response
where
S: Stream<Item = Result<Bytes, E>> + 'static,
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
self.body(Body::from_message(BodyStream::new(stream)))
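Given the change above (`no_chunking` now takes the body length and sets `Content-Length` itself, replacing the removed `content_length` method, while `streaming` now requires an `Unpin` stream), here is a hedged usage sketch for a streaming response of known size; the names follow this diff, but treat it as an illustration:

use actix_http::{Error, Response};
use bytes::Bytes;
use futures_util::future::ok;
use futures_util::stream::once;

fn fixed_length_stream_response() -> Response {
    let payload = Bytes::from_static(b"hello world");
    let len = payload.len() as u64;
    // `no_chunking(len)` disables chunked transfer encoding and emits
    // `Content-Length: len` for the one-shot stream below.
    Response::Ok()
        .no_chunking(len)
        .streaming(once(ok::<_, Error>(payload)))
}

fn main() {
    let res = fixed_length_stream_response();
    assert!(res.status().is_success());
}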

View File

@ -10,7 +10,7 @@ use bytes::Bytes;
use futures_core::{ready, Future};
use futures_util::future::ok;
use h2::server::{self, Handshake};
use pin_project::{pin_project, project};
use pin_project::pin_project;
use crate::body::MessageBody;
use crate::builder::HttpServiceBuilder;
@ -574,7 +574,7 @@ where
}
}
#[pin_project]
#[pin_project(project = StateProj)]
enum State<T, S, B, X, U>
where
S: Service<Request = Request>,
@ -650,16 +650,14 @@ where
U: Service<Request = (Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display,
{
#[project]
fn poll(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), DispatchError>> {
#[project]
match self.as_mut().project() {
State::H1(disp) => disp.poll(cx),
State::H2(disp) => disp.poll(cx),
State::H2Handshake(ref mut data) => {
StateProj::H1(disp) => disp.poll(cx),
StateProj::H2(disp) => disp.poll(cx),
StateProj::H2Handshake(ref mut data) => {
let conn = if let Some(ref mut item) = data {
match Pin::new(&mut item.0).poll(cx) {
Poll::Ready(Ok(conn)) => conn,
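The `project = StateProj` attribute seen in this file names the generated projection enum so it can be matched directly, replacing the older `#[project]` attribute on the poll function. A self-contained sketch with hypothetical types, assuming the `pin-project` crate:

use std::pin::Pin;
use pin_project::pin_project;

#[pin_project(project = StateProj)]
enum State<A, B> {
    First(#[pin] A),
    Second(B),
}

fn describe<A, B>(state: Pin<&mut State<A, B>>) -> &'static str {
    // The named projection enum can be matched without any extra attribute
    // on the enclosing function.
    match state.project() {
        StateProj::First(_pinned) => "first (pinned field)",
        StateProj::Second(_unpinned) => "second",
    }
}

fn main() {
    let mut st = Box::pin(State::<u8, u8>::Second(1));
    assert_eq!(describe(st.as_mut()), "second");
}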

View File

@ -1,4 +1,4 @@
use time::{PrimitiveDateTime, Date};
use time::{Date, OffsetDateTime, PrimitiveDateTime};
/// Attempt to parse a `time` string as one of either RFC 1123, RFC 850, or asctime.
pub fn parse_http_date(time: &str) -> Option<PrimitiveDateTime> {
@ -19,7 +19,7 @@ fn try_parse_rfc_850(time: &str) -> Option<PrimitiveDateTime> {
// If the `time` string contains a two-digit year, then as per RFC 2616 § 19.3,
// we consider the year as part of this century if it's within the next 50 years,
// otherwise we consider as part of the previous century.
let now = PrimitiveDateTime::now();
let now = OffsetDateTime::now_utc();
let century_start_year = (now.year() / 100) * 100;
let mut expanded_year = century_start_year + dt.year();
@ -29,10 +29,10 @@ fn try_parse_rfc_850(time: &str) -> Option<PrimitiveDateTime> {
match Date::try_from_ymd(expanded_year, dt.month(), dt.day()) {
Ok(date) => Some(PrimitiveDateTime::new(date, dt.time())),
Err(_) => None
Err(_) => None,
}
}
Err(_) => None
Err(_) => None,
}
}
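As a worked example of the RFC 2616 §19.3 rule described in the comment above (a rough standalone sketch, not the crate's exact code): a two-digit year is placed in the current century unless that would land more than 50 years in the future, in which case it belongs to the previous century.

fn expand_two_digit_year(two_digit: i32, current_year: i32) -> i32 {
    let century_start = (current_year / 100) * 100;
    let candidate = century_start + two_digit;
    if candidate > current_year + 50 {
        candidate - 100
    } else {
        candidate
    }
}

fn main() {
    // With a current year of 2020: "94" -> 1994, "30" -> 2030, "85" -> 1985.
    assert_eq!(expand_two_digit_year(94, 2020), 1994);
    assert_eq!(expand_two_digit_year(30, 2020), 2030);
    assert_eq!(expand_two_digit_year(85, 2020), 1985);
}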

View File

@ -137,7 +137,7 @@ impl Encoder for Codec {
Parser::write_message(
dst,
&data[..],
OpCode::Binary,
OpCode::Text,
false,
!self.flags.contains(Flags::SERVER),
)
@ -151,7 +151,7 @@ impl Encoder for Codec {
Parser::write_message(
dst,
&data[..],
OpCode::Text,
OpCode::Binary,
false,
!self.flags.contains(Flags::SERVER),
)

View File

@ -2,7 +2,6 @@ use std::convert::TryFrom;
use bytes::{Buf, BufMut, BytesMut};
use log::debug;
use rand;
use crate::ws::mask::apply_mask;
use crate::ws::proto::{CloseCode, CloseReason, OpCode};

View File

@ -58,6 +58,8 @@ pub enum ProtocolError {
Io(io::Error),
}
impl std::error::Error for ProtocolError {}
impl ResponseError for ProtocolError {}
/// Websocket handshake errors
@ -108,7 +110,7 @@ impl ResponseError for HandshakeError {
}
}
/// Verify `WebSocket` handshake request and create handshake reponse.
/// Verify `WebSocket` handshake request and create handshake response.
// /// `protocols` is a sequence of known protocols. On successful handshake,
// /// the returned response headers contain the first protocol in this list
// /// which the server also knows.
@ -168,7 +170,7 @@ pub fn verify_handshake(req: &RequestHead) -> Result<(), HandshakeError> {
Ok(())
}
/// Create websocket's handshake response
/// Create websocket handshake response
///
/// This function returns handshake `Response`, ready to send to peer.
pub fn handshake_response(req: &RequestHead) -> ResponseBuilder {

View File

@ -1,5 +1,3 @@
use base64;
use sha1;
use std::convert::{From, Into};
use std::fmt;
@ -205,7 +203,7 @@ impl<T: Into<String>> From<(CloseCode, T)> for CloseReason {
static WS_GUID: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
// TODO: hash is always same size, we dont need String
// TODO: hash is always same size, we don't need String
pub fn hash_key(key: &[u8]) -> String {
use sha1::Digest;
let mut hasher = sha1::Sha1::new();
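The hunk is cut off here. For context, a hedged sketch of the standard Sec-WebSocket-Accept derivation this function performs (SHA-1 over the client key followed by the GUID, then base64), assuming a `sha-1` release whose `Digest` trait exposes `update`/`finalize` and the `base64` crate's top-level `encode`; an illustration, not this crate's exact code:

use sha1::{Digest, Sha1};

const WS_GUID: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";

fn accept_key(client_key: &[u8]) -> String {
    let mut hasher = Sha1::new();
    hasher.update(client_key);
    hasher.update(WS_GUID.as_bytes());
    base64::encode(hasher.finalize())
}

fn main() {
    // Example key and expected accept value from RFC 6455 §1.3.
    assert_eq!(
        accept_key(b"dGhlIHNhbXBsZSBub25jZQ=="),
        "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
    );
}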

View File

@ -1,6 +1,6 @@
use actix_service::ServiceFactory;
use bytes::Bytes;
use futures::future::{self, ok};
use futures_util::future::{self, ok};
use actix_http::{http, HttpService, Request, Response};
use actix_http_test::test_server;
@ -33,7 +33,8 @@ async fn test_h1_v2() {
HttpService::build()
.finish(|_| future::ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
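Throughout these test diffs, `test_server` now returns a future and must be awaited. A complete minimal test in the updated style, assembled from the patterns shown in this file (treat it as a sketch):

use actix_http::{HttpService, Response};
use actix_http_test::test_server;
use futures_util::future::ok;

#[actix_rt::test]
async fn hello_world() {
    // The server factory is awaited before requests are issued against it.
    let mut srv = test_server(|| {
        HttpService::build()
            .h1(|_| ok::<_, ()>(Response::Ok().body("hello")))
            .tcp()
    })
    .await;

    let response = srv.get("/").send().await.unwrap();
    assert!(response.status().is_success());
}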
@ -61,7 +62,8 @@ async fn test_connection_close() {
.finish(|_| ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
.map(|_| ())
});
})
.await;
let response = srv.get("/").force_close().send().await.unwrap();
assert!(response.status().is_success());
@ -80,7 +82,8 @@ async fn test_with_query_parameter() {
})
.tcp()
.map(|_| ())
});
})
.await;
let request = srv.request(http::Method::GET, srv.url("/?qp=5"));
let response = request.send().await.unwrap();

View File

@ -5,8 +5,8 @@ use actix_http_test::test_server;
use actix_service::{fn_service, ServiceFactory};
use bytes::{Bytes, BytesMut};
use futures::future::{err, ok, ready};
use futures::stream::{once, Stream, StreamExt};
use futures_util::future::{err, ok, ready};
use futures_util::stream::{once, Stream, StreamExt};
use open_ssl::ssl::{AlpnError, SslAcceptor, SslFiletype, SslMethod};
use actix_http::error::{ErrorBadRequest, PayloadError};
@ -67,7 +67,8 @@ async fn test_h2() -> io::Result<()> {
.h2(|_| ok::<_, Error>(Response::Ok().finish()))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -85,7 +86,8 @@ async fn test_h2_1() -> io::Result<()> {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -97,15 +99,14 @@ async fn test_h2_body() -> io::Result<()> {
let data = "HELLOWORLD".to_owned().repeat(64 * 1024);
let mut srv = test_server(move || {
HttpService::build()
.h2(|mut req: Request<_>| {
async move {
let body = load_body(req.take_payload()).await?;
Ok::<_, Error>(Response::Ok().body(body))
}
.h2(|mut req: Request<_>| async move {
let body = load_body(req.take_payload()).await?;
Ok::<_, Error>(Response::Ok().body(body))
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send_body(data.clone()).await.unwrap();
assert!(response.status().is_success());
@ -133,7 +134,8 @@ async fn test_h2_content_length() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let header = HeaderName::from_static("content-length");
let value = HeaderValue::from_static("0");
@ -194,7 +196,7 @@ async fn test_h2_headers() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
}).await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -233,7 +235,8 @@ async fn test_h2_body2() {
.h2(|_| ok::<_, ()>(Response::Ok().body(STR)))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -250,7 +253,8 @@ async fn test_h2_head_empty() {
.finish(|_| ok::<_, ()>(Response::Ok().body(STR)))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -271,11 +275,12 @@ async fn test_h2_head_binary() {
let mut srv = test_server(move || {
HttpService::build()
.h2(|_| {
ok::<_, ()>(Response::Ok().content_length(STR.len() as u64).body(STR))
ok::<_, ()>(Response::Ok().body(STR))
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -297,7 +302,8 @@ async fn test_h2_head_binary2() {
.h2(|_| ok::<_, ()>(Response::Ok().body(STR)))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -320,7 +326,8 @@ async fn test_h2_body_length() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -344,7 +351,8 @@ async fn test_h2_body_chunked_explicit() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -371,7 +379,8 @@ async fn test_h2_response_http_error_handling() {
}))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR);
@ -388,7 +397,8 @@ async fn test_h2_service_error() {
.h2(|_| err::<Response, Error>(ErrorBadRequest("error")))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), StatusCode::BAD_REQUEST);
@ -409,7 +419,8 @@ async fn test_h2_on_connect() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());

View File

@ -7,8 +7,8 @@ use actix_http_test::test_server;
use actix_service::{fn_factory_with_config, fn_service};
use bytes::{Bytes, BytesMut};
use futures::future::{self, err, ok};
use futures::stream::{once, Stream, StreamExt};
use futures_util::future::{self, err, ok};
use futures_util::stream::{once, Stream, StreamExt};
use rust_tls::{
internal::pemfile::{certs, pkcs8_private_keys},
NoClientAuth, ServerConfig as RustlsServerConfig,
@ -45,7 +45,8 @@ async fn test_h1() -> io::Result<()> {
HttpService::build()
.h1(|_| future::ok::<_, Error>(Response::Ok().finish()))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -58,7 +59,8 @@ async fn test_h2() -> io::Result<()> {
HttpService::build()
.h2(|_| future::ok::<_, Error>(Response::Ok().finish()))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -75,7 +77,8 @@ async fn test_h1_1() -> io::Result<()> {
future::ok::<_, Error>(Response::Ok().finish())
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -92,7 +95,8 @@ async fn test_h2_1() -> io::Result<()> {
future::ok::<_, Error>(Response::Ok().finish())
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -104,14 +108,13 @@ async fn test_h2_body1() -> io::Result<()> {
let data = "HELLOWORLD".to_owned().repeat(64 * 1024);
let mut srv = test_server(move || {
HttpService::build()
.h2(|mut req: Request<_>| {
async move {
let body = load_body(req.take_payload()).await?;
Ok::<_, Error>(Response::Ok().body(body))
}
.h2(|mut req: Request<_>| async move {
let body = load_body(req.take_payload()).await?;
Ok::<_, Error>(Response::Ok().body(body))
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send_body(data.clone()).await.unwrap();
assert!(response.status().is_success());
@ -138,7 +141,8 @@ async fn test_h2_content_length() {
future::ok::<_, ()>(Response::new(statuses[indx]))
})
.rustls(ssl_acceptor())
});
})
.await;
let header = HeaderName::from_static("content-length");
let value = HeaderValue::from_static("0");
@ -197,7 +201,7 @@ async fn test_h2_headers() {
future::ok::<_, ()>(config.body(data.clone()))
})
.rustls(ssl_acceptor())
});
}).await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -235,7 +239,8 @@ async fn test_h2_body2() {
HttpService::build()
.h2(|_| future::ok::<_, ()>(Response::Ok().body(STR)))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -251,7 +256,8 @@ async fn test_h2_head_empty() {
HttpService::build()
.finish(|_| ok::<_, ()>(Response::Ok().body(STR)))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -275,10 +281,11 @@ async fn test_h2_head_binary() {
let mut srv = test_server(move || {
HttpService::build()
.h2(|_| {
ok::<_, ()>(Response::Ok().content_length(STR.len() as u64).body(STR))
ok::<_, ()>(Response::Ok().body(STR))
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -302,7 +309,8 @@ async fn test_h2_head_binary2() {
HttpService::build()
.h2(|_| ok::<_, ()>(Response::Ok().body(STR)))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -327,7 +335,8 @@ async fn test_h2_body_length() {
)
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -350,7 +359,8 @@ async fn test_h2_body_chunked_explicit() {
)
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -378,7 +388,8 @@ async fn test_h2_response_http_error_handling() {
}))
}))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::INTERNAL_SERVER_ERROR);
@ -394,7 +405,8 @@ async fn test_h2_service_error() {
HttpService::build()
.h2(|_| err::<Response, Error>(error::ErrorBadRequest("error")))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::BAD_REQUEST);
@ -410,7 +422,8 @@ async fn test_h1_service_error() {
HttpService::build()
.h1(|_| err::<Response, Error>(error::ErrorBadRequest("error")))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::BAD_REQUEST);

View File

@ -6,8 +6,8 @@ use actix_http_test::test_server;
use actix_rt::time::delay_for;
use actix_service::fn_service;
use bytes::Bytes;
use futures::future::{self, err, ok, ready, FutureExt};
use futures::stream::{once, StreamExt};
use futures_util::future::{self, err, ok, ready, FutureExt};
use futures_util::stream::{once, StreamExt};
use regex::Regex;
use actix_http::httpmessage::HttpMessage;
@ -27,7 +27,8 @@ async fn test_h1() {
future::ok::<_, ()>(Response::Ok().finish())
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -46,7 +47,8 @@ async fn test_h1_2() {
future::ok::<_, ()>(Response::Ok().finish())
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -65,7 +67,8 @@ async fn test_expect_continue() {
}))
.finish(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test HTTP/1.1\r\nexpect: 100-continue\r\n\r\n");
@ -95,7 +98,8 @@ async fn test_expect_continue_h1() {
}))
.h1(fn_service(|_| future::ok::<_, ()>(Response::Ok().finish())))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test HTTP/1.1\r\nexpect: 100-continue\r\n\r\n");
@ -130,7 +134,8 @@ async fn test_chunked_payload() {
})
}))
.tcp()
});
})
.await;
let returned_size = {
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
@ -172,7 +177,8 @@ async fn test_slow_request() {
.client_timeout(100)
.finish(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n");
@ -187,7 +193,8 @@ async fn test_http1_malformed_request() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP1.1\r\n");
@ -202,7 +209,8 @@ async fn test_http1_keepalive() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n\r\n");
@ -223,7 +231,8 @@ async fn test_http1_keepalive_timeout() {
.keep_alive(1)
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n\r\n");
@ -243,7 +252,8 @@ async fn test_http1_keepalive_close() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ =
@ -263,7 +273,8 @@ async fn test_http10_keepalive_default_close() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.0\r\n\r\n");
@ -282,7 +293,8 @@ async fn test_http10_keepalive() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream
@ -309,7 +321,8 @@ async fn test_http1_keepalive_disabled() {
.keep_alive(KeepAlive::Disabled)
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n\r\n");
@ -344,7 +357,8 @@ async fn test_content_length() {
future::ok::<_, ()>(Response::new(statuses[indx]))
})
.tcp()
});
})
.await;
let header = HeaderName::from_static("content-length");
let value = HeaderValue::from_static("0");
@ -397,7 +411,7 @@ async fn test_h1_headers() {
}
future::ok::<_, ()>(builder.body(data.clone()))
}).tcp()
});
}).await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -435,7 +449,8 @@ async fn test_h1_body() {
HttpService::build()
.h1(|_| ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -451,7 +466,8 @@ async fn test_h1_head_empty() {
HttpService::build()
.h1(|_| ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
});
})
.await;
let response = srv.head("/").send().await.unwrap();
assert!(response.status().is_success());
@ -474,10 +490,11 @@ async fn test_h1_head_binary() {
let mut srv = test_server(|| {
HttpService::build()
.h1(|_| {
ok::<_, ()>(Response::Ok().content_length(STR.len() as u64).body(STR))
ok::<_, ()>(Response::Ok().body(STR))
})
.tcp()
});
})
.await;
let response = srv.head("/").send().await.unwrap();
assert!(response.status().is_success());
@ -501,7 +518,8 @@ async fn test_h1_head_binary2() {
HttpService::build()
.h1(|_| ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
});
})
.await;
let response = srv.head("/").send().await.unwrap();
assert!(response.status().is_success());
@ -526,7 +544,8 @@ async fn test_h1_body_length() {
)
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -549,7 +568,8 @@ async fn test_h1_body_chunked_explicit() {
)
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -579,7 +599,8 @@ async fn test_h1_body_chunked_implicit() {
ok::<_, ()>(Response::Ok().streaming(body))
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -611,7 +632,8 @@ async fn test_h1_response_http_error_handling() {
)
}))
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::INTERNAL_SERVER_ERROR);
@ -627,7 +649,8 @@ async fn test_h1_service_error() {
HttpService::build()
.h1(|_| future::err::<Response, Error>(error::ErrorBadRequest("error")))
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::BAD_REQUEST);
@ -647,7 +670,8 @@ async fn test_h1_on_connect() {
future::ok::<_, ()>(Response::Ok().finish())
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());

View File

@ -1,4 +1,5 @@
use std::cell::Cell;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
@ -9,9 +10,9 @@ use actix_http_test::test_server;
use actix_service::{fn_factory, Service};
use actix_utils::framed::Dispatcher;
use bytes::Bytes;
use futures::future;
use futures::task::{Context, Poll};
use futures::{Future, SinkExt, StreamExt};
use futures_util::future;
use futures_util::task::{Context, Poll};
use futures_util::{SinkExt, StreamExt};
struct WsService<T>(Arc<Mutex<(PhantomData<T>, Cell<bool>)>>);
@ -93,7 +94,8 @@ async fn test_simple() {
.finish(|_| future::ok::<_, ()>(Response::NotFound()))
.tcp()
}
});
})
.await;
// client service
let mut framed = srv.ws().await.unwrap();

View File

@ -1,17 +0,0 @@
# Changes
## [Unreleased] - 2020-xx-xx
* Update the `time` dependency to 0.2.5
## [0.2.1] - 2020-01-10
* Fix panic with already borrowed: BorrowMutError #1263
## [0.2.0] - 2019-12-20
* Use actix-web 2.0
## [0.1.0] - 2019-06-xx
* Move identity middleware to separate crate

View File

@ -1,29 +0,0 @@
[package]
name = "actix-identity"
version = "0.2.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Identity service for actix web framework."
readme = "README.md"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-identity/"
license = "MIT/Apache-2.0"
edition = "2018"
[lib]
name = "actix_identity"
path = "src/lib.rs"
[dependencies]
actix-web = { version = "2.0.0", default-features = false, features = ["secure-cookies"] }
actix-service = "1.0.2"
futures = "0.3.1"
serde = "1.0"
serde_json = "1.0"
time = { version = "0.2.5", default-features = false, features = ["std"] }
[dev-dependencies]
actix-rt = "1.0.0"
actix-http = "1.0.1"
bytes = "0.5.3"

View File

@ -1 +0,0 @@
../LICENSE-APACHE

View File

@ -1 +0,0 @@
../LICENSE-MIT

View File

@ -1,5 +1,7 @@
# Identity service for actix web framework [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-identity)](https://crates.io/crates/actix-identity) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
**This crate moved to https://github.com/actix/actix-extras.**
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)

File diff suppressed because it is too large

View File

@ -1,9 +1,17 @@
# Changes
## [0.2.1] - 2020-01-xx
## [0.3.0-alpha.1] - 2020-05-25
* Update `actix-web` to 3.0.0-alpha.3
* Bump minimum supported Rust version to 1.40
* Minimize `futures` dependencies
* Remove the unused `time` dependency
* Fix missing `std::error::Error` implement for `MultipartError`.
## [0.2.0] - 2019-12-20
* Release

Some files were not shown because too many files have changed in this diff