Merge branch 'master' into auto-h2c

Rob Ede 2023-04-02 03:27:33 +01:00 committed by GitHub
commit c8c550e6e7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 482 additions and 748 deletions

View File

@@ -8,12 +8,16 @@ on:
permissions:
contents: read # to fetch code (actions/checkout)
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
check_benchmark:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1

View File

@@ -7,6 +7,10 @@ on:
permissions:
contents: read # to fetch code (actions/checkout)
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_and_test_nightly:
strategy:
@@ -29,7 +33,7 @@ jobs:
CARGO_UNSTABLE_SPARSE_REGISTRY: true
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
@@ -44,7 +48,7 @@ jobs:
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
toolchain: ${{ matrix.version }}
profile: minimal
override: true
@@ -81,7 +85,7 @@ jobs:
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.8.2 --no-default-features --features ci-autoclean
cargo install cargo-cache --version 0.8.3 --no-default-features --features ci-autoclean
cargo-cache
ci_feature_powerset_check:
@@ -93,7 +97,7 @@ jobs:
CARGO_INCREMENTAL: 0
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable
@@ -120,7 +124,7 @@ jobs:
CARGO_INCREMENTAL: 0
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable

View File

@@ -9,6 +9,10 @@ on:
permissions:
contents: read # to fetch code (actions/checkout)
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_and_test:
strategy:
@@ -17,7 +21,7 @@ jobs:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
version:
- 1.59.0 # MSRV
- stable
@@ -25,30 +29,22 @@ jobs:
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
env: {}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
if: matrix.target.os == 'windows-latest'
run: choco install openssl
- name: Set OpenSSL dir in env
if: matrix.target.os == 'windows-latest'
run: echo 'OPENSSL_DIR=C:\Program Files\OpenSSL-Win64' | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
- name: Install Rust (${{ matrix.version }})
uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
toolchain: ${{ matrix.version }}
- name: Install cargo-hack
uses: taiki-e/install-action@cargo-hack
@@ -60,12 +56,6 @@ jobs:
cargo add const-str@0.3 --dev -p=actix-web
cargo add const-str@0.3 --dev -p=awc
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: workaround MSRV issues
if: matrix.version != 'stable'
run: |
@@ -95,42 +85,34 @@ jobs:
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.8.2 --no-default-features --features ci-autoclean
cargo install cargo-cache --version 0.8.3 --no-default-features --features ci-autoclean
cargo-cache
io-uring:
name: io-uring tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable
- name: Generate Cargo.lock
run: cargo generate-lockfile
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1
with: { toolchain: nightly }
- name: tests (io-uring)
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=stable cargo test --lib --tests -p=actix-files --all-features"
sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=stable cargo test --lib --tests -p=actix-files --all-features"
rustdoc:
name: doc tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@nightly
- name: Generate Cargo.lock
run: cargo generate-lockfile
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1
with: { toolchain: nightly }
- name: doc tests
run: cargo ci-doctest

View File

@@ -4,46 +4,72 @@ on:
pull_request:
types: [opened, synchronize, reopened]
permissions:
contents: read # to fetch code (actions/checkout)
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: dtolnay/rust-toolchain@nightly
with: { components: rustfmt }
- uses: actions/checkout@v3
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
toolchain: nightly
components: rustfmt
- run: cargo fmt --all -- --check
clippy:
permissions:
checks: write # to add clippy checks to PR diffs
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable
- uses: actions-rust-lang/setup-rust-toolchain@v1
with: { components: clippy }
- name: Generate Cargo.lock
run: cargo generate-lockfile
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
- uses: giraffate/clippy-action@v1
with:
args: --workspace --tests --examples --all-features
token: ${{ secrets.GITHUB_TOKEN }}
reporter: 'github-pr-check'
github_token: ${{ secrets.GITHUB_TOKEN }}
clippy_flags: --workspace --all-features --tests --examples --bins -- -Dclippy::todo
lint-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable
- uses: actions-rust-lang/setup-rust-toolchain@v1
with: { components: rust-docs }
- name: Check for broken intra-doc links
uses: actions-rs/cargo@v1
env:
RUSTDOCFLAGS: "-D warnings"
env: { RUSTDOCFLAGS: "-D warnings" }
run: cargo doc --no-deps --all-features --workspace
public-api-diff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
command: doc
args: --no-deps --all-features --workspace
ref: ${{ github.base_ref }}
- uses: actions/checkout@v3
- uses: actions-rust-lang/setup-rust-toolchain@v1
with: { toolchain: nightly }
- uses: taiki-e/cache-cargo-install-action@v1
with: { tool: cargo-public-api }
- name: generate API diff
run: |
for f in $(find -mindepth 2 -maxdepth 2 -name Cargo.toml); do
cargo public-api --manifest-path "$f" diff ${{ github.event.pull_request.base.sha }}..${{ github.sha }}
done

View File

@@ -6,26 +6,23 @@ on:
push:
branches: [master]
permissions:
contents: read # to fetch code (actions/checkout)
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
# job currently (1st Feb 2022) segfaults
coverage:
name: coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- uses: actions-rust-lang/setup-rust-toolchain@v1
with: { toolchain: nightly }
- name: Generate coverage file
run: |

View File

@@ -4,7 +4,13 @@ on:
push:
branches: [master]
permissions: {}
permissions:
contents: read # to fetch code (actions/checkout)
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
permissions:
@@ -13,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@nightly

View File

@@ -4,46 +4,45 @@ use derive_more::Display;
/// Errors which can occur when serving static files.
#[derive(Debug, PartialEq, Eq, Display)]
pub enum FilesError {
/// Path is not a directory
/// Path is not a directory.
#[allow(dead_code)]
#[display(fmt = "Path is not a directory. Unable to serve static files")]
#[display(fmt = "path is not a directory. Unable to serve static files")]
IsNotDirectory,
/// Cannot render directory
#[display(fmt = "Unable to render directory without index file")]
/// Cannot render directory.
#[display(fmt = "unable to render directory without index file")]
IsDirectory,
}
/// Return `NotFound` for `FilesError`
impl ResponseError for FilesError {
/// Returns `404 Not Found`.
fn status_code(&self) -> StatusCode {
StatusCode::NOT_FOUND
}
}
#[allow(clippy::enum_variant_names)]
#[derive(Debug, PartialEq, Eq, Display)]
#[non_exhaustive]
pub enum UriSegmentError {
/// The segment started with the wrapped invalid character.
#[display(fmt = "The segment started with the wrapped invalid character")]
/// Segment started with the wrapped invalid character.
#[display(fmt = "segment started with invalid character: ('{_0}')")]
BadStart(char),
/// The segment contained the wrapped invalid character.
#[display(fmt = "The segment contained the wrapped invalid character")]
/// Segment contained the wrapped invalid character.
#[display(fmt = "segment contained invalid character ('{_0}')")]
BadChar(char),
/// The segment ended with the wrapped invalid character.
#[display(fmt = "The segment ended with the wrapped invalid character")]
/// Segment ended with the wrapped invalid character.
#[display(fmt = "segment ended with invalid character: ('{_0}')")]
BadEnd(char),
/// The path is not a valid UTF-8 string after doing percent decoding.
#[display(fmt = "The path is not a valid UTF-8 string after percent-decoding")]
/// Path is not a valid UTF-8 string after percent-decoding.
#[display(fmt = "path is not a valid UTF-8 string after percent-decoding")]
NotValidUtf8,
}
/// Return `BadRequest` for `UriSegmentError`
impl ResponseError for UriSegmentError {
/// Returns `400 Bad Request`.
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
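Taken together, the two impls above fix the crate's error-to-status mapping. A minimal sketch of the behavior they guarantee (type paths assumed; `ResponseError` brought into scope only for the method call):
use actix_web::{http::StatusCode, ResponseError as _};
// FilesError always maps to 404; UriSegmentError always maps to 400.
assert_eq!(FilesError::IsDirectory.status_code(), StatusCode::NOT_FOUND);
assert_eq!(UriSegmentError::BadChar('*').status_code(), StatusCode::BAD_REQUEST);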

View File

@@ -1,4 +1,36 @@
use derive_more::{Display, Error};
use std::fmt;
use derive_more::Error;
/// Copy of `http_range::HttpRangeParseError`.
#[derive(Debug, Clone)]
enum HttpRangeParseError {
InvalidRange,
NoOverlap,
}
impl From<http_range::HttpRangeParseError> for HttpRangeParseError {
fn from(err: http_range::HttpRangeParseError) -> Self {
match err {
http_range::HttpRangeParseError::InvalidRange => Self::InvalidRange,
http_range::HttpRangeParseError::NoOverlap => Self::NoOverlap,
}
}
}
#[derive(Debug, Clone, Error)]
#[non_exhaustive]
pub struct ParseRangeErr(#[error(not(source))] HttpRangeParseError);
impl fmt::Display for ParseRangeErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("invalid Range header: ")?;
f.write_str(match self.0 {
HttpRangeParseError::InvalidRange => "invalid syntax",
HttpRangeParseError::NoOverlap => "range starts after end of content",
})
}
}
/// HTTP Range header representation.
#[derive(Debug, Clone, Copy)]
@@ -10,26 +42,22 @@ pub struct HttpRange {
pub length: u64,
}
#[derive(Debug, Clone, Display, Error)]
#[display(fmt = "Parse HTTP Range failed")]
pub struct ParseRangeErr(#[error(not(source))] ());
impl HttpRange {
/// Parses a `Range` HTTP header string as per RFC 2616.
///
/// `header` is the HTTP `Range` header value (e.g. `bytes=0-9`).
/// `size` is the full size of the response (file).
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> {
match http_range::HttpRange::parse(header, size) {
Ok(ranges) => Ok(ranges
let ranges = http_range::HttpRange::parse(header, size)
.map_err(|err| ParseRangeErr(err.into()))?;
Ok(ranges
.iter()
.map(|range| HttpRange {
start: range.start,
length: range.length,
})
.collect()),
Err(_) => Err(ParseRangeErr(())),
}
.collect())
}
}
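A short usage sketch of the reworked parser, assuming the `HttpRange` shown above is the type re-exported by actix-files:
// "bytes=0-9" selects the first 10 bytes of a 100-byte body.
let ranges = HttpRange::parse("bytes=0-9", 100).unwrap();
assert_eq!((ranges[0].start, ranges[0].length), (0, 10));
// Syntax errors now surface the wrapped cause through Display.
let err = HttpRange::parse("bytes=abc", 100).unwrap_err();
assert_eq!(err.to_string(), "invalid Range header: invalid syntax");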

View File

@@ -34,7 +34,9 @@ use tokio::sync::mpsc;
/// ```no_run
/// use actix_http::HttpService;
/// use actix_http_test::test_server;
/// use actix_web::{web, App, HttpResponse, Error};
/// use actix_service::map_config;
/// use actix_service::ServiceFactoryExt;
/// use actix_web::{dev::AppConfig, web, App, Error, HttpResponse};
///
/// async fn my_handler() -> Result<HttpResponse, Error> {
/// Ok(HttpResponse::Ok().into())
@@ -42,14 +44,19 @@ use tokio::sync::mpsc;
///
/// #[actix_web::test]
/// async fn test_example() {
/// let mut srv = TestServer::start(||
/// HttpService::new(
/// App::new().service(web::resource("/").to(my_handler))
/// )
/// );
/// let srv = test_server(|| {
/// let app = App::new().service(web::resource("/").to(my_handler));
///
/// HttpService::build()
/// .h1(map_config(app, |_| AppConfig::default()))
/// .tcp()
/// .map_err(|_| ())
/// })
/// .await;
///
/// let req = srv.get("/");
/// let response = req.send().await.unwrap();
///
/// assert!(response.status().is_success());
/// }
/// ```

View File

@@ -2,6 +2,11 @@
## Unreleased - 2023-xx-xx
### Added
- Add `body::to_body_limit()` function.
- Add `body::BodyLimitExceeded` error type.
## 3.3.1 - 2023-03-02
### Fixed

View File

@@ -126,17 +126,6 @@ name = "ws"
required-features = ["ws", "rustls"]
[[bench]]
name = "write-camel-case"
harness = false
[[bench]]
name = "status-line"
harness = false
[[bench]]
name = "uninit-headers"
harness = false
[[bench]]
name = "quality-value"
name = "response-body-compression"
harness = false
required-features = ["compress-brotli", "compress-gzip", "compress-zstd"]

View File

@@ -1,97 +0,0 @@
#![allow(clippy::uninlined_format_args)]
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
const CODES: &[u16] = &[0, 1000, 201, 800, 550];
fn bench_quality_display_impls(c: &mut Criterion) {
let mut group = c.benchmark_group("quality value display impls");
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("New (fast?)", i), i, |b, &i| {
b.iter(|| _new::Quality(i).to_string())
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| _naive::Quality(i).to_string())
});
}
group.finish();
}
criterion_group!(benches, bench_quality_display_impls);
criterion_main!(benches);
mod _new {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
// some number in the range 1–999
x => {
f.write_str("0.")?;
// this implementation avoids string allocation otherwise required
// for `.trim_end_matches('0')`
if x < 10 {
f.write_str("00")?;
// 0 is handled so it's not possible to have a trailing 0, we can just return
itoa_fmt(f, x)
} else if x < 100 {
f.write_str("0")?;
if x % 10 == 0 {
// trailing 0, divide by 10 and write
itoa_fmt(f, x / 10)
} else {
itoa_fmt(f, x)
}
} else {
// x is in range 101–999
if x % 100 == 0 {
// two trailing 0s, divide by 100 and write
itoa_fmt(f, x / 100)
} else if x % 10 == 0 {
// one trailing 0, divide by 10 and write
itoa_fmt(f, x / 10)
} else {
itoa_fmt(f, x)
}
}
}
}
}
}
pub fn itoa_fmt<W: fmt::Write, V: itoa::Integer>(mut wr: W, value: V) -> fmt::Result {
let mut buf = itoa::Buffer::new();
wr.write_str(buf.format(value))
}
}
mod _naive {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
x => {
write!(f, "{}", format!("{:03}", x).trim_end_matches('0'))
}
}
}
}
}
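For reference, a few outputs the `_new` implementation above produces for the benched inputs, derived directly from its branches (sketch; not part of the deleted file):
assert_eq!(_new::Quality(550).to_string(), "0.55"); // one trailing zero trimmed
assert_eq!(_new::Quality(800).to_string(), "0.8"); // two trailing zeros trimmed
assert_eq!(_new::Quality(201).to_string(), "0.201"); // nothing to trim
assert_eq!(_new::Quality(1000).to_string(), "1"); // special-cased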

View File

@@ -0,0 +1,90 @@
#![allow(clippy::uninlined_format_args)]
use std::convert::Infallible;
use actix_http::{encoding::Encoder, ContentEncoding, Request, Response, StatusCode};
use actix_service::{fn_service, Service as _};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
static BODY: &[u8] = include_bytes!("../Cargo.toml");
fn compression_responses(c: &mut Criterion) {
let mut group = c.benchmark_group("compression responses");
group.bench_function("identity", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Identity,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("gzip", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Gzip,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("br", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Brotli,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.bench_function("zstd", |b| {
let rt = actix_rt::Runtime::new().unwrap();
let identity_svc = fn_service(|_: Request| async move {
let mut res = Response::with_body(StatusCode::OK, ());
let body = black_box(Encoder::response(
ContentEncoding::Zstd,
res.head_mut(),
BODY,
));
Ok::<_, Infallible>(black_box(res.set_body(black_box(body))))
});
b.iter(|| {
rt.block_on(identity_svc.call(Request::new())).unwrap();
});
});
group.finish();
}
criterion_group!(benches, compression_responses);
criterion_main!(benches);

View File

@@ -1,214 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
use http::Version;
const CODES: &[u16] = &[201, 303, 404, 515];
fn bench_write_status_line_11(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.1");
let version = Version::HTTP_11;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_10(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.0");
let version = Version::HTTP_10;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_09(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v0.9");
let version = Version::HTTP_09;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
criterion_group!(
benches,
bench_write_status_line_11,
bench_write_status_line_10,
bench_write_status_line_09
);
criterion_main!(benches);
mod _naive {
use bytes::{BufMut, BytesMut};
use http::Version;
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(n.to_string().as_bytes());
}
}
mod _new {
use bytes::{BufMut, BytesMut};
use http::Version;
const DIGITS_START: u8 = b'0';
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
bytes.put_u8(b' ');
}
}
mod _original {
use std::ptr;
use bytes::{BufMut, BytesMut};
use http::Version;
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1 ";
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => {}
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
}
bytes.put_slice(&buf);
if four {
bytes.put_u8(b' ');
}
}
}
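For reference, a sketch of the safe implementation's output (names as defined in the `_new` mod of the deleted bench above):
use bytes::BytesMut;
use http::Version;
let mut buf = BytesMut::with_capacity(35);
_new::write_status_line(Version::HTTP_11, 404, &mut buf);
// status line plus the trailing space that precedes the reason phrase
assert_eq!(buf, b"HTTP/1.1 404 "[..]);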

View File

@@ -1,135 +0,0 @@
use criterion::{criterion_group, criterion_main, Criterion};
use bytes::BytesMut;
// A Miri run detects UB, seen on this playground:
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=f5d9aa166aa48df8dca05fce2b6c3915
fn bench_header_parsing(c: &mut Criterion) {
c.bench_function("Original (Unsound) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_new::parse_headers(&mut buf);
})
});
c.bench_function("Original (Unsound) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_new::parse_headers(&mut buf);
})
});
}
criterion_group!(benches, bench_header_parsing);
criterion_main!(benches);
const MAX_HEADERS: usize = 96;
const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
#[derive(Clone, Copy)]
struct HeaderIndex {
name: (usize, usize),
value: (usize, usize),
}
const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
name: (0, 0),
value: (0, 0),
};
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = [EMPTY_HEADER_INDEX; MAX_HEADERS];
impl HeaderIndex {
fn record(bytes: &[u8], headers: &[httparse::Header<'_>], indices: &mut [HeaderIndex]) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
indices.name = (name_start, name_end);
let value_start = header.value.as_ptr() as usize - bytes_ptr;
let value_end = value_start + header.value.len();
indices.value = (value_start, value_end);
}
}
}
// test cases taken from:
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
const REQ_SHORT: &[u8] = b"\
GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Cookie: session=60; user_id=1\r\n\r\n";
const REQ: &[u8] = b"\
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
Host: www.kittyhell.com\r\n\
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\
Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\
Accept-Encoding: gzip,deflate\r\n\
Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\
Keep-Alive: 115\r\n\
Connection: keep-alive\r\n\
Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n";
mod _new {
use super::*;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}
mod _original {
use super::*;
use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize {
#![allow(invalid_value, clippy::uninit_assumed_init)]
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
#[allow(invalid_value)]
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}

View File

@@ -1,93 +0,0 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
fn bench_write_camel_case(c: &mut Criterion) {
let mut group = c.benchmark_group("write_camel_case");
let names = ["connection", "Transfer-Encoding", "transfer-encoding"];
for &i in &names {
let bts = i.as_bytes();
group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
_original::write_camel_case(black_box(bts), &mut buf)
});
});
group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
let len = black_box(bts.len());
_new::write_camel_case(black_box(bts), buf.as_mut_ptr(), len)
});
});
}
group.finish();
}
criterion_group!(benches, bench_write_camel_case);
criterion_main!(benches);
mod _new {
pub fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
// first copy entire (potentially wrong) slice to output
let buffer = unsafe {
std::ptr::copy_nonoverlapping(value.as_ptr(), buf, len);
std::slice::from_raw_parts_mut(buf, len)
};
let mut iter = value.iter();
// first character should be uppercase
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
index += 1;
}
}
}
mod _original {
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
}
}
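And a sketch of what the `_original` function above computes for a lowercase header name (derived from the shown code):
let mut buf = [0u8; 24];
_original::write_camel_case(b"transfer-encoding", &mut buf);
assert_eq!(&buf[..17], b"Transfer-Encoding");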

View File

@@ -22,4 +22,4 @@ pub(crate) use self::message_body::MessageBodyMapErr;
pub use self::none::None;
pub use self::size::BodySize;
pub use self::sized_stream::SizedStream;
pub use self::utils::to_bytes;
pub use self::utils::{to_bytes, to_bytes_limited, BodyLimitExceeded};

View File

@@ -3,75 +3,196 @@ use std::task::Poll;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use derive_more::{Display, Error};
use futures_core::ready;
use super::{BodySize, MessageBody};
/// Collects the body produced by a `MessageBody` implementation into `Bytes`.
/// Collects all the bytes produced by `body`.
///
/// Any errors produced by the body stream are returned immediately.
///
/// Consider using [`to_bytes_limited`] instead to protect against memory exhaustion.
///
/// # Examples
///
/// ```
/// use actix_http::body::{self, to_bytes};
/// use bytes::Bytes;
///
/// # async fn test_to_bytes() {
/// # actix_rt::System::new().block_on(async {
/// let body = body::None::new();
/// let bytes = to_bytes(body).await.unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes(body).await.unwrap();
/// assert_eq!(bytes, b"123"[..]);
/// # }
/// assert_eq!(bytes, "123");
/// # });
/// ```
pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
to_bytes_limited(body, usize::MAX)
.await
.expect("body should never yield more than usize::MAX bytes")
}
/// Error type returned from [`to_bytes_limited`] when body produced exceeds limit.
#[derive(Debug, Display, Error)]
#[display(fmt = "limit exceeded while collecting body bytes")]
#[non_exhaustive]
pub struct BodyLimitExceeded;
/// Collects the bytes produced by `body`, up to `limit` bytes.
///
/// If a chunk read from `poll_next` causes the total number of bytes read to exceed `limit`, an
/// `Err(BodyLimitExceeded)` is returned.
///
/// Any errors produced by the body stream are returned immediately as `Ok(Err(B::Error))`.
///
/// # Examples
///
/// ```
/// use actix_http::body::{self, to_bytes_limited};
/// use bytes::Bytes;
///
/// # actix_rt::System::new().block_on(async {
/// let body = body::None::new();
/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes_limited(body, 10).await.unwrap().unwrap();
/// assert_eq!(bytes, "123");
///
/// let body = Bytes::from_static(b"123");
/// assert!(to_bytes_limited(body, 2).await.is_err());
/// # });
/// ```
pub async fn to_bytes_limited<B: MessageBody>(
body: B,
limit: usize,
) -> Result<Result<Bytes, B::Error>, BodyLimitExceeded> {
/// Sensible default (32kB) for initial, bounded allocation when collecting body bytes.
const INITIAL_ALLOC_BYTES: usize = 32 * 1024;
let cap = match body.size() {
BodySize::None | BodySize::Sized(0) => return Ok(Bytes::new()),
BodySize::Sized(size) => size as usize,
// good enough first guess for chunk size
BodySize::Stream => 32_768,
BodySize::None | BodySize::Sized(0) => return Ok(Ok(Bytes::new())),
BodySize::Sized(size) if size as usize > limit => return Err(BodyLimitExceeded),
BodySize::Sized(size) => (size as usize).min(INITIAL_ALLOC_BYTES),
BodySize::Stream => INITIAL_ALLOC_BYTES,
};
let mut exceeded_limit = false;
let mut buf = BytesMut::with_capacity(cap);
pin!(body);
poll_fn(|cx| loop {
match poll_fn(|cx| loop {
let body = body.as_mut();
match ready!(body.poll_next(cx)) {
Some(Ok(bytes)) => buf.extend_from_slice(&bytes),
Some(Ok(bytes)) => {
// if limit is exceeded...
if buf.len() + bytes.len() > limit {
// ...set flag to true and break out of poll_fn
exceeded_limit = true;
return Poll::Ready(Ok(()));
}
buf.extend_from_slice(&bytes)
}
None => return Poll::Ready(Ok(())),
Some(Err(err)) => return Poll::Ready(Err(err)),
}
})
.await?;
.await
{
// propagate error returned from body poll
Err(err) => Ok(Err(err)),
Ok(buf.freeze())
// limit was exceeded while reading body
Ok(()) if exceeded_limit => Err(BodyLimitExceeded),
// otherwise return body buffer
Ok(()) => Ok(Ok(buf.freeze())),
}
}
#[cfg(test)]
mod test {
mod tests {
use std::io;
use futures_util::{stream, StreamExt as _};
use super::*;
use crate::{body::BodyStream, Error};
use crate::{
body::{BodyStream, SizedStream},
Error,
};
#[actix_rt::test]
async fn test_to_bytes() {
async fn to_bytes_complete() {
let bytes = to_bytes(()).await.unwrap();
assert!(bytes.is_empty());
let body = Bytes::from_static(b"123");
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123"[..]);
}
#[actix_rt::test]
async fn to_bytes_streams() {
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123abc"[..]);
}
#[actix_rt::test]
async fn to_bytes_limited_complete() {
let bytes = to_bytes_limited((), 0).await.unwrap().unwrap();
assert!(bytes.is_empty());
let bytes = to_bytes_limited((), 1).await.unwrap().unwrap();
assert!(bytes.is_empty());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 0)
.await
.is_err());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 1)
.await
.is_err());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 2).await.is_ok());
assert!(to_bytes_limited(Bytes::from_static(b"12"), 3).await.is_ok());
}
#[actix_rt::test]
async fn to_bytes_limited_streams() {
// hinting a larger body fails
let body = SizedStream::new(8, stream::empty().map(Ok::<_, Error>));
assert!(to_bytes_limited(body, 3).await.is_err());
// hinting a smaller body is okay
let body = SizedStream::new(3, stream::empty().map(Ok::<_, Error>));
assert!(to_bytes_limited(body, 3).await.unwrap().unwrap().is_empty());
// hinting a smaller body then returning a larger one fails
let stream = stream::iter(vec![Bytes::from_static(b"1234")]).map(Ok::<_, Error>);
let body = SizedStream::new(3, stream);
assert!(to_bytes_limited(body, 3).await.is_err());
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
assert!(to_bytes_limited(body, 3).await.is_err());
}
#[actix_rt::test]
async fn to_body_limit_error() {
let err_stream = stream::once(async { Err(io::Error::new(io::ErrorKind::Other, "")) });
let body = SizedStream::new(8, err_stream);
// not too big, but propagates error from body stream
assert!(to_bytes_limited(body, 10).await.unwrap().is_err());
}
}
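Worth noting for callers: `to_bytes_limited` separates the limit breach from body-stream failures via the nested `Result` shown in its signature above. A minimal sketch of the match shape (hypothetical helper function):
async fn collect(body: impl actix_http::body::MessageBody) {
    use actix_http::body::{to_bytes_limited, BodyLimitExceeded};
    match to_bytes_limited(body, 64 * 1024).await {
        Ok(Ok(bytes)) => drop(bytes),    // collected within the limit
        Ok(Err(_err)) => {}              // the body stream itself failed
        Err(BodyLimitExceeded) => {}     // over the limit; e.g. respond 413
    }
}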

View File

@@ -161,44 +161,44 @@ impl From<crate::ws::ProtocolError> for Error {
#[non_exhaustive]
pub enum ParseError {
/// An invalid `Method`, such as `GE.T`.
#[display(fmt = "Invalid Method specified")]
#[display(fmt = "invalid method specified")]
Method,
/// An invalid `Uri`, such as `exam ple.domain`.
#[display(fmt = "Uri error: {}", _0)]
#[display(fmt = "URI error: {}", _0)]
Uri(InvalidUri),
/// An invalid `HttpVersion`, such as `HTP/1.1`
#[display(fmt = "Invalid HTTP version specified")]
#[display(fmt = "invalid HTTP version specified")]
Version,
/// An invalid `Header`.
#[display(fmt = "Invalid Header provided")]
#[display(fmt = "invalid Header provided")]
Header,
/// A message head is too large to be reasonable.
#[display(fmt = "Message head is too large")]
#[display(fmt = "message head is too large")]
TooLarge,
/// A message reached EOF, but is not complete.
#[display(fmt = "Message is incomplete")]
#[display(fmt = "message is incomplete")]
Incomplete,
/// An invalid `Status`, such as `1337 ELITE`.
#[display(fmt = "Invalid Status provided")]
#[display(fmt = "invalid status provided")]
Status,
/// A timeout occurred waiting for an IO event.
#[allow(dead_code)]
#[display(fmt = "Timeout")]
#[display(fmt = "timeout")]
Timeout,
/// An `io::Error` that occurred while trying to read or write to a network stream.
#[display(fmt = "IO error: {}", _0)]
/// An I/O error that occurred while trying to read or write to a network stream.
#[display(fmt = "I/O error: {}", _0)]
Io(io::Error),
/// Parsing a field as string failed.
#[display(fmt = "UTF8 error: {}", _0)]
#[display(fmt = "UTF-8 error: {}", _0)]
Utf8(Utf8Error),
}
@@ -257,22 +257,19 @@ impl From<ParseError> for Response<BoxBody> {
#[non_exhaustive]
pub enum PayloadError {
/// A payload reached EOF, but is not complete.
#[display(
fmt = "A payload reached EOF, but is not complete. Inner error: {:?}",
_0
)]
#[display(fmt = "payload reached EOF before completing: {:?}", _0)]
Incomplete(Option<io::Error>),
/// Content encoding stream corruption.
#[display(fmt = "Can not decode content-encoding.")]
#[display(fmt = "can not decode content-encoding")]
EncodingCorrupted,
/// Payload reached size limit.
#[display(fmt = "Payload reached size limit.")]
#[display(fmt = "payload reached size limit")]
Overflow,
/// Payload length is unknown.
#[display(fmt = "Payload length is unknown.")]
#[display(fmt = "payload length is unknown")]
UnknownLength,
/// HTTP/2 payload error.
@@ -330,22 +327,23 @@ impl From<PayloadError> for Error {
#[non_exhaustive]
pub enum DispatchError {
/// Service error.
#[display(fmt = "Service Error")]
#[display(fmt = "service error")]
Service(Response<BoxBody>),
/// Body streaming error.
#[display(fmt = "Body error: {}", _0)]
#[display(fmt = "body error: {}", _0)]
Body(Box<dyn StdError>),
/// Upgrade service error.
#[display(fmt = "upgrade error")]
Upgrade,
/// An `io::Error` that occurred while trying to read or write to a network stream.
#[display(fmt = "IO error: {}", _0)]
#[display(fmt = "I/O error: {}", _0)]
Io(io::Error),
/// Request parse error.
#[display(fmt = "Request parse error: {}", _0)]
#[display(fmt = "request parse error: {}", _0)]
Parse(ParseError),
/// HTTP/2 error.
@@ -354,19 +352,19 @@ pub enum DispatchError {
H2(h2::Error),
/// The first request did not complete within the specified timeout.
#[display(fmt = "The first request did not complete within the specified timeout")]
#[display(fmt = "request did not complete within the specified timeout")]
SlowRequestTimeout,
/// Disconnect timeout. Makes sense for ssl streams.
#[display(fmt = "Connection shutdown timeout")]
/// Disconnect timeout. Makes sense for TLS streams.
#[display(fmt = "connection shutdown timeout")]
DisconnectTimeout,
/// Handler dropped payload before reading EOF.
#[display(fmt = "Handler dropped payload before reading EOF")]
#[display(fmt = "handler dropped payload before reading EOF")]
HandlerDroppedPayload,
/// Internal error.
#[display(fmt = "Internal error")]
#[display(fmt = "internal error")]
InternalError,
}
@@ -391,12 +389,12 @@ impl StdError for DispatchError {
#[cfg_attr(test, derive(PartialEq, Eq))]
#[non_exhaustive]
pub enum ContentTypeError {
/// Can not parse content type
#[display(fmt = "Can not parse content type")]
/// Can not parse content type.
#[display(fmt = "could not parse content type")]
ParseError,
/// Unknown content encoding
#[display(fmt = "Unknown content encoding")]
/// Unknown content encoding.
#[display(fmt = "unknown content encoding")]
UnknownEncoding,
}
@@ -424,7 +422,7 @@ mod tests {
let err: Error = ParseError::Io(orig).into();
assert_eq!(
format!("{}", err),
"error parsing HTTP message: IO error: other"
"error parsing HTTP message: I/O error: other"
);
}
@@ -451,7 +449,7 @@ mod tests {
let err = PayloadError::Incomplete(None);
assert_eq!(
err.to_string(),
"A payload reached EOF, but is not complete. Inner error: None"
"payload reached EOF before completing: None"
);
}
@@ -471,7 +469,7 @@ mod tests {
match ParseError::from($from) {
e @ $error => {
let desc = format!("{}", e);
assert_eq!(desc, format!("IO error: {}", $from));
assert_eq!(desc, format!("I/O error: {}", $from));
}
_ => unreachable!("{:?}", $from),
}
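A quick sanity sketch of the new lowercase messages, assuming `actix_http::error::ParseError` remains publicly reachable:
use actix_http::error::ParseError;
assert_eq!(ParseError::Method.to_string(), "invalid method specified");
assert_eq!(ParseError::TooLarge.to_string(), "message head is too large");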

View File

@@ -26,39 +26,39 @@ pub use self::proto::{hash_key, CloseCode, CloseReason, OpCode};
#[derive(Debug, Display, Error, From)]
pub enum ProtocolError {
/// Received an unmasked frame from client.
#[display(fmt = "Received an unmasked frame from client.")]
#[display(fmt = "received an unmasked frame from client")]
UnmaskedFrame,
/// Received a masked frame from server.
#[display(fmt = "Received a masked frame from server.")]
#[display(fmt = "received a masked frame from server")]
MaskedFrame,
/// Encountered invalid opcode.
#[display(fmt = "Invalid opcode: {}.", _0)]
#[display(fmt = "invalid opcode ({})", _0)]
InvalidOpcode(#[error(not(source))] u8),
/// Invalid control frame length
#[display(fmt = "Invalid control frame length: {}.", _0)]
#[display(fmt = "invalid control frame length ({})", _0)]
InvalidLength(#[error(not(source))] usize),
/// Bad opcode.
#[display(fmt = "Bad opcode.")]
#[display(fmt = "bad opcode")]
BadOpCode,
/// A payload reached size limit.
#[display(fmt = "A payload reached size limit.")]
#[display(fmt = "payload reached size limit")]
Overflow,
/// Continuation is not started.
#[display(fmt = "Continuation is not started.")]
/// Continuation has not started.
#[display(fmt = "continuation has not started")]
ContinuationNotStarted,
/// Received new continuation but it is already started.
#[display(fmt = "Received new continuation but it is already started.")]
#[display(fmt = "received new continuation but it has already started")]
ContinuationStarted,
/// Unknown continuation fragment.
#[display(fmt = "Unknown continuation fragment: {}.", _0)]
#[display(fmt = "unknown continuation fragment: {}", _0)]
ContinuationFragment(#[error(not(source))] OpCode),
/// I/O error.
@@ -70,27 +70,27 @@ pub enum ProtocolError {
#[derive(Debug, Clone, Copy, PartialEq, Eq, Display, Error)]
pub enum HandshakeError {
/// Only get method is allowed.
#[display(fmt = "Method not allowed.")]
#[display(fmt = "method not allowed")]
GetMethodRequired,
/// Upgrade header is not set to WebSocket.
#[display(fmt = "WebSocket upgrade is expected.")]
#[display(fmt = "WebSocket upgrade is expected")]
NoWebsocketUpgrade,
/// Connection header is not set to upgrade.
#[display(fmt = "Connection upgrade is expected.")]
#[display(fmt = "connection upgrade is expected")]
NoConnectionUpgrade,
/// WebSocket version header is not set.
#[display(fmt = "WebSocket version header is required.")]
#[display(fmt = "WebSocket version header is required")]
NoVersionHeader,
/// Unsupported WebSocket version.
#[display(fmt = "Unsupported WebSocket version.")]
#[display(fmt = "unsupported WebSocket version")]
UnsupportedVersion,
/// WebSocket key is not set or wrong.
#[display(fmt = "Unknown websocket key.")]
#[display(fmt = "unknown WebSocket key")]
BadWebsocketKey,
}

View File

@@ -39,13 +39,13 @@ impl WsService {
#[derive(Debug, Display, Error, From)]
enum WsServiceError {
#[display(fmt = "http error")]
#[display(fmt = "HTTP error")]
Http(actix_http::Error),
#[display(fmt = "ws handshake error")]
#[display(fmt = "WS handshake error")]
Ws(actix_http::ws::HandshakeError),
#[display(fmt = "io error")]
#[display(fmt = "I/O error")]
Io(std::io::Error),
#[display(fmt = "dispatcher error")]

View File

@@ -2,10 +2,18 @@
## Unreleased - 2023-xx-xx
### Added
- Add `Resource::{get, post, etc...}` methods for more concisely adding routes that don't need additional guards.
### Changed
- Handler functions can now receive up to 16 extractor parameters.
## 4.3.1 - 2023-02-26
### Added
- Add support for custom methods with the `#[route]` macro. [#2969]
[#2969]: https://github.com/actix/actix-web/pull/2969

View File

@@ -152,7 +152,7 @@ mod tests {
let resp_err: &dyn ResponseError = &err;
let err = resp_err.downcast_ref::<PayloadError>().unwrap();
assert_eq!(err.to_string(), "Payload reached size limit.");
assert_eq!(err.to_string(), "payload reached size limit");
let not_err = resp_err.downcast_ref::<ContentTypeError>();
assert!(not_err.is_none());

View File

@@ -416,6 +416,10 @@ mod tuple_from_req {
tuple_from_req! { TupleFromRequest10; A, B, C, D, E, F, G, H, I, J }
tuple_from_req! { TupleFromRequest11; A, B, C, D, E, F, G, H, I, J, K }
tuple_from_req! { TupleFromRequest12; A, B, C, D, E, F, G, H, I, J, K, L }
tuple_from_req! { TupleFromRequest13; A, B, C, D, E, F, G, H, I, J, K, L, M }
tuple_from_req! { TupleFromRequest14; A, B, C, D, E, F, G, H, I, J, K, L, M, N }
tuple_from_req! { TupleFromRequest15; A, B, C, D, E, F, G, H, I, J, K, L, M, N, O }
tuple_from_req! { TupleFromRequest16; A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P }
}
#[cfg(test)]

View File

@@ -151,6 +151,10 @@ factory_tuple! { A B C D E F G H I }
factory_tuple! { A B C D E F G H I J }
factory_tuple! { A B C D E F G H I J K }
factory_tuple! { A B C D E F G H I J K L }
factory_tuple! { A B C D E F G H I J K L M }
factory_tuple! { A B C D E F G H I J K L M N }
factory_tuple! { A B C D E F G H I J K L M N O }
factory_tuple! { A B C D E F G H I J K L M N O P }
#[cfg(test)]
mod tests {
@@ -167,6 +171,7 @@ mod tests {
async fn handler_max(
_01: (), _02: (), _03: (), _04: (), _05: (), _06: (),
_07: (), _08: (), _09: (), _10: (), _11: (), _12: (),
_13: (), _14: (), _15: (), _16: (),
) {}
assert_impl_handler(handler_min);