Merge branch 'master' into feat/on_stop_hook

fakeshadow 2021-04-26 09:45:14 -07:00 committed by GitHub
commit b6c477f26c
GPG Key ID: 4AEE18F83AFDEB23
89 changed files with 3190 additions and 2396 deletions

3
.cargo/config.toml Normal file

@ -0,0 +1,3 @@
[alias]
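# Both aliases rely on the `hack` subcommand from cargo-hack (install with `cargo install cargo-hack`);
# invoke them as `cargo chk` and `cargo lint`.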
chk = "hack check --workspace --all-features --tests --examples"
lint = "hack --clean-per-run clippy --workspace --tests --examples"

131
.github/workflows/ci.yml vendored Normal file

@ -0,0 +1,131 @@
name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [master]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
- { name: Windows (32-bit), os: windows-latest, triple: i686-pc-windows-msvc }
version:
- 1.46.0 # MSRV
- stable
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
VCPKGRS_DYNAMIC: 1
steps:
- name: Setup Routing
if: matrix.target.os == 'macos-latest'
run: sudo ifconfig lo0 alias 127.0.0.3
- uses: actions/checkout@v2
# install OpenSSL on Windows
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc' || matrix.target.triple == 'i686-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install OpenSSL
if: matrix.target.triple == 'i686-pc-windows-msvc'
run: vcpkg install openssl:x86-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
# - name: Install MSYS2
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# uses: msys2/setup-msys2@v2
# - name: Install MinGW Packages
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# run: |
# msys2 -c 'pacman -Sy --noconfirm pacman'
# msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
# - name: Generate Cargo.lock
# uses: actions-rs/cargo@v1
# with:
# command: generate-lockfile
# - name: Cache Dependencies
# uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with:
command: hack
args: check --workspace --no-default-features
- name: check minimal + tests
uses: actions-rs/cargo@v1
with:
command: hack
args: check --workspace --no-default-features --tests --examples
- name: check default
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --tests --examples
- name: check full
# TODO: compile OpenSSL and run tests on MinGW
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --all-features --tests --examples
- name: tests
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --all-features --no-fail-fast -- --nocapture
- name: Generate coverage file
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml --verbose
- name: Upload to Codecov
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
uses: codecov/codecov-action@v1
with:
file: cobertura.xml
- name: Clear the cargo caches
run: |
cargo install cargo-cache --no-default-features --features ci-autoclean
cargo-cache

View File

@ -1,34 +1,42 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
name: Clippy and rustfmt Check
jobs:
clippy_check:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
components: rustfmt
profile: minimal
components: rustfmt
override: true
- name: Check with rustfmt
- name: Rustfmt Check
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- uses: actions-rs/toolchain@v1
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
components: clippy
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Check with Clippy
- name: Clippy Check
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --tests
args: --workspace --tests --all-features

View File

@ -1,82 +0,0 @@
name: CI (Linux)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.46.0
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with:
command: generate-lockfile
- name: Cache cargo dirs
uses: actions/cache@v2
with:
path:
~/.cargo/registry
~/.cargo/git
~/.cargo/bin
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo build
uses: actions/cache@v2
with:
path: target
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-build-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml --workspace
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml
- name: Clear the cargo caches
run: |
rustup update stable
rustup override set stable
cargo install cargo-cache --no-default-features --features ci-autoclean
cargo-cache

View File

@ -1,43 +0,0 @@
name: CI (macOS)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-apple-darwin
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture

35
.github/workflows/upload-doc.yml vendored Normal file

@ -0,0 +1,35 @@
name: Upload documentation
on:
push:
branches: [master]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Build Docs
uses: actions-rs/cargo@v1
with:
command: doc
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo '<meta http-equiv="refresh" content="0;url=actix_server/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc

View File

@ -1,45 +0,0 @@
name: CI (Windows-mingw)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-pc-windows-gnu
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-pc-windows-gnu
profile: minimal
override: true
- name: Install MSYS2
uses: msys2/setup-msys2@v2
- name: Install packages
run: |
msys2 -c 'pacman -Sy --noconfirm pacman'
msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests

View File

@ -1,69 +0,0 @@
name: CI (Windows)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
target:
- x86_64-pc-windows-msvc
- i686-pc-windows-msvc
name: ${{ matrix.version }} - ${{ matrix.target }}
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target }}
profile: minimal
override: true
- name: Install OpenSSL (x64)
if: matrix.target == 'x86_64-pc-windows-msvc'
run: |
vcpkg integrate install
vcpkg install openssl:x64-windows
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
- name: Install OpenSSL (x86)
if: matrix.target == 'i686-pc-windows-msvc'
run: |
vcpkg integrate install
vcpkg install openssl:x86-windows
Get-ChildItem C:\vcpkg\installed\x86-windows\bin
Get-ChildItem C:\vcpkg\installed\x86-windows\lib
Copy-Item C:\vcpkg\installed\x86-windows\bin\libcrypto-1_1.dll C:\vcpkg\installed\x86-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x86-windows\bin\libssl-1_1.dll C:\vcpkg\installed\x86-windows\bin\libssl.dll
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture

View File

@ -10,6 +10,8 @@ members = [
"actix-tracing",
"actix-utils",
"bytestring",
"local-channel",
"local-waker",
]
[patch.crates-io]
@ -23,3 +25,5 @@ actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }
local-channel = { path = "local-channel" }
local-waker = { path = "local-waker" }

View File

@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Copyright 2017-NOW Actix Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
Copyright (c) 2017 Nikolay Kim
Copyright (c) 2017-NOW Actix Team
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated

View File

@ -3,6 +3,10 @@
## Unreleased - 2021-xx-xx
## 0.4.0 - 2021-04-20
* No significant changes since v0.4.0-beta.1.
## 0.4.0-beta.1 - 2020-12-28
* Replace `pin-project` with `pin-project-lite`. [#237]
* Upgrade `tokio` dependency to `1`. [#237]
@ -23,28 +27,28 @@
## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec` `.encode()` performance
* Simplify `BytesCodec` `.decode()`
* Improve `BytesCodec::encode()` performance.
* Simplify `BytesCodec::decode()`.
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` to check emptiness of read buffer.
## 0.2.0 - 2019-12-10
* Use specific futures dependencies
* Use specific futures dependencies.
## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation
* Fix buffer remaining capacity calculation.
## 0.2.0-alpha.3
* Use tokio 0.2
* Fix low/high watermark for write/read buffers
* Use tokio 0.2.
* Fix low/high watermark for write/read buffers.
## 0.2.0-alpha.2
* Migrated to `std::future`
* Migrated to `std::future`.
## 0.1.2 - 2019-03-27
@ -56,4 +60,4 @@
## 0.1.0 - 2018-12-09
* Move codec to separate crate
* Move codec to separate crate.

View File

@ -1,12 +1,10 @@
[package]
name = "actix-codec"
version = "0.4.0-beta.1"
version = "0.4.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec"
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"

View File

@ -7,7 +7,7 @@
//! [`Sink`]: futures_sink::Sink
//! [`Stream`]: futures_core::Stream
#![deny(rust_2018_idioms, nonstandard_style)]
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]

View File

@ -19,5 +19,5 @@ syn = { version = "^1", features = ["full"] }
[dev-dependencies]
actix-rt = "2.0.0"
futures-util = { version = "0.3", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
trybuild = "1"

View File

@ -581,10 +581,7 @@ impl ResourceDef {
mut for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
if pattern.find('{').is_none() {
// TODO: MSRV: 1.45
#[allow(clippy::manual_strip)]
return if pattern.ends_with('*') {
let path = &pattern[..pattern.len() - 1];
return if let Some(path) = pattern.strip_suffix('*') {
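// `strip_suffix('*')` (stable since Rust 1.45) yields Some(rest) only when the pattern ends
// with '*', e.g. "/static*" -> Some("/static"); otherwise it yields None and the else arm runs.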
let re = String::from("^") + path + "(.*)";
(re, vec![PatternElement::Str(String::from(path))], true, 0)
} else {
@ -670,8 +667,6 @@ pub(crate) fn insert_slash(path: &str) -> String {
#[cfg(test)]
mod tests {
use super::*;
use http::Uri;
use std::convert::TryFrom;
#[test]
fn test_parse_static() {
@ -833,8 +828,11 @@ mod tests {
assert!(re.is_match("/user/2345/sdg"));
}
#[cfg(feature = "http")]
#[test]
fn test_parse_urlencoded_param() {
use std::convert::TryFrom;
let re = ResourceDef::new("/user/{id}/test");
let mut path = Path::new("/user/2345/test");
@ -845,7 +843,7 @@ mod tests {
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");
let uri = Uri::try_from("/user/qwe%25/test").unwrap();
let uri = http::Uri::try_from("/user/qwe%25/test").unwrap();
let mut path = Path::new(uri);
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");

View File

@ -170,13 +170,11 @@ impl Quoter {
idx += 1;
}
if let Some(data) = cloned {
// Unsafe: we get data from http::Uri, which does utf-8 checks already
cloned.map(|data| {
// SAFETY: we get data from http::Uri, which does UTF-8 checks already
// this code only decodes valid pct encoded values
Some(unsafe { String::from_utf8_unchecked(data) })
} else {
None
}
unsafe { String::from_utf8_unchecked(data) }
})
}
}
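The edit above collapses an explicit `if let Some(..) { Some(..) } else { None }` into a single `Option::map` call. A minimal sketch of the same pattern, with illustrative names and a checked UTF-8 conversion in place of the crate's unsafe one:

fn decode(cloned: Option<Vec<u8>>) -> Option<String> {
    // Equivalent to: if let Some(data) = cloned { Some(convert(data)) } else { None }
    cloned.map(|data| String::from_utf8(data).expect("valid UTF-8"))
}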

View File

@ -3,6 +3,22 @@
## Unreleased - 2021-xx-xx
## 2.2.0 - 2021-03-29
* **BREAKING** `ActixStream::{poll_read_ready, poll_write_ready}` methods now return
a `Ready` object in the `Ok` variant. [#293]
* Breakage is acceptable since `ActixStream` was not intended to be public.
[#293]: https://github.com/actix/actix-net/pull/293
## 2.1.0 - 2021-02-24
* Add `ActixStream` extension trait to include readiness methods. [#276]
* Re-export `tokio::net::TcpSocket` in `net` module [#282]
[#276]: https://github.com/actix/actix-net/pull/276
[#282]: https://github.com/actix/actix-net/pull/282
## 2.0.2 - 2021-02-06
* Add `Arbiter::handle` to get a handle of an owned Arbiter. [#274]
* Add `System::try_current` for situations where actix may or may not be running a System. [#275]
@ -56,10 +72,7 @@
## 2.0.0-beta.1 - 2020-12-28
### Added
* Add `System::attach_to_tokio` method. [#173]
### Changed
* Update `tokio` dependency to `1.0`. [#236]
* Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep`
to stay aligned with Tokio's naming. [#236]
@ -67,27 +80,19 @@
* These methods now accept `&self` when calling. [#236]
* Remove `'static` lifetime requirement for `System::run` and `Builder::run`. [#236]
* `Arbiter::spawn` now panics when `System` is not in scope. [#207]
### Fixed
* Fix work load issue by removing `PENDING` thread local. [#207]
[#207]: https://github.com/actix/actix-net/pull/207
[#236]: https://github.com/actix/actix-net/pull/236
## [1.1.1] - 2020-04-30
### Fixed
## 1.1.1 - 2020-04-30
* Fix memory leak due to [#94] (see [#129] for more detail)
[#129]: https://github.com/actix/actix-net/issues/129
## [1.1.0] - 2020-04-08
**This version has been yanked.**
### Added
## 1.1.0 - 2020-04-08 (YANKED)
* Expose `System::is_set` to check if current system has been started [#99]
* Add `Arbiter::is_running` to check if event loop is running [#124]
* Add `Arbiter::local_join` associated function
@ -97,96 +102,57 @@
[#99]: https://github.com/actix/actix-net/pull/99
[#124]: https://github.com/actix/actix-net/pull/124
## [1.0.0] - 2019-12-11
## 1.0.0 - 2019-12-11
* Update dependencies
## [1.0.0-alpha.3] - 2019-12-07
### Fixed
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2
* Fix compilation on non-unix platforms
### Changed
* Migrate to tokio 0.2
## [1.0.0-alpha.2] - 2019-12-02
Added
## 1.0.0-alpha.2 - 2019-12-02
* Export `main` and `test` attribute macros
* Export `time` module (re-export of tokio-timer)
* Export `net` module (re-export of tokio-net)
## [1.0.0-alpha.1] - 2019-11-22
### Changed
## 1.0.0-alpha.1 - 2019-11-22
* Migrate to std::future and tokio 0.2
## [0.2.6] - 2019-11-14
### Fixed
## 0.2.6 - 2019-11-14
* Allow to join arbiter's thread. #60
* Fix arbiter's thread panic message.
### Added
* Allow to join arbiter's thread. #60
## [0.2.5] - 2019-09-02
### Added
## 0.2.5 - 2019-09-02
* Add arbiter specific storage
## [0.2.4] - 2019-07-17
### Changed
## 0.2.4 - 2019-07-17
* Avoid a copy of the Future when initializing the Box. #29
## [0.2.3] - 2019-06-22
### Added
* Allow to start System using exsiting CurrentThread Handle #22
## 0.2.3 - 2019-06-22
* Allow to start System using existing CurrentThread Handle #22
## [0.2.2] - 2019-03-28
### Changed
## 0.2.2 - 2019-03-28
* Moved `blocking` module to `actix-threadpool` crate
## [0.2.1] - 2019-03-11
### Added
## 0.2.1 - 2019-03-11
* Added `blocking` module
* Arbiter::exec_fn - execute fn on the arbiter's thread
* Arbiter::exec - execute fn on the arbiter's thread and wait result
* Added `Arbiter::exec_fn` - execute fn on the arbiter's thread
* Added `Arbiter::exec` - execute fn on the arbiter's thread and wait result
## [0.2.0] - 2019-03-06
## 0.2.0 - 2019-03-06
* `run` method returns `io::Result<()>`
* Removed `Handle`
## [0.1.0] - 2018-12-09
## 0.1.0 - 2018-12-09
* Initial release

View File

@ -1,6 +1,6 @@
[package]
name = "actix-rt"
version = "2.0.2"
version = "2.2.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
@ -8,7 +8,7 @@ authors = [
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
keywords = ["async", "futures", "io", "runtime"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
repository = "https://github.com/actix/actix-net"
documentation = "https://docs.rs/actix-rt"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
@ -26,7 +26,7 @@ macros = ["actix-macros"]
actix-macros = { version = "0.2.0", optional = true }
futures-core = { version = "0.3", default-features = false }
tokio = { version = "1.2", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
tokio = { version = "1.3", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
[dev-dependencies]
tokio = { version = "1.2", features = ["full"] }

View File

@ -2,4 +2,13 @@
> Tokio-based single-threaded async runtime for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-rt?label=latest)](https://crates.io/crates/actix-rt)
[![Documentation](https://docs.rs/actix-rt/badge.svg?version=2.2.0)](https://docs.rs/actix-rt/2.2.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-rt.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-rt/2.2.0/status.svg)](https://deps.rs/crate/actix-rt/2.2.0)
![Download](https://img.shields.io/crates/d/actix-rt.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/WghFtEH6Hb)
See crate documentation for more: https://docs.rs/actix-rt.

View File

@ -0,0 +1,60 @@
//! An example of how to build a multi-thread Tokio runtime for an Actix System,
//! then spawn async tasks that can make use of the Tokio runtime's work stealing.
use actix_rt::System;
fn main() {
System::with_tokio_rt(|| {
// build system with a multi-thread tokio runtime.
tokio::runtime::Builder::new_multi_thread()
.worker_threads(2)
.enable_all()
.build()
.unwrap()
})
.block_on(async_main());
}
// async main function that acts like #[actix_web::main] or #[tokio::main]
async fn async_main() {
let (tx, rx) = tokio::sync::oneshot::channel();
// get a handle to system arbiter and spawn async task on it
System::current().arbiter().spawn(async {
// use tokio::spawn to get inside the context of the multi-thread tokio runtime
let h1 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
});
// work stealing occurs for this task spawn
let h2 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
});
h1.await.unwrap();
h2.await.unwrap();
let _ = tx.send(());
});
rx.await.unwrap();
let (tx, rx) = tokio::sync::oneshot::channel();
let now = std::time::Instant::now();
// without an additional tokio::spawn, all spawned tasks run on a single thread
System::current().arbiter().spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
let _ = tx.send(());
});
// the previously spawned task has blocked the system arbiter thread,
// so this task will wait for 2 seconds until it can run
System::current().arbiter().spawn(async move {
println!("thread id is {:?}", std::thread::current().id());
assert!(now.elapsed() > std::time::Duration::from_secs(2));
});
rx.await.unwrap();
}

View File

@ -70,13 +70,74 @@ pub mod signal {
}
pub mod net {
//! TCP/UDP/Unix bindings (Tokio re-exports).
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
use std::{
future::Future,
io,
task::{Context, Poll},
};
pub use tokio::io::Ready;
use tokio::io::{AsyncRead, AsyncWrite, Interest};
pub use tokio::net::UdpSocket;
pub use tokio::net::{TcpListener, TcpStream};
pub use tokio::net::{TcpListener, TcpSocket, TcpStream};
#[cfg(unix)]
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
/// Extension trait over async read+write types that can also signal readiness.
#[doc(hidden)]
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin {
/// Poll stream and check read readiness of Self.
///
/// See [tokio::net::TcpStream::poll_read_ready] for detail on intended use.
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
/// Poll stream and check write readiness of Self.
///
/// See [tokio::net::TcpStream::poll_write_ready] for detail on intended use.
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
}
impl ActixStream for TcpStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
}
#[cfg(unix)]
impl ActixStream for UnixStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
}
impl<Io: ActixStream + ?Sized> ActixStream for Box<Io> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_read_ready(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_write_ready(cx)
}
}
}
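For orientation, a minimal sketch (not part of this commit) of how the `ActixStream` extension trait above can be used generically from outside the crate. `wait_readable` is a hypothetical helper name, and the sketch assumes `futures-util` 0.3 is available for `poll_fn`:

use actix_rt::net::{ActixStream, Ready};

// Resolve once the stream reports read readiness.
async fn wait_readable<Io: ActixStream>(io: &Io) -> std::io::Result<Ready> {
    futures_util::future::poll_fn(|cx| io.poll_read_ready(cx)).await
}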
pub mod time {

View File

@ -6,6 +6,19 @@
[#230]: https://github.com/actix/actix-net/pull/230
## 2.0.0-beta.5 - 2021-04-20
* Server shutdown now notifies all workers to exit regardless of whether shutdown is graceful;
this makes all workers shut down immediately in the force shutdown case. [#333]
[#333]: https://github.com/actix/actix-net/pull/333
## 2.0.0-beta.4 - 2021-04-01
* Prevent panic when `shutdown_timeout` is very large. [f9262db]
[f9262db]: https://github.com/actix/actix-net/commit/f9262db
## 2.0.0-beta.3 - 2021-02-06
* Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`. [#246]
* Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop. [#264]

View File

@ -1,18 +1,15 @@
[package]
name = "actix-server"
version = "2.0.0-beta.3"
version = "2.0.0-beta.5"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
]
description = "General purpose TCP server built for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-server"
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
exclude = [".gitignore", ".cargo/config"]
edition = "2018"
[lib]
@ -23,20 +20,21 @@ path = "src/lib.rs"
default = []
[dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0-beta.4"
actix-utils = "3.0.0-beta.2"
actix-service = "2.0.0"
actix-utils = "3.0.0"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
log = "0.4"
mio = { version = "0.7.6", features = ["os-poll", "net"] }
num_cpus = "1.13"
slab = "0.4"
tokio = { version = "1", features = ["sync"] }
tokio = { version = "1.2", features = ["sync"] }
[dev-dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = "2.0.0"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }

View File

@ -9,15 +9,17 @@
//! Start typing. When you press enter the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
use std::{
env, io,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use std::{env, io};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::pipeline_factory;
use actix_service::{fn_service, ServiceFactoryExt as _};
use bytes::BytesMut;
use futures_util::future::ok;
use log::{error, info};
@ -25,7 +27,7 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "actix=trace,basic=trace");
env::set_var("RUST_LOG", "info");
env_logger::init();
let count = Arc::new(AtomicUsize::new(0));
@ -41,7 +43,7 @@ async fn main() -> io::Result<()> {
let count = Arc::clone(&count);
let num2 = Arc::clone(&count);
pipeline_factory(move |mut stream: TcpStream| {
fn_service(move |mut stream: TcpStream| {
let count = Arc::clone(&count);
async move {

View File

@ -2,7 +2,7 @@ use std::time::Duration;
use std::{io, thread};
use actix_rt::{
time::{sleep_until, Instant},
time::{sleep, Instant},
System,
};
use log::{error, info};
@ -12,18 +12,21 @@ use slab::Slab;
use crate::server::Server;
use crate::socket::{MioListener, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
use crate::worker::{Conn, WorkerHandle};
use crate::worker::{Conn, WorkerHandleAccept};
use crate::Token;
struct ServerSocketInfo {
// addr for socket. mainly used for logging.
/// Address of socket. Mainly used for logging.
addr: SocketAddr,
// be ware this is the crate token for identify socket and should not be confused with
// mio::Token
/// Beware this is the crate token for identifying the socket and should not be confused
/// with `mio::Token`.
token: Token,
lst: MioListener,
// timeout is used to mark the deadline when this socket's listener should be registered again
// after an error.
/// Timeout is used to mark the deadline when this socket's listener should be registered again
/// after an error.
timeout: Option<Instant>,
}
@ -63,7 +66,7 @@ impl AcceptLoop {
pub(crate) fn start(
&mut self,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
handles: Vec<WorkerHandleAccept>,
) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
let poll = self.poll.take().unwrap();
@ -77,12 +80,59 @@ impl AcceptLoop {
struct Accept {
poll: Poll,
waker: WakerQueue,
handles: Vec<WorkerHandle>,
handles: Vec<WorkerHandleAccept>,
srv: Server,
next: usize,
avail: Availability,
backpressure: bool,
}
/// Array of `u128`s where each bit marks the availability of one worker handle.
struct Availability([u128; 4]);
impl Default for Availability {
fn default() -> Self {
Self([0; 4])
}
}
impl Availability {
/// Check if any worker handle is available
fn available(&self) -> bool {
self.0.iter().any(|a| *a != 0)
}
/// Set worker handle available state by index.
fn set_available(&mut self, idx: usize, avail: bool) {
let (offset, idx) = if idx < 128 {
(0, idx)
} else if idx < 128 * 2 {
(1, idx - 128)
} else if idx < 128 * 3 {
(2, idx - 128 * 2)
} else if idx < 128 * 4 {
(3, idx - 128 * 3)
} else {
panic!("Max WorkerHandle count is 512")
};
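// The branches above are equivalent to offset = idx / 128 and idx = idx % 128,
// e.g. worker index 438 maps to self.0[3], bit 438 - 384 = 54 (see the pin_point test below).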
let off = 1 << idx as u128;
if avail {
self.0[offset] |= off;
} else {
self.0[offset] &= !off
}
}
/// Set all worker handles to the available state.
/// This results in a re-check of every worker's availability.
fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
handles.iter().for_each(|handle| {
self.set_available(handle.idx(), true);
})
}
}
/// This function defines errors that are per-connection; which basically means that if we get
/// this error from the `accept()` system call, the next connection might still be ready to be
/// accepted.
@ -102,7 +152,7 @@ impl Accept {
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
srv: Server,
handles: Vec<WorkerHandle>,
handles: Vec<WorkerHandleAccept>,
) {
// Accept runs in its own thread and would want to spawn additional futures to current
// actix system.
@ -113,6 +163,7 @@ impl Accept {
System::set_current(sys);
let (mut accept, sockets) =
Accept::new_with_sockets(poll, waker, socks, handles, srv);
accept.poll_with(sockets);
})
.unwrap();
@ -122,7 +173,7 @@ impl Accept {
poll: Poll,
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
handles: Vec<WorkerHandleAccept>,
srv: Server,
) -> (Accept, Slab<ServerSocketInfo>) {
let mut sockets = Slab::new();
@ -145,12 +196,18 @@ impl Accept {
});
}
let mut avail = Availability::default();
// Assume all handles are available at construction time.
avail.set_available_all(&handles);
let accept = Accept {
poll,
waker,
handles,
srv,
next: 0,
avail,
backpressure: false,
};
@ -163,12 +220,8 @@ impl Accept {
loop {
if let Err(e) = self.poll.poll(&mut events, None) {
match e.kind() {
std::io::ErrorKind::Interrupted => {
continue;
}
_ => {
panic!("Poll error: {}", e);
}
std::io::ErrorKind::Interrupted => continue,
_ => panic!("Poll error: {}", e),
}
}
@ -184,38 +237,28 @@ impl Accept {
let mut guard = self.waker.guard();
match guard.pop_front() {
// worker notify it becomes available. we may want to recover
// from backpressure.
Some(WakerInterest::WorkerAvailable) => {
// from backpressure.
Some(WakerInterest::WorkerAvailable(idx)) => {
drop(guard);
self.maybe_backpressure(&mut sockets, false);
self.avail.set_available(idx, true);
}
// a new worker thread is made and it's handle would be added
// to Accept
// a new worker thread is made and its handle would be added to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
// maybe we want to recover from a backpressure.
self.maybe_backpressure(&mut sockets, false);
self.avail.set_available(handle.idx(), true);
self.handles.push(handle);
}
// got timer interest and it's time to try register socket(s)
// again.
// got timer interest and it's time to try to register socket(s) again
Some(WakerInterest::Timer) => {
drop(guard);
self.process_timer(&mut sockets)
}
Some(WakerInterest::Pause) => {
drop(guard);
sockets.iter_mut().for_each(|(_, info)| {
match self.deregister(info) {
Ok(_) => info!(
"Paused accepting connections on {}",
info.addr
),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
});
self.deregister_all(&mut sockets);
}
Some(WakerInterest::Resume) => {
drop(guard);
@ -226,10 +269,9 @@ impl Accept {
Some(WakerInterest::Stop) => {
return self.deregister_all(&mut sockets);
}
// waker queue is drained.
// waker queue is drained
None => {
// Reset the WakerQueue before break so it does not grow
// infinitely.
// Reset the WakerQueue before break so it does not grow infinitely
WakerQueue::reset(&mut guard);
break 'waker;
}
@ -246,16 +288,23 @@ impl Accept {
fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
let now = Instant::now();
sockets.iter_mut().for_each(|(token, info)| {
// only the ServerSocketInfo have an associate timeout value was de registered.
if let Some(inst) = info.timeout.take() {
if now > inst {
self.register_logged(token, info);
} else {
sockets
.iter_mut()
// Only sockets that had an associated timeout were deregistered.
.filter(|(_, info)| info.timeout.is_some())
.for_each(|(token, info)| {
let inst = info.timeout.take().unwrap();
if now < inst {
info.timeout = Some(inst);
} else if !self.backpressure {
self.register_logged(token, info);
}
}
});
// Drop the timeout if the server is in backpressure and the socket timeout has expired.
// When the server recovers from backpressure it will register all sockets without
// a timeout value, so registering this socket is delayed until then.
});
}
#[cfg(not(target_os = "windows"))]
@ -293,136 +342,236 @@ impl Accept {
self.poll.registry().deregister(&mut info.lst)
}
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
match self.deregister(info) {
Ok(_) => info!("Paused accepting connections on {}", info.addr),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
}
fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
sockets.iter_mut().for_each(|(_, info)| {
info!("Accepting connections on {} has been paused", info.addr);
let _ = self.deregister(info);
});
// This is a best-effort implementation with the following limitation:
//
// Every ServerSocketInfo with an associated timeout will be skipped and its timeout
// is removed in the process.
//
// Therefore a WakerInterest::Pause followed by a WakerInterest::Resume within a very short
// gap (less than 500ms) would cause all timed-out ServerSocketInfos to be re-registered
// earlier than expected.
sockets
.iter_mut()
// Take all timeouts.
// This prevents the Accept::process_timer method from re-registering a socket afterwards.
.map(|(_, info)| (info.timeout.take(), info))
// Socket info with a timeout is already deregistered so skip them.
.filter(|(timeout, _)| timeout.is_none())
.for_each(|(_, info)| self.deregister_logged(info));
}
fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in sockets.iter_mut() {
if info.timeout.is_some() {
// socket will attempt to re-register itself when its timeout completes
continue;
// Only operate when the server's backpressure state differs from the given flag.
if self.backpressure != on {
self.backpressure = on;
sockets
.iter_mut()
// Only operate on sockets without an associated timeout.
// Sockets with one are handled by the `accept` and `process_timer` methods;
// they are already deregistered or need to be re-registered in the future.
.filter(|(_, info)| info.timeout.is_none())
.for_each(|(token, info)| {
if on {
self.deregister_logged(info);
} else {
self.register_logged(token, info);
}
self.register_logged(token, info);
}
}
} else if on {
self.backpressure = true;
self.deregister_all(sockets);
});
}
}
fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut msg: Conn) {
fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut conn: Conn) {
if self.backpressure {
while !self.handles.is_empty() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
break;
}
Err(tmp) => {
// worker lost contact and could be gone. a message is sent to
// `ServerBuilder` future to notify it a new worker should be made.
// after that remove the fault worker.
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
return;
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
// send_connection would remove the faulted worker from handles.
// Worst case here is that the conn gets dropped after all handles are gone.
while let Err(c) = self.send_connection(sockets, conn) {
conn = c
}
} else {
let mut idx = 0;
while idx < self.handles.len() {
idx += 1;
if self.handles[self.next].available() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
return;
}
// worker lost contact and could be gone. a message is sent to
// `ServerBuilder` future to notify it a new worker should be made.
// after that remove the fault worker and enter backpressure if necessary.
Err(tmp) => {
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
self.maybe_backpressure(sockets, true);
return;
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
while self.avail.available() {
let next = self.next();
let idx = next.idx();
if next.available() {
self.avail.set_available(idx, true);
match self.send_connection(sockets, conn) {
Ok(_) => return,
Err(c) => conn = c,
}
} else {
self.avail.set_available(idx, false);
self.set_next();
}
self.set_next();
}
// enable backpressure
// Sending Conn failed because all workers are either errored or unavailable.
// Enter backpressure state and try again.
self.maybe_backpressure(sockets, true);
self.accept_one(sockets, msg);
self.accept_one(sockets, conn);
}
}
// set next worker handle that would accept work.
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
// Send connection to worker and handle error.
fn send_connection(
&mut self,
sockets: &mut Slab<ServerSocketInfo>,
conn: Conn,
) -> Result<(), Conn> {
match self.next().send(conn) {
Ok(_) => {
self.set_next();
Ok(())
}
Err(conn) => {
// Worker thread errored and could be gone.
// Remove the worker handle and notify `ServerBuilder`.
self.remove_next();
if self.handles.is_empty() {
error!("No workers");
self.maybe_backpressure(sockets, true);
// All workers are gone and the Conn has nowhere to be sent.
// Treat this situation as Ok and drop the Conn.
return Ok(());
} else if self.handles.len() <= self.next {
self.next = 0;
}
Err(conn)
}
}
}
fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
loop {
let msg = if let Some(info) = sockets.get_mut(token) {
match info.lst.accept() {
Ok(Some((io, addr))) => Conn {
let info = sockets
.get_mut(token)
.expect("ServerSocketInfo is removed from Slab");
match info.lst.accept() {
Ok(io) => {
let msg = Conn {
io,
token: info.token,
peer: Some(addr),
},
Ok(None) => return,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
// deregister listener temporary
error!("Error accepting connection: {}", e);
if let Err(err) = self.deregister(info) {
error!("Can not deregister server socket {}", err);
}
// sleep after error. write the timeout to socket info as later the poll
// would need it mark which socket and when it's listener should be
// registered.
info.timeout = Some(Instant::now() + Duration::from_millis(500));
// after the sleep a Timer interest is sent to Accept Poll
let waker = self.waker.clone();
System::current().arbiter().spawn(async move {
sleep_until(Instant::now() + Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
});
return;
}
};
self.accept_one(sockets, msg);
}
} else {
return;
};
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
self.accept_one(sockets, msg);
// deregister the listener temporarily
self.deregister_logged(info);
// sleep after error. write the timeout to the socket info, as later the poll
// will need it to mark which socket's listener should be re-registered
// and when
info.timeout = Some(Instant::now() + Duration::from_millis(500));
// after the sleep a Timer interest is sent to Accept Poll
let waker = self.waker.clone();
System::current().arbiter().spawn(async move {
sleep(Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
});
return;
}
};
}
}
fn next(&self) -> &WorkerHandleAccept {
&self.handles[self.next]
}
/// Set the next worker handle that would accept a connection.
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
/// Remove the next worker handle that failed to accept a connection.
fn remove_next(&mut self) {
let handle = self.handles.swap_remove(self.next);
let idx = handle.idx();
// A message is sent to the `ServerBuilder` future to notify it that a new worker
// should be made.
self.srv.worker_faulted(idx);
self.avail.set_available(idx, false);
}
}
#[cfg(test)]
mod test {
use super::Availability;
fn single(aval: &mut Availability, idx: usize) {
aval.set_available(idx, true);
assert!(aval.available());
aval.set_available(idx, true);
aval.set_available(idx, false);
assert!(!aval.available());
aval.set_available(idx, false);
assert!(!aval.available());
}
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
idx.iter().for_each(|idx| aval.set_available(*idx, true));
assert!(aval.available());
while let Some(idx) = idx.pop() {
assert!(aval.available());
aval.set_available(idx, false);
}
assert!(!aval.available());
}
#[test]
fn availability() {
let mut aval = Availability::default();
single(&mut aval, 1);
single(&mut aval, 128);
single(&mut aval, 256);
single(&mut aval, 511);
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
multi(&mut aval, idx);
multi(&mut aval, (0..511).collect())
}
#[test]
#[should_panic]
fn overflow() {
let mut aval = Availability::default();
single(&mut aval, 512);
}
#[test]
fn pin_point() {
let mut aval = Availability::default();
aval.set_available(438, true);
aval.set_available(479, true);
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
}
}

View File

@ -1,12 +1,12 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::{io, mem};
use std::{
future::Future,
io, mem,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use actix_rt::net::TcpStream;
use actix_rt::time::{sleep_until, Instant};
use actix_rt::{self as rt, System};
use actix_rt::{self as rt, net::TcpStream, time::sleep, System};
use log::{error, info};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::sync::oneshot;
@ -19,7 +19,10 @@ use crate::signals::{Signal, Signals};
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::socket::{MioTcpListener, MioTcpSocket};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::worker::{self, ServerWorker, ServerWorkerConfig, WorkerAvailability, WorkerHandle};
use crate::worker::{
ServerWorker, ServerWorkerConfig, WorkerAvailability, WorkerHandleAccept,
WorkerHandleServer,
};
use crate::{join_all, Token};
use futures_core::future::LocalBoxFuture;
@ -28,7 +31,7 @@ pub struct ServerBuilder {
threads: usize,
token: Token,
backlog: u32,
handles: Vec<(usize, WorkerHandle)>,
handles: Vec<(usize, WorkerHandleServer)>,
services: Vec<Box<dyn InternalServiceFactory>>,
sockets: Vec<(Token, String, MioListener)>,
accept: AcceptLoop,
@ -120,18 +123,18 @@ impl ServerBuilder {
/// reached for each worker.
///
/// By default, max connections is set to 25k per worker.
pub fn maxconn(self, num: usize) -> Self {
worker::max_concurrent_connections(num);
pub fn maxconn(mut self, num: usize) -> Self {
self.worker_config.max_concurrent_connections(num);
self
}
/// Stop actix system.
/// Stop Actix system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Disable signal handling
/// Disable signal handling.
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
@ -139,9 +142,8 @@ impl ServerBuilder {
/// Timeout for graceful workers shutdown in seconds.
///
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
/// After receiving a stop signal, workers have this much time to finish serving requests.
/// Workers still alive after the timeout are force dropped.
///
/// By default, the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
@ -150,11 +152,10 @@ impl ServerBuilder {
self
}
/// Execute external configuration as part of the server building
/// process.
/// Execute external configuration as part of the server building process.
///
/// This function is useful for moving parts of configuration to a
/// different module or even library.
/// This function is useful for moving parts of configuration to a different module or
/// even library.
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
where
F: Fn(&mut ServiceConfig) -> io::Result<()>,
@ -271,6 +272,7 @@ impl ServerBuilder {
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
@ -284,10 +286,11 @@ impl ServerBuilder {
// start workers
let handles = (0..self.threads)
.map(|idx| {
let handle = self.start_worker(idx, self.accept.waker_owned());
self.handles.push((idx, handle.clone()));
let (handle_accept, handle_server) =
self.start_worker(idx, self.accept.waker_owned());
self.handles.push((idx, handle_server));
handle
handle_accept
})
.collect();
@ -402,41 +405,18 @@ impl ServerBuilder {
std::mem::swap(&mut self.on_stop, &mut on_stop);
// stop workers
if !self.handles.is_empty() && graceful {
let iter = self
.handles
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect();
let stop = self
.handles
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect();
let fut = join_all(iter);
rt::spawn(async move {
on_stop().await;
rt::spawn(async move {
on_stop().await;
let _ = fut.await;
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
rt::spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
});
}
});
// we need to stop system if server was spawned
} else {
rt::spawn(async move {
on_stop().await;
if exit {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
}
});
if graceful {
let _ = join_all(stop).await;
}
if let Some(tx) = completion {
let _ = tx.send(());
@ -444,7 +424,12 @@ impl ServerBuilder {
for tx in notify {
let _ = tx.send(());
}
}
if exit {
sleep(Duration::from_millis(300)).await;
System::current().stop();
}
});
}
ServerCommand::WorkerFaulted(idx) => {
let mut found = false;
@ -470,9 +455,10 @@ impl ServerBuilder {
break;
}
let handle = self.start_worker(new_idx, self.accept.waker_owned());
self.handles.push((new_idx, handle.clone()));
self.accept.wake(WakerInterest::Worker(handle));
let (handle_accept, handle_server) =
self.start_worker(new_idx, self.accept.waker_owned());
self.handles.push((new_idx, handle_server));
self.accept.wake(WakerInterest::Worker(handle_accept));
}
}
}
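As a usage illustration of the builder API touched above, a hedged, minimal sketch: the bind name, address, and no-op service are placeholders, and the exact builder signatures are assumed from this diff and the crate docs rather than verified against a specific release.

use actix_rt::net::TcpStream;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    actix_server::Server::build()
        // cap each worker at 512 concurrent connections (default is 25k)
        .maxconn(512)
        // give workers 5 seconds to finish serving requests on shutdown
        .shutdown_timeout(5)
        .bind("noop", "127.0.0.1:8080", || {
            // accept a connection and resolve immediately
            fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}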

View File

@ -7,14 +7,14 @@ use actix_service::{
fn_service, IntoServiceFactory as IntoBaseServiceFactory,
ServiceFactory as BaseServiceFactory,
};
use actix_utils::counter::CounterGuard;
use actix_utils::{counter::CounterGuard, future::ready};
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::builder::bind_addr;
use crate::service::{BoxedServerService, InternalServiceFactory, StreamService};
use crate::socket::{MioStream, MioTcpListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::{ready, Token};
use crate::Token;
pub struct ServiceConfig {
pub(crate) services: Vec<(String, MioTcpListener)>,
@ -243,7 +243,7 @@ impl ServiceRuntime {
type BoxedNewService = Box<
dyn BaseServiceFactory<
(Option<CounterGuard>, MioStream),
(CounterGuard, MioStream),
Response = (),
Error = (),
InitError = (),
@ -257,7 +257,7 @@ struct ServiceFactory<T> {
inner: T,
}
impl<T> BaseServiceFactory<(Option<CounterGuard>, MioStream)> for ServiceFactory<T>
impl<T> BaseServiceFactory<(CounterGuard, MioStream)> for ServiceFactory<T>
where
T: BaseServiceFactory<TcpStream, Config = ()>,
T::Future: 'static,

View File

@ -55,24 +55,6 @@ pub fn new() -> ServerBuilder {
ServerBuilder::default()
}
// temporary Ready type for std::future::{ready, Ready}; Can be removed when MSRV surpass 1.48
#[doc(hidden)]
pub struct Ready<T>(Option<T>);
pub(crate) fn ready<T>(t: T) -> Ready<T> {
Ready(Some(t))
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(self.get_mut().0.take().unwrap())
}
}
// a poor man's join future. joined future is only used when starting/stopping the server.
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
@ -132,6 +114,8 @@ impl<T> Future for JoinAll<T> {
mod test {
use super::*;
use actix_utils::future::ready;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];

View File

@ -3,12 +3,15 @@ use std::net::SocketAddr;
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
use actix_utils::counter::CounterGuard;
use actix_utils::{
counter::CounterGuard,
future::{ready, Ready},
};
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::socket::{FromStream, MioStream};
use crate::{ready, Ready, Token};
use crate::Token;
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
type Factory: BaseServiceFactory<Stream, Config = ()>;
@ -26,7 +29,7 @@ pub(crate) trait InternalServiceFactory: Send {
pub(crate) type BoxedServerService = Box<
dyn Service<
(Option<CounterGuard>, MioStream),
(CounterGuard, MioStream),
Response = (),
Error = (),
Future = Ready<Result<(), ()>>,
@ -47,7 +50,7 @@ impl<S, I> StreamService<S, I> {
}
}
impl<S, I> Service<(Option<CounterGuard>, MioStream)> for StreamService<S, I>
impl<S, I> Service<(CounterGuard, MioStream)> for StreamService<S, I>
where
S: Service<I>,
S::Future: 'static,
@ -62,7 +65,7 @@ where
self.service.poll_ready(ctx).map_err(|_| ())
}
fn call(&self, (guard, req): (Option<CounterGuard>, MioStream)) -> Self::Future {
fn call(&self, (guard, req): (CounterGuard, MioStream)) -> Self::Future {
ready(match FromStream::from_mio(req) {
Ok(stream) => {
let f = self.service.call(stream);

View File

@ -12,18 +12,7 @@ pub(crate) use {
use std::{fmt, io};
use actix_rt::net::TcpStream;
use mio::event::Source;
use mio::net::TcpStream as MioTcpStream;
use mio::{Interest, Registry, Token};
#[cfg(windows)]
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
#[cfg(unix)]
use {
actix_rt::net::UnixStream,
mio::net::{SocketAddr as MioSocketAddr, UnixStream as MioUnixStream},
std::os::unix::io::{FromRawFd, IntoRawFd},
};
use mio::{event::Source, Interest, Registry, Token};
pub(crate) enum MioListener {
Tcp(MioTcpListener),
@ -40,15 +29,11 @@ impl MioListener {
}
}
pub(crate) fn accept(&self) -> io::Result<Option<(MioStream, SocketAddr)>> {
pub(crate) fn accept(&self) -> io::Result<MioStream> {
match *self {
MioListener::Tcp(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Tcp(stream), SocketAddr::Tcp(addr)))),
MioListener::Tcp(ref lst) => lst.accept().map(|(stream, _)| MioStream::Tcp(stream)),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Uds(stream), SocketAddr::Uds(addr)))),
MioListener::Uds(ref lst) => lst.accept().map(|(stream, _)| MioStream::Uds(stream)),
}
}
}
@ -135,7 +120,7 @@ impl fmt::Display for MioListener {
pub(crate) enum SocketAddr {
Tcp(StdSocketAddr),
#[cfg(unix)]
Uds(MioSocketAddr),
Uds(mio::net::SocketAddr),
}
impl fmt::Display for SocketAddr {
@ -160,9 +145,9 @@ impl fmt::Debug for SocketAddr {
#[derive(Debug)]
pub enum MioStream {
Tcp(MioTcpStream),
Tcp(mio::net::TcpStream),
#[cfg(unix)]
Uds(MioUnixStream),
Uds(mio::net::UnixStream),
}
/// helper trait for converting mio stream to tokio stream.
@ -170,47 +155,60 @@ pub trait FromStream: Sized {
fn from_mio(sock: MioStream) -> io::Result<Self>;
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is a in place conversion from mio stream to tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(windows)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is a in place conversion from mio stream to tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
mod win_impl {
use super::*;
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
}
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is a in place conversion from mio stream to tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
mod unix_impl {
use super::*;
use std::os::unix::io::{FromRawFd, IntoRawFd};
use actix_rt::net::UnixStream;
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
}
}
}

View File

@ -92,10 +92,10 @@ impl TestServer {
let port = addr.port();
TestServerRuntime {
system,
addr,
host,
port,
system,
}
}

View File

@ -6,9 +6,9 @@ use std::{
use mio::{Registry, Token as MioToken, Waker};
use crate::worker::WorkerHandle;
use crate::worker::WorkerHandleAccept;
/// waker token for `mio::Poll` instance
/// Waker token for `mio::Poll` instance.
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
/// `mio::Waker` with a queue for waking up the `Accept`'s `Poll` and contains the `WakerInterest`
@ -30,7 +30,7 @@ impl Deref for WakerQueue {
}
impl WakerQueue {
/// construct a waker queue with given `Poll`'s `Registry` and capacity.
/// Construct a waker queue with given `Poll`'s `Registry` and capacity.
///
/// A fixed `WAKER_TOKEN` is used to identify the wake interest and the `Poll` needs to match
/// event's token for it to properly handle `WakerInterest`.
@ -41,7 +41,7 @@ impl WakerQueue {
Ok(Self(Arc::new((waker, queue))))
}
/// push a new interest to the queue and wake up the accept poll afterwards.
/// Push a new interest to the queue and wake up the accept poll afterwards.
pub(crate) fn wake(&self, interest: WakerInterest) {
let (waker, queue) = self.deref();
@ -55,24 +55,24 @@ impl WakerQueue {
.unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
}
/// get a MutexGuard of the waker queue.
/// Get a MutexGuard of the waker queue.
pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
self.deref().1.lock().expect("Failed to lock WakerQueue")
}
/// reset the waker queue so it does not grow infinitely.
/// Reset the waker queue so it does not grow infinitely.
pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
}
}
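
To make the queue's intended use concrete, a hedged within-crate sketch: a producer pushes an interest and wakes the poll; the accept loop, after seeing an event whose token equals `WAKER_TOKEN`, drains and resets the queue. The real logic lives in `accept.rs` and differs in detail.

```rust
// Producer side: enqueue an interest and wake Accept's Poll.
fn request_pause(waker_queue: &WakerQueue) {
    waker_queue.wake(WakerInterest::Pause);
}

// Consumer side (inside the accept loop, on a WAKER_TOKEN event): drain the queue.
fn drain(waker_queue: &WakerQueue) {
    let mut guard = waker_queue.guard();
    while let Some(interest) = guard.pop_front() {
        // ...dispatch `interest` here...
        let _ = interest;
    }
    // Keep the queue's capacity bounded between wake-ups.
    WakerQueue::reset(&mut guard);
}
```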
/// types of interests we would look into when `Accept`'s `Poll` is waked up by waker.
/// Types of interests we would look into when `Accept`'s `Poll` is waked up by waker.
///
/// *. These interests should not be confused with `mio::Interest` and are mostly not I/O related
/// These interests should not be confused with `mio::Interest` and are mostly not I/O related.
pub(crate) enum WakerInterest {
/// `WorkerAvailable` is an interest from `Worker` notifying `Accept` there is a worker
/// available and can accept new tasks.
WorkerAvailable,
WorkerAvailable(usize),
/// `Pause`, `Resume`, `Stop` Interest are from `ServerBuilder` future. It listens to
/// `ServerCommand` and notify `Accept` to do exactly these tasks.
Pause,
@ -84,6 +84,6 @@ pub(crate) enum WakerInterest {
Timer,
    /// `Worker` is an interest that happens after a worker runs into a faulted state (this is
    /// determined by whether work can be sent to it successfully). `Accept` would be woken up to add the new
/// `WorkerHandle`.
Worker(WorkerHandle),
/// `WorkerHandleAccept`.
Worker(WorkerHandleAccept),
}
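
An illustrative (not the actual) dispatch over these variants, to show what each one asks `Accept` to do; the real handling is in `accept.rs`:

```rust
// Hedged sketch only.
fn on_interest(interest: WakerInterest) {
    match interest {
        WakerInterest::WorkerAvailable(idx) => { let _ = idx; /* mark worker available, resume accepting */ }
        WakerInterest::Pause => { /* deregister listeners from Poll */ }
        WakerInterest::Resume => { /* re-register listeners with Poll */ }
        WakerInterest::Stop => { /* exit the accept loop */ }
        WakerInterest::Timer => { /* retry an accept that previously errored */ }
        WakerInterest::Worker(handle) => { let _ = handle; /* add the restarted worker's accept handle */ }
    }
}
```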

View File

@ -1,123 +1,131 @@
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use std::{
future::Future,
mem,
pin::Pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
task::{Context, Poll},
time::Duration,
};
use actix_rt::time::{sleep_until, Instant, Sleep};
use actix_rt::{spawn, Arbiter};
use actix_rt::{
spawn,
time::{sleep, Instant, Sleep},
Arbiter,
};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
use futures_core::{future::LocalBoxFuture, ready};
use log::{error, info, trace};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot;
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
oneshot,
};
use crate::service::{BoxedServerService, InternalServiceFactory};
use crate::socket::{MioStream, SocketAddr};
use crate::socket::MioStream;
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::{join_all, Token};
pub(crate) struct WorkerCommand(Conn);
/// Stop worker message. Returns `true` on successful shutdown
/// and `false` if some connections are still alive.
pub(crate) struct StopCommand {
/// Stop worker message. Returns `true` on successful graceful shutdown
/// and `false` if some connections are still alive when shutdown executes.
pub(crate) struct Stop {
graceful: bool,
result: oneshot::Sender<bool>,
tx: oneshot::Sender<bool>,
}
#[derive(Debug)]
pub(crate) struct Conn {
pub io: MioStream,
pub token: Token,
pub peer: Option<SocketAddr>,
}
static MAX_CONNS: AtomicUsize = AtomicUsize::new(25600);
fn handle_pair(
idx: usize,
tx1: UnboundedSender<Conn>,
tx2: UnboundedSender<Stop>,
avail: WorkerAvailability,
) -> (WorkerHandleAccept, WorkerHandleServer) {
let accept = WorkerHandleAccept { tx: tx1, avail };
/// Sets the maximum per-worker number of concurrent connections.
let server = WorkerHandleServer { idx, tx: tx2 };
(accept, server)
}
/// Handle to worker that can send connection messages to the worker and share the
/// availability of the worker with other threads.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
///
/// By default, max connections is set to 25k per worker.
pub fn max_concurrent_connections(num: usize) {
MAX_CONNS.store(num, Ordering::Relaxed);
}
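
The free function above is the pre-change API; after this change the limit travels through `ServerWorkerConfig` and is exposed on the builder. A hedged sketch of setting it from user code, mirroring the `.maxconn(...)` call used in the tests further down (the returned server is meant to be awaited inside an `actix_rt::System`):

```rust
// Sketch: cap each worker at 256 concurrent connections.
fn build_limited() -> std::io::Result<actix_server::Server> {
    use actix_rt::net::TcpStream;
    use actix_server::Server;
    use actix_service::fn_service;

    Ok(Server::build()
        .maxconn(256)
        .workers(2)
        .bind("example", ("127.0.0.1", 0), || {
            fn_service(|_io: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run())
}
```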
thread_local! {
static MAX_CONNS_COUNTER: Counter =
Counter::new(MAX_CONNS.load(Ordering::Relaxed));
}
pub(crate) fn num_connections() -> usize {
MAX_CONNS_COUNTER.with(|conns| conns.total())
}
// A handle to a worker that can send messages to the worker and share the worker's availability
// with other threads.
#[derive(Clone)]
pub(crate) struct WorkerHandle {
pub idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
/// Held by [Accept](crate::accept::Accept).
pub(crate) struct WorkerHandleAccept {
tx: UnboundedSender<Conn>,
avail: WorkerAvailability,
}
impl WorkerHandle {
pub fn new(
idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
) -> Self {
WorkerHandle {
idx,
tx1,
tx2,
avail,
}
impl WorkerHandleAccept {
#[inline(always)]
pub(crate) fn idx(&self) -> usize {
self.avail.idx
}
pub fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx1.send(WorkerCommand(msg)).map_err(|msg| msg.0 .0)
#[inline(always)]
pub(crate) fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx.send(msg).map_err(|msg| msg.0)
}
pub fn available(&self) -> bool {
#[inline(always)]
pub(crate) fn available(&self) -> bool {
self.avail.available()
}
}
pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (result, rx) = oneshot::channel();
let _ = self.tx2.send(StopCommand { graceful, result });
/// Handle to worker that can send a stop message to the worker.
///
/// Held by [ServerBuilder](crate::builder::ServerBuilder).
pub(crate) struct WorkerHandleServer {
pub idx: usize,
tx: UnboundedSender<Stop>,
}
impl WorkerHandleServer {
pub(crate) fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (tx, rx) = oneshot::channel();
let _ = self.tx.send(Stop { graceful, tx });
rx
}
}
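
A small within-crate sketch of how the builder side can consume the returned receiver (hedged; `handle` is a hypothetical `WorkerHandleServer`):

```rust
// `true` means the worker finished a graceful shutdown with no connections left;
// `false` means the shutdown was forced or connections were still alive.
async fn stop_worker(handle: &WorkerHandleServer) {
    match handle.stop(true).await {
        Ok(true) => log::info!("worker shut down gracefully"),
        Ok(false) => log::info!("worker stopped with connections still alive"),
        Err(_) => log::warn!("worker dropped before reporting a shutdown result"),
    }
}
```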
#[derive(Clone)]
pub(crate) struct WorkerAvailability {
idx: usize,
waker: WakerQueue,
available: Arc<AtomicBool>,
}
impl WorkerAvailability {
pub fn new(waker: WakerQueue) -> Self {
pub fn new(idx: usize, waker: WakerQueue) -> Self {
WorkerAvailability {
idx,
waker,
available: Arc::new(AtomicBool::new(false)),
}
}
#[inline(always)]
pub fn available(&self) -> bool {
self.available.load(Ordering::Acquire)
}
pub fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release);
// notify the accept on switched to available.
// Ordering:
//
        // There could be multiple `set` calls happening in one `<ServerWorker as Future>::poll`.
// Order is important between them.
let old = self.available.swap(val, Ordering::AcqRel);
// Notify the accept on switched to available.
if !old && val {
self.waker.wake(WakerInterest::WorkerAvailable);
self.waker.wake(WakerInterest::WorkerAvailable(self.idx));
}
}
}
@ -126,14 +134,16 @@ impl WorkerAvailability {
///
/// Worker accepts Socket objects via unbounded channel and starts stream processing.
pub(crate) struct ServerWorker {
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
services: Vec<WorkerService>,
// UnboundedReceiver<Conn> should always be the first field.
    // It must be dropped as soon as ServerWorker is dropped.
rx: UnboundedReceiver<Conn>,
rx2: UnboundedReceiver<Stop>,
services: Box<[WorkerService]>,
availability: WorkerAvailability,
conns: Counter,
factories: Vec<Box<dyn InternalServiceFactory>>,
factories: Box<[Box<dyn InternalServiceFactory>]>,
state: WorkerState,
config: ServerWorkerConfig,
shutdown_timeout: Duration,
}
struct WorkerService {
@ -164,6 +174,7 @@ enum WorkerServiceStatus {
pub(crate) struct ServerWorkerConfig {
shutdown_timeout: Duration,
max_blocking_threads: usize,
max_concurrent_connections: usize,
}
impl Default for ServerWorkerConfig {
@ -173,6 +184,7 @@ impl Default for ServerWorkerConfig {
Self {
shutdown_timeout: Duration::from_secs(30),
max_blocking_threads,
max_concurrent_connections: 25600,
}
}
}
@ -182,6 +194,10 @@ impl ServerWorkerConfig {
self.max_blocking_threads = num;
}
pub(crate) fn max_concurrent_connections(&mut self, num: usize) {
self.max_concurrent_connections = num;
}
pub(crate) fn shutdown_timeout(&mut self, dur: Duration) {
self.shutdown_timeout = dur;
}
@ -193,7 +209,9 @@ impl ServerWorker {
factories: Vec<Box<dyn InternalServiceFactory>>,
availability: WorkerAvailability,
config: ServerWorkerConfig,
) -> WorkerHandle {
) -> (WorkerHandleAccept, WorkerHandleServer) {
assert!(!availability.available());
let (tx1, rx) = unbounded_channel();
let (tx2, rx2) = unbounded_channel();
let avail = availability.clone();
@ -208,20 +226,7 @@ impl ServerWorker {
.unwrap()
})
.spawn(async move {
availability.set(false);
let mut wrk = MAX_CONNS_COUNTER.with(move |conns| ServerWorker {
rx,
rx2,
availability,
factories,
config,
services: Vec::new(),
conns: conns.clone(),
state: WorkerState::Unavailable,
});
let fut = wrk
.factories
let fut = factories
.iter()
.enumerate()
.map(|(idx, factory)| {
@ -234,54 +239,76 @@ impl ServerWorker {
})
.collect::<Vec<_>>();
            // a second spawn to make sure the worker future runs as a non-boxed future,
            // as Arbiter::spawn would box the future before sending it to the arbiter.
// a second spawn to run !Send future tasks.
spawn(async move {
let res: Result<Vec<_>, _> = join_all(fut).await.into_iter().collect();
match res {
Ok(services) => {
for item in services {
for (factory, token, service) in item {
assert_eq!(token.0, wrk.services.len());
wrk.services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
}
}
}
let res = join_all(fut)
.await
.into_iter()
.collect::<Result<Vec<_>, _>>();
let services = match res {
Ok(res) => res
.into_iter()
.flatten()
.fold(Vec::new(), |mut services, (factory, token, service)| {
assert_eq!(token.0, services.len());
services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
services
})
.into_boxed_slice(),
Err(e) => {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
return;
}
}
wrk.await
};
                // a third spawn to make sure ServerWorker runs as a non-boxed future.
spawn(ServerWorker {
rx,
rx2,
services,
availability,
conns: Counter::new(config.max_concurrent_connections),
factories: factories.into_boxed_slice(),
state: Default::default(),
shutdown_timeout: config.shutdown_timeout,
});
});
});
WorkerHandle::new(idx, tx1, tx2, avail)
handle_pair(idx, tx1, tx2, avail)
}
fn restart_service(&mut self, token: Token, factory_id: usize) {
let factory = &self.factories[factory_id];
trace!("Service {:?} failed, restarting", factory.name(token));
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state = WorkerState::Restarting(Restart {
factory_id,
token,
fut: factory.create(),
});
}
fn shutdown(&mut self, force: bool) {
if force {
self.services.iter_mut().for_each(|srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopped;
}
self.services
.iter_mut()
.filter(|srv| srv.status == WorkerServiceStatus::Available)
.for_each(|srv| {
srv.status = if force {
WorkerServiceStatus::Stopped
} else {
WorkerServiceStatus::Stopping
};
});
} else {
self.services.iter_mut().for_each(move |srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopping;
}
});
}
}
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> {
let mut ready = self.conns.available(cx);
let mut failed = None;
for (idx, srv) in self.services.iter_mut().enumerate() {
if srv.status == WorkerServiceStatus::Available
|| srv.status == WorkerServiceStatus::Unavailable
@ -312,171 +339,178 @@ impl ServerWorker {
"Service {:?} readiness check returned error, restarting",
self.factories[srv.factory].name(Token(idx))
);
failed = Some((Token(idx), srv.factory));
srv.status = WorkerServiceStatus::Failed;
return Err((Token(idx), srv.factory));
}
}
}
}
if let Some(idx) = failed {
Err(idx)
} else {
Ok(ready)
}
Ok(ready)
}
}
enum WorkerState {
Available,
Unavailable,
Restarting(
usize,
Token,
LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
),
Shutdown(
Pin<Box<Sleep>>,
Pin<Box<Sleep>>,
Option<oneshot::Sender<bool>>,
),
Restarting(Restart),
Shutdown(Shutdown),
}
struct Restart {
factory_id: usize,
token: Token,
fut: LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
}
// Shutdown keeps the state necessary for server shutdown:
// Sleep for the interval at which shutdown progress is checked.
// Instant for the start time of shutdown.
// Sender for sending the shutdown outcome (forced/graceful) back to the stop caller.
struct Shutdown {
timer: Pin<Box<Sleep>>,
start_from: Instant,
tx: oneshot::Sender<bool>,
}
impl Default for WorkerState {
fn default() -> Self {
Self::Unavailable
}
}
impl Drop for ServerWorker {
fn drop(&mut self) {
        // Set availability to true so that if Accept tries to send a connection to this worker
        // it will find the worker is gone and remove it.
        // This is helpful when the worker is dropped unexpectedly.
        self.availability.set(true);
        // Stop the Arbiter that ServerWorker runs on, on drop.
Arbiter::current().stop();
}
}
impl Future for ServerWorker {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().get_mut();
// `StopWorker` message handler
if let Poll::Ready(Some(StopCommand { graceful, result })) =
Pin::new(&mut self.rx2).poll_recv(cx)
if let Poll::Ready(Some(Stop { graceful, tx })) = Pin::new(&mut this.rx2).poll_recv(cx)
{
self.availability.set(false);
let num = num_connections();
this.availability.set(false);
let num = this.conns.total();
if num == 0 {
info!("Shutting down worker, 0 connections");
let _ = result.send(true);
let _ = tx.send(true);
return Poll::Ready(());
} else if graceful {
self.shutdown(false);
let num = num_connections();
if num != 0 {
info!("Graceful worker shutdown, {} connections", num);
self.state = WorkerState::Shutdown(
Box::pin(sleep_until(Instant::now() + Duration::from_secs(1))),
Box::pin(sleep_until(Instant::now() + self.config.shutdown_timeout)),
Some(result),
);
} else {
let _ = result.send(true);
return Poll::Ready(());
}
info!("Graceful worker shutdown, {} connections", num);
this.shutdown(false);
this.state = WorkerState::Shutdown(Shutdown {
timer: Box::pin(sleep(Duration::from_secs(1))),
start_from: Instant::now(),
tx,
});
} else {
info!("Force shutdown worker, {} connections", num);
self.shutdown(true);
let _ = result.send(false);
this.shutdown(true);
let _ = tx.send(false);
return Poll::Ready(());
}
}
match self.state {
WorkerState::Unavailable => match self.check_readiness(cx) {
match this.state {
WorkerState::Unavailable => match this.check_readiness(cx) {
Ok(true) => {
self.state = WorkerState::Available;
self.availability.set(true);
this.state = WorkerState::Available;
this.availability.set(true);
self.poll(cx)
}
Ok(false) => Poll::Pending,
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
this.restart_service(token, idx);
self.poll(cx)
}
},
WorkerState::Restarting(idx, token, ref mut fut) => {
match fut.as_mut().poll(cx) {
Poll::Ready(Ok(item)) => {
                        // only interested in the first item?
if let Some((token, service)) = item.into_iter().next() {
trace!(
"Service {:?} has been restarted",
self.factories[idx].name(token)
);
self.services[token.0].created(service);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
}
Poll::Ready(Err(_)) => {
WorkerState::Restarting(ref mut restart) => {
let factory_id = restart.factory_id;
let token = restart.token;
let service = ready!(restart.fut.as_mut().poll(cx))
.unwrap_or_else(|_| {
panic!(
"Can not restart {:?} service",
self.factories[idx].name(token)
);
}
Poll::Pending => return Poll::Pending,
}
this.factories[factory_id].name(token)
)
})
.into_iter()
                    // Find the matching token in the vector. There should be only one,
                    // so the first match is enough.
.find(|(t, _)| *t == token)
.map(|(_, service)| service)
.expect("No BoxedServerService found");
trace!(
"Service {:?} has been restarted",
this.factories[factory_id].name(token)
);
this.services[token.0].created(service);
this.state = WorkerState::Unavailable;
self.poll(cx)
}
WorkerState::Shutdown(ref mut t1, ref mut t2, ref mut tx) => {
let num = num_connections();
if num == 0 {
let _ = tx.take().unwrap().send(true);
Arbiter::current().stop();
return Poll::Ready(());
}
WorkerState::Shutdown(ref mut shutdown) => {
// Wait for 1 second.
ready!(shutdown.timer.as_mut().poll(cx));
// check graceful timeout
if Pin::new(t2).poll(cx).is_ready() {
let _ = tx.take().unwrap().send(false);
self.shutdown(true);
Arbiter::current().stop();
return Poll::Ready(());
if this.conns.total() == 0 {
// Graceful shutdown.
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
let _ = shutdown.tx.send(true);
}
Poll::Ready(())
} else if shutdown.start_from.elapsed() >= this.shutdown_timeout {
// Timeout forceful shutdown.
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
let _ = shutdown.tx.send(false);
}
Poll::Ready(())
} else {
// Reset timer and wait for 1 second.
let time = Instant::now() + Duration::from_secs(1);
shutdown.timer.as_mut().reset(time);
shutdown.timer.as_mut().poll(cx)
}
// sleep for 1 second and then check again
if t1.as_mut().poll(cx).is_ready() {
*t1 = Box::pin(sleep_until(Instant::now() + Duration::from_secs(1)));
let _ = t1.as_mut().poll(cx);
}
Poll::Pending
}
// actively poll stream and handle worker command
WorkerState::Available => loop {
match self.check_readiness(cx) {
Ok(true) => (),
match this.check_readiness(cx) {
Ok(true) => {}
Ok(false) => {
trace!("Worker is unavailable");
self.availability.set(false);
self.state = WorkerState::Unavailable;
this.availability.set(false);
this.state = WorkerState::Unavailable;
return self.poll(cx);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
this.restart_service(token, idx);
this.availability.set(false);
return self.poll(cx);
}
}
match Pin::new(&mut self.rx).poll_recv(cx) {
match ready!(Pin::new(&mut this.rx).poll_recv(cx)) {
// handle incoming io stream
Poll::Ready(Some(WorkerCommand(msg))) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.service
.call((Some(guard), msg.io));
Some(msg) => {
let guard = this.conns.get();
let _ = this.services[msg.token.0].service.call((guard, msg.io));
}
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
None => return Poll::Ready(()),
};
},
}

View File

@ -1,10 +1,12 @@
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::{mpsc, Arc};
use std::{net, thread, time};
use std::{net, thread, time::Duration};
use actix_rt::{net::TcpStream, time::sleep};
use actix_server::Server;
use actix_service::fn_service;
use futures_util::future::{lazy, ok};
use actix_utils::future::ok;
use futures_util::future::lazy;
fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
@ -30,12 +32,13 @@ fn test_bind() {
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
@ -62,7 +65,7 @@ fn test_listen() {
});
let sys = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
@ -71,11 +74,11 @@ fn test_listen() {
#[test]
#[cfg(unix)]
fn test_start() {
use std::io::Read;
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use bytes::Bytes;
use futures_util::sink::SinkExt;
use std::io::Read;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
@ -110,16 +113,16 @@ fn test_start() {
// pause
let _ = srv.pause();
thread::sleep(time::Duration::from_millis(200));
thread::sleep(Duration::from_millis(200));
let mut conn = net::TcpStream::connect(addr).unwrap();
conn.set_read_timeout(Some(time::Duration::from_millis(100)))
conn.set_read_timeout(Some(Duration::from_millis(100)))
.unwrap();
let res = conn.read_exact(&mut buf);
assert!(res.is_err());
// resume
let _ = srv.resume();
thread::sleep(time::Duration::from_millis(100));
thread::sleep(Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
@ -131,10 +134,10 @@ fn test_start() {
// stop
let _ = srv.stop(false);
thread::sleep(time::Duration::from_millis(100));
thread::sleep(Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_err());
thread::sleep(time::Duration::from_millis(100));
thread::sleep(Duration::from_millis(100));
sys.stop();
let _ = h.join();
}
@ -175,11 +178,12 @@ fn test_configure() {
.workers(1)
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr1).is_ok());
assert!(net::TcpStream::connect(addr2).is_ok());
@ -189,100 +193,387 @@ fn test_configure() {
let _ = h.join();
}
#[test]
#[cfg(unix)]
fn test_on_stop_graceful() {
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use bytes::Bytes;
use futures_util::sink::SinkExt;
#[actix_rt::test]
async fn test_max_concurrent_connections() {
// Note:
    // A TCP listener accepts connections based on its backlog setting.
    //
    // The limit tested here, on the other hand, only caps the number of concurrent TCP streams
    // a worker thread will accept.
let bool = std::sync::Arc::new(AtomicBool::new(false));
use tokio::io::AsyncWriteExt;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
thread::spawn({
let bool = bool.clone();
move || {
actix_rt::System::new().block_on(async {
let srv = Server::build()
.backlog(100)
.disable_signals()
.on_stop(move || {
let bool = bool.clone();
let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();
let max_conn = 3;
let h = thread::spawn(move || {
actix_rt::System::new().block_on(async {
let server = Server::build()
                // Set a relatively high backlog.
                .backlog(12)
                // max connections per worker is 3.
.maxconn(max_conn)
.workers(1)
.disable_signals()
.bind("test", addr, move || {
let counter = counter.clone();
fn_service(move |_io: TcpStream| {
let counter = counter.clone();
async move {
bool.store(true, Ordering::SeqCst);
counter.fetch_add(1, Ordering::SeqCst);
sleep(Duration::from_secs(20)).await;
counter.fetch_sub(1, Ordering::SeqCst);
Ok::<(), ()>(())
}
})
.bind("test", addr, move || {
fn_service(|io: TcpStream| async move {
let mut f = Framed::new(io, BytesCodec);
f.send(Bytes::from_static(b"test")).await.unwrap();
Ok::<_, ()>(())
})
})
.unwrap()
.run();
})?
.run();
tx.send(srv.clone()).unwrap();
let _ = tx.send((server.clone(), actix_rt::System::current()));
srv.await
})
}
server.await
})
});
let srv = rx.recv().unwrap();
let _ = srv.stop(true);
thread::sleep(time::Duration::from_millis(300));
assert!(bool.load(Ordering::SeqCst));
let (srv, sys) = rx.recv().unwrap();
let mut conns = vec![];
for _ in 0..12 {
let conn = tokio::net::TcpStream::connect(addr).await.unwrap();
conns.push(conn);
}
sleep(Duration::from_secs(5)).await;
    // The counter would remain at 3 even with 12 successful connections;
    // the other 9 remain in the backlog.
assert_eq!(max_conn, counter_clone.load(Ordering::SeqCst));
for mut conn in conns {
conn.shutdown().await.unwrap();
}
srv.stop(false).await;
sys.stop();
let _ = h.join().unwrap();
}
#[test]
#[cfg(unix)]
fn test_on_stop_force() {
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use bytes::Bytes;
use futures_util::sink::SinkExt;
#[actix_rt::test]
async fn test_service_restart() {
use std::task::{Context, Poll};
let bool = std::sync::Arc::new(AtomicBool::new(false));
use actix_service::{fn_factory, Service};
use futures_core::future::LocalBoxFuture;
use tokio::io::AsyncWriteExt;
struct TestService(Arc<AtomicUsize>);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let TestService(ref counter) = self;
let c = counter.fetch_add(1, Ordering::SeqCst);
// Force the service to restart on first readiness check.
if c > 0 {
Poll::Ready(Ok(()))
} else {
Poll::Ready(Err(()))
}
}
fn call(&self, _: TcpStream) -> Self::Future {
Box::pin(async { Ok(()) })
}
}
let addr1 = unused_addr();
let addr2 = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = Arc::new(AtomicUsize::new(0));
let num_clone = num.clone();
let num2_clone = num2.clone();
let h = thread::spawn(move || {
actix_rt::System::new().block_on(async {
let server = Server::build()
.backlog(1)
.disable_signals()
.configure(move |cfg| {
let num = num.clone();
let num2 = num2.clone();
cfg.bind("addr1", addr1)
.unwrap()
.bind("addr2", addr2)
.unwrap()
.apply(move |rt| {
let num = num.clone();
let num2 = num2.clone();
rt.service(
"addr1",
fn_factory(move || {
let num = num.clone();
async move { Ok::<_, ()>(TestService(num)) }
}),
);
rt.service(
"addr2",
fn_factory(move || {
let num2 = num2.clone();
async move { Ok::<_, ()>(TestService(num2)) }
}),
);
})
})
.unwrap()
.workers(1)
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let (server, sys) = rx.recv().unwrap();
for _ in 0..5 {
TcpStream::connect(addr1)
.await
.unwrap()
.shutdown()
.await
.unwrap();
TcpStream::connect(addr2)
.await
.unwrap()
.shutdown()
.await
.unwrap();
}
sleep(Duration::from_secs(3)).await;
assert!(num_clone.load(Ordering::SeqCst) > 5);
assert!(num2_clone.load(Ordering::SeqCst) > 5);
sys.stop();
let _ = server.stop(false);
let _ = h.join().unwrap();
let addr1 = unused_addr();
let addr2 = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = Arc::new(AtomicUsize::new(0));
let num_clone = num.clone();
let num2_clone = num2.clone();
let h = thread::spawn(move || {
let num = num.clone();
actix_rt::System::new().block_on(async {
let server = Server::build()
.backlog(1)
.disable_signals()
.bind("addr1", addr1, move || {
let num = num.clone();
fn_factory(move || {
let num = num.clone();
async move { Ok::<_, ()>(TestService(num)) }
})
})
.unwrap()
.bind("addr2", addr2, move || {
let num2 = num2.clone();
fn_factory(move || {
let num2 = num2.clone();
async move { Ok::<_, ()>(TestService(num2)) }
})
})
.unwrap()
.workers(1)
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let (server, sys) = rx.recv().unwrap();
for _ in 0..5 {
TcpStream::connect(addr1)
.await
.unwrap()
.shutdown()
.await
.unwrap();
TcpStream::connect(addr2)
.await
.unwrap()
.shutdown()
.await
.unwrap();
}
sleep(Duration::from_secs(3)).await;
assert!(num_clone.load(Ordering::SeqCst) > 5);
assert!(num2_clone.load(Ordering::SeqCst) > 5);
sys.stop();
let _ = server.stop(false);
let _ = h.join().unwrap();
}
#[ignore]
#[actix_rt::test]
async fn worker_restart() {
use actix_service::{Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
struct TestServiceFactory(Arc<AtomicUsize>);
impl ServiceFactory<TcpStream> for TestServiceFactory {
type Response = ();
type Error = ();
type Config = ();
type Service = TestService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: Self::Config) -> Self::Future {
let counter = self.0.fetch_add(1, Ordering::Relaxed);
Box::pin(async move { Ok(TestService(counter)) })
}
}
struct TestService(usize);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, stream: TcpStream) -> Self::Future {
let counter = self.0;
let mut stream = stream.into_std().unwrap();
use std::io::Write;
let str = counter.to_string();
let buf = str.as_bytes();
let mut written = 0;
while written < buf.len() {
if let Ok(n) = stream.write(&buf[written..]) {
written += n;
}
}
stream.flush().unwrap();
stream.shutdown(net::Shutdown::Write).unwrap();
// force worker 2 to restart service once.
if counter == 2 {
panic!("panic on purpose")
} else {
Box::pin(async { Ok(()) })
}
}
}
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
thread::spawn({
let bool = bool.clone();
move || {
actix_rt::System::new().block_on(async {
let srv = Server::build()
.backlog(100)
.disable_signals()
.on_stop(move || {
let bool = bool.clone();
async move {
bool.store(true, Ordering::SeqCst);
}
})
.bind("test", addr, move || {
fn_service(|io: TcpStream| async move {
let mut f = Framed::new(io, BytesCodec);
f.send(Bytes::from_static(b"test")).await.unwrap();
Ok::<_, ()>(())
})
})
.unwrap()
.run();
let counter = Arc::new(AtomicUsize::new(1));
let h = thread::spawn(move || {
let counter = counter.clone();
actix_rt::System::new().block_on(async {
let server = Server::build()
.disable_signals()
.bind("addr", addr, move || TestServiceFactory(counter.clone()))
.unwrap()
.workers(2)
.run();
tx.send(srv.clone()).unwrap();
srv.await
})
}
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let srv = rx.recv().unwrap();
let _ = srv.stop(false);
thread::sleep(time::Duration::from_millis(300));
assert!(bool.load(Ordering::SeqCst));
let (server, sys) = rx.recv().unwrap();
sleep(Duration::from_secs(3)).await;
let mut buf = [0; 8];
    // worker 1 does not restart and returns its id consistently.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
    // worker 2 dies after returning its response.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("2", id);
stream.shutdown().await.unwrap();
// request to worker 1
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
    // worker 2 is restarting and work goes to worker 1.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
    // worker 2 restarted but worker 1 is still the next to accept a connection.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
    // worker 2 accepts connections again but its id is now 3.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("3", id);
stream.shutdown().await.unwrap();
sys.stop();
let _ = server.stop(false);
let _ = h.join().unwrap();
}

View File

@ -3,6 +3,20 @@
## Unreleased - 2021-xx-xx
## 2.0.0 - 2021-04-16
* Removed pipeline and related structs/functions. [#335]
[#335]: https://github.com/actix/actix-net/pull/335
## 2.0.0-beta.5 - 2021-03-15
* Add default `Service` trait impl for `Rc<S: Service>` and `&S: Service`. [#288]
* Add `boxed::rc_service` function for constructing `boxed::RcService` type [#290]
[#288]: https://github.com/actix/actix-net/pull/288
[#290]: https://github.com/actix/actix-net/pull/290
## 2.0.0-beta.4 - 2021-02-04
* `Service::poll_ready` and `Service::call` receive `&self`. [#247]
* `apply_fn` and `apply_fn_factory` now receive `Fn(Req, &Service)` function type. [#247]

View File

@ -1,6 +1,6 @@
[package]
name = "actix-service"
version = "2.0.0-beta.4"
version = "2.0.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
@ -8,11 +8,8 @@ authors = [
]
description = "Service trait and combinators for representing asynchronous request/response operations."
keywords = ["network", "framework", "async", "futures", "service"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-service"
readme = "README.md"
categories = ["network-programming", "asynchronous"]
repository = "https://github.com/actix/actix-net"
license = "MIT OR Apache-2.0"
edition = "2018"
@ -22,8 +19,10 @@ path = "src/lib.rs"
[dependencies]
futures-core = { version = "0.3.7", default-features = false }
paste = "1"
pin-project-lite = "0.2"
[dev-dependencies]
actix-rt = "2.0.0"
actix-utils = "3.0.0"
futures-util = { version = "0.3.7", default-features = false }

View File

@ -3,11 +3,11 @@
> Service trait and combinators for representing asynchronous request/response operations.
[![crates.io](https://img.shields.io/crates/v/actix-service?label=latest)](https://crates.io/crates/actix-service)
[![Documentation](https://docs.rs/actix-service/badge.svg?version=2.0.0-beta.4)](https://docs.rs/actix-service/2.0.0-beta.4)
[![Documentation](https://docs.rs/actix-service/badge.svg?version=2.0.0)](https://docs.rs/actix-service/2.0.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![License](https://img.shields.io/crates/l/actix-service.svg)
[![Dependency Status](https://deps.rs/crate/actix-service/2.0.0-beta.4/status.svg)](https://deps.rs/crate/actix-service/2.0.0-beta.4)
[![Download](https://img.shields.io/crates/d/actix-service.svg)](https://crates.io/crates/actix-service)
[![Dependency Status](https://deps.rs/crate/actix-service/2.0.0/status.svg)](https://deps.rs/crate/actix-service/2.0.0)
![Download](https://img.shields.io/crates/d/actix-service.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
See documentation for detailed explanations of these components: https://docs.rs/actix-service.

View File

@ -11,11 +11,11 @@ use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `and_then` combinator, chaining a computation onto the end
/// of another service which completes successfully.
/// Service for the `and_then` combinator, chaining a computation onto the end of another service
/// which completes successfully.
///
/// This is created by the `Pipeline::and_then` method.
pub(crate) struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
pub struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B, Req> AndThenService<A, B, Req> {
/// Create new `AndThen` combinator
@ -64,7 +64,7 @@ where
}
pin_project! {
pub(crate) struct AndThenServiceResponse<A, B, Req>
pub struct AndThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
@ -117,7 +117,7 @@ where
}
/// `.and_then()` service factory combinator
pub(crate) struct AndThenServiceFactory<A, B, Req>
pub struct AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
@ -200,7 +200,7 @@ where
}
pin_project! {
pub(crate) struct AndThenServiceFactoryResponse<A, B, Req>
pub struct AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
@ -272,7 +272,9 @@ mod tests {
use futures_util::future::lazy;
use crate::{
fn_factory, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory,
fn_factory, ok,
pipeline::{pipeline, pipeline_factory},
ready, Ready, Service, ServiceFactory,
};
struct Srv1(Rc<Cell<usize>>);

View File

@ -214,7 +214,11 @@ mod tests {
use futures_util::future::lazy;
use super::*;
use crate::{ok, pipeline, pipeline_factory, Ready, Service, ServiceFactory};
use crate::{
ok,
pipeline::{pipeline, pipeline_factory},
Ready, Service, ServiceFactory,
};
#[derive(Clone)]
struct Srv;

View File

@ -1,21 +1,69 @@
use alloc::boxed::Box;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
//! Trait object forms of services and service factories.
use alloc::{boxed::Box, rc::Rc};
use core::{future::Future, pin::Pin};
use paste::paste;
use crate::{Service, ServiceFactory};
/// A boxed future with no send bound or lifetime parameters.
pub type BoxFuture<T> = Pin<Box<dyn Future<Output = T>>>;
pub type BoxService<Req, Res, Err> =
Box<dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>>;
macro_rules! service_object {
($name: ident, $type: tt, $fn_name: ident) => {
paste! {
#[doc = "Type alias for service trait object using `" $type "`."]
pub type $name<Req, Res, Err> = $type<
dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>,
>;
#[doc = "Wraps service as a trait object using [`" $name "`]."]
pub fn $fn_name<S, Req>(service: S) -> $name<Req, S::Response, S::Error>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
$type::new(ServiceWrapper::new(service))
}
}
};
}
service_object!(BoxService, Box, service);
service_object!(RcService, Rc, rc_service);
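
For illustration, a hedged sketch of the generated helpers in action; it assumes, as the crate's tests do, that the value returned by `fn_service` can be used directly as a `Service`:

```rust
// Sketch: erase concrete closure-service types behind the generated aliases.
fn boxed_identity() -> BoxService<u8, u8, ()> {
    service(crate::fn_service(|n: u8| async move { Ok::<_, ()>(n) }))
}

fn shared_identity() -> RcService<u8, u8, ()> {
    rc_service(crate::fn_service(|n: u8| async move { Ok::<_, ()>(n) }))
}
```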
struct ServiceWrapper<S> {
inner: S,
}
impl<S> ServiceWrapper<S> {
fn new(inner: S) -> Self {
Self { inner }
}
}
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S>
where
S: Service<Req, Response = Res, Error = Err>,
S::Future: 'static,
{
type Response = Res;
type Error = Err;
type Future = BoxFuture<Result<Res, Err>>;
crate::forward_ready!(inner);
fn call(&self, req: Req) -> Self::Future {
Box::pin(self.inner.call(req))
}
}
/// Wrapper for a service factory that will map its services to boxed trait object services.
pub struct BoxServiceFactory<Cfg, Req, Res, Err, InitErr>(Inner<Cfg, Req, Res, Err, InitErr>);
/// Create boxed service factory
/// Wraps a service factory that returns service trait objects.
pub fn factory<SF, Req>(
factory: SF,
) -> BoxServiceFactory<SF::Config, Req, SF::Response, SF::Error, SF::InitError>
@ -28,20 +76,7 @@ where
SF::Error: 'static,
SF::InitError: 'static,
{
BoxServiceFactory(Box::new(FactoryWrapper {
factory,
_t: PhantomData,
}))
}
/// Create boxed service
pub fn service<S, Req>(service: S) -> BoxService<Req, S::Response, S::Error>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
Box::new(ServiceWrapper(service, PhantomData))
BoxServiceFactory(Box::new(FactoryWrapper(factory)))
}
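
And a similarly hedged sketch for the factory side, pairing `factory` with the crate's `fn_factory` helper:

```rust
// Sketch: box a closure-based factory; BoxServiceFactory<Cfg, Req, Res, Err, InitErr>.
fn boxed_echo_factory() -> BoxServiceFactory<(), u8, u8, (), ()> {
    factory(crate::fn_factory(|| async {
        Ok::<_, ()>(crate::fn_service(|n: u8| async move { Ok::<_, ()>(n) }))
    }))
}
```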
type Inner<C, Req, Res, Err, InitErr> = Box<
@ -66,9 +101,9 @@ where
{
type Response = Res;
type Error = Err;
type InitError = InitErr;
type Config = C;
type Service = BoxService<Req, Res, Err>;
type InitError = InitErr;
type Future = BoxFuture<Result<Self::Service, InitErr>>;
@ -77,12 +112,9 @@ where
}
}
struct FactoryWrapper<SF, Req, Cfg> {
factory: SF,
_t: PhantomData<(Req, Cfg)>,
}
struct FactoryWrapper<SF>(SF);
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF, Req, Cfg>
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF>
where
Req: 'static,
Res: 'static,
@ -95,47 +127,13 @@ where
{
type Response = Res;
type Error = Err;
type InitError = InitErr;
type Config = Cfg;
type Service = BoxService<Req, Res, Err>;
type InitError = InitErr;
type Future = BoxFuture<Result<Self::Service, Self::InitError>>;
fn new_service(&self, cfg: Cfg) -> Self::Future {
let fut = self.factory.new_service(cfg);
Box::pin(async {
let res = fut.await;
res.map(ServiceWrapper::boxed)
})
}
}
struct ServiceWrapper<S: Service<Req>, Req>(S, PhantomData<Req>);
impl<S, Req> ServiceWrapper<S, Req>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
fn boxed(service: S) -> BoxService<Req, S::Response, S::Error> {
Box::new(ServiceWrapper(service, PhantomData))
}
}
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S, Req>
where
S: Service<Req, Response = Res, Error = Err>,
S::Future: 'static,
{
type Response = Res;
type Error = Err;
type Future = BoxFuture<Result<Res, Err>>;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(ctx)
}
fn call(&self, req: Req) -> Self::Future {
Box::pin(self.0.call(req))
let f = self.0.new_service(cfg);
Box::pin(async { f.await.map(|s| Box::new(ServiceWrapper::new(s)) as _) })
}
}

View File

@ -1,5 +1,12 @@
use crate::{dev, Service, ServiceFactory};
use crate::{
and_then::{AndThenService, AndThenServiceFactory},
map::Map,
map_err::MapErr,
transform_err::TransformMapInitErr,
IntoService, IntoServiceFactory, Service, ServiceFactory, Transform,
};
/// An extension trait for [`Service`]s that provides a variety of convenient adapters.
pub trait ServiceExt<Req>: Service<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
@ -10,12 +17,12 @@ pub trait ServiceExt<Req>: Service<Req> {
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it, similar to the existing `map` methods in the
/// standard library.
fn map<F, R>(self, f: F) -> dev::Map<Self, F, Req, R>
fn map<F, R>(self, f: F) -> Map<Self, F, Req, R>
where
Self: Sized,
F: FnMut(Self::Response) -> R,
{
dev::Map::new(self, f)
Map::new(self, f)
}
/// Map this service's error to a different error, returning a new service.
@ -26,17 +33,34 @@ pub trait ServiceExt<Req>: Service<Req> {
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it.
fn map_err<F, E>(self, f: F) -> dev::MapErr<Self, Req, F, E>
fn map_err<F, E>(self, f: F) -> MapErr<Self, Req, F, E>
where
Self: Sized,
F: Fn(Self::Error) -> E,
{
dev::MapErr::new(self, f)
MapErr::new(self, f)
}
/// Call another service after call to this one has resolved successfully.
///
/// This function can be used to chain two services together and ensure that the second service
    /// isn't called until the call to the first service has finished. The result of the call to
    /// the first service is used as the input parameter for the second service's call.
///
/// Note that this function consumes the receiving service and returns a wrapped version of it.
fn and_then<I, S1>(self, service: I) -> AndThenService<Self, S1, Req>
where
Self: Sized,
I: IntoService<S1, Self::Response>,
S1: Service<Self::Response, Error = Self::Error>,
{
AndThenService::new(self, service.into_service())
}
}
impl<S, Req> ServiceExt<Req> for S where S: Service<Req> {}
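
To make the chaining concrete, a hedged example in external-crate style, like the macro docs elsewhere in this diff; it assumes the value returned by `fn_service` is usable directly as a `Service`:

```rust
// Sketch: the first service's response becomes the second service's request.
use actix_service::{fn_service, Service, ServiceExt};

async fn demo() -> Result<(), ()> {
    let double = fn_service(|n: u8| async move { Ok::<_, ()>(u32::from(n) * 2) });
    let stringify = fn_service(|n: u32| async move { Ok::<_, ()>(n.to_string()) });

    let svc = double.and_then(stringify);
    assert_eq!(svc.call(21).await?, "42");
    Ok(())
}
```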
/// An extension trait for [`ServiceFactory`]s that provides a variety of convenient adapters.
pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
@ -65,6 +89,36 @@ pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
{
crate::map_init_err::MapInitErr::new(self, f)
}
/// Call another service after call to this one has resolved successfully.
fn and_then<I, SF1>(self, factory: I) -> AndThenServiceFactory<Self, SF1, Req>
where
Self: Sized,
Self::Config: Clone,
I: IntoServiceFactory<SF1, Self::Response>,
SF1: ServiceFactory<
Self::Response,
Config = Self::Config,
Error = Self::Error,
InitError = Self::InitError,
>,
{
AndThenServiceFactory::new(self, factory.into_factory())
}
}
impl<S, Req> ServiceFactoryExt<Req> for S where S: ServiceFactory<Req> {}
impl<SF, Req> ServiceFactoryExt<Req> for SF where SF: ServiceFactory<Req> {}
/// An extension trait for [`Transform`]s that provides a variety of convenient adapters.
pub trait TransformExt<S, Req>: Transform<S, Req> {
    /// Return a new `Transform` whose init error is mapped to a different type.
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
TransformMapInitErr::new(self, f)
}
}
impl<T, Req> TransformExt<T, Req> for T where T: Transform<T, Req> {}

View File

@ -1,4 +1,4 @@
use core::{future::Future, marker::PhantomData, task::Poll};
use core::{future::Future, marker::PhantomData};
use crate::{ok, IntoService, IntoServiceFactory, Ready, Service, ServiceFactory};
@ -15,8 +15,7 @@ where
/// Create `ServiceFactory` for function that can produce services
///
/// # Example
///
/// # Examples
/// ```
/// use std::io;
/// use actix_service::{fn_factory, fn_service, Service, ServiceFactory};
@ -62,11 +61,10 @@ where
/// Create `ServiceFactory` for function that accepts config argument and can produce services
///
/// Any function that has following form `Fn(Config) -> Future<Output = Service>` could
/// act as a `ServiceFactory`.
///
/// # Example
/// Any function that has following form `Fn(Config) -> Future<Output = Service>` could act as
/// a `ServiceFactory`.
///
/// # Examples
/// ```
/// use std::io;
/// use actix_service::{fn_factory_with_config, fn_service, Service, ServiceFactory};

View File

@ -1,7 +1,8 @@
//! See [`Service`] docs for information on this crate's foundational trait.
#![no_std]
#![deny(rust_2018_idioms, nonstandard_style)]
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
#![warn(missing_docs)]
#![allow(clippy::type_complexity)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
@ -21,6 +22,7 @@ mod apply_cfg;
pub mod boxed;
mod ext;
mod fn_service;
mod macros;
mod map;
mod map_config;
mod map_err;
@ -33,11 +35,10 @@ mod transform_err;
pub use self::apply::{apply_fn, apply_fn_factory};
pub use self::apply_cfg::{apply_cfg, apply_cfg_factory};
pub use self::ext::{ServiceExt, ServiceFactoryExt};
pub use self::ext::{ServiceExt, ServiceFactoryExt, TransformExt};
pub use self::fn_service::{fn_factory, fn_factory_with_config, fn_service};
pub use self::map_config::{map_config, unit_config};
pub use self::pipeline::{pipeline, pipeline_factory, Pipeline, PipelineFactory};
pub use self::transform::{apply, Transform};
pub use self::transform::{apply, ApplyTransform, Transform};
#[allow(unused_imports)]
use self::ready::{err, ok, ready, Ready};
@ -52,8 +53,14 @@ use self::ready::{err, ok, ready, Ready};
/// async fn(Request) -> Result<Response, Err>
/// ```
///
/// The `Service` trait just generalizes this form where each parameter is described as an
/// associated type on the trait. Services can also have mutable state that influence computation.
/// The `Service` trait just generalizes this form. Requests are defined as a generic type parameter
/// and responses and other details are defined as associated types on the trait impl. Notice that
/// this design means that services can receive many request types and converge them to a single
/// response type.
///
/// Services can also have mutable state that influences computation by using a `Cell`, `RefCell`
/// or `Mutex`. Services intentionally do not take `&mut self` to reduce overhead in the
/// common cases.
///
/// `Service` provides a symmetric and uniform API; the same abstractions can be used to represent
/// both clients and servers. Services describe only _transformation_ operations which encourage
@ -63,11 +70,10 @@ use self::ready::{err, ok, ready, Ready};
/// ```ignore
/// struct MyService;
///
/// impl Service for MyService {
/// type Request = u8;
/// impl Service<u8> for MyService {
/// type Response = u64;
/// type Error = MyError;
/// type Future = Pin<Box<Future<Output=Result<Self::Response, Self::Error>>>>;
/// type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
///
/// fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ... }
///
@ -76,10 +82,13 @@ use self::ready::{err, ok, ready, Ready};
/// ```
///
/// Sometimes it is not necessary to implement the Service trait. For example, the above service
/// could be rewritten as a simple function and passed to [fn_service](fn_service()).
/// could be rewritten as a simple function and passed to [`fn_service`](fn_service()).
///
/// ```ignore
/// async fn my_service(req: u8) -> Result<u64, MyError>;
///
/// let svc = fn_service(my_service)
/// svc.call(123)
/// ```
pub trait Service<Req> {
/// Responses given by the service.
@ -93,40 +102,40 @@ pub trait Service<Req> {
/// Returns `Ready` when the service is able to process requests.
///
/// If the service is at capacity, then `Pending` is returned and the task
/// is notified when the service becomes ready again. This function is
/// expected to be called while on a task.
/// If the service is at capacity, then `Pending` is returned and the task is notified when the
/// service becomes ready again. This function is expected to be called while on a task.
///
/// This is a **best effort** implementation. False positives are permitted.
/// It is permitted for the service to return `Ready` from a `poll_ready`
/// call and the next invocation of `call` results in an error.
/// This is a best effort implementation. False positives are permitted. It is permitted for
/// the service to return `Ready` from a `poll_ready` call and the next invocation of `call`
/// results in an error.
///
/// # Notes
/// 1. `.poll_ready()` might be called on different task from actual service call.
/// 1. In case of chained services, `.poll_ready()` get called for all services at once.
/// 1. `poll_ready` might be called on a different task to `call`.
/// 1. In cases of chained services, `.poll_ready()` is called for all services at once.
fn poll_ready(&self, ctx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
/// Process the request and return the response asynchronously.
///
/// This function is expected to be callable off task. As such,
/// implementations should take care to not call `poll_ready`. If the
/// service is at capacity and the request is unable to be handled, the
/// returned `Future` should resolve to an error.
/// This function is expected to be callable off-task. As such, implementations of `call` should
/// take care to not call `poll_ready`. If the service is at capacity and the request is unable
/// to be handled, the returned `Future` should resolve to an error.
///
/// Calling `call` without calling `poll_ready` is permitted. The
/// implementation must be resilient to this fact.
/// Invoking `call` without first invoking `poll_ready` is permitted. Implementations must be
/// resilient to this fact.
fn call(&self, req: Req) -> Self::Future;
}
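
To ground the interior-mutability note above, a minimal sketch of a stateful service (external-crate style; `futures_util` is only used for its ready future, mirroring the macro docs later in this diff):

```rust
// Sketch: per-instance state behind a Cell, since poll_ready/call only get `&self`.
use std::cell::Cell;

use actix_service::Service;
use futures_util::future::{ready, Ready};

struct CountingService {
    hits: Cell<u64>,
}

impl Service<u8> for CountingService {
    type Response = u64;
    type Error = std::convert::Infallible;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    // Always ready; the counter never blocks.
    actix_service::always_ready!();

    fn call(&self, _req: u8) -> Self::Future {
        let n = self.hits.get() + 1;
        self.hits.set(n);
        ready(Ok(n))
    }
}
```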
/// Factory for creating `Service`s.
///
/// Acts as a service factory. This is useful for cases where new `Service`s
/// must be produced. One case is a TCP server listener. The listener
/// accepts new TCP streams, obtains a new `Service` using the
/// `ServiceFactory` trait, and uses the new `Service` to process inbound
/// requests on that new TCP stream.
/// This is useful for cases where new `Service`s must be produced. One case is a TCP
/// server listener: a listener accepts new connections, constructs a new `Service` for each using
/// the `ServiceFactory` trait, and uses the new `Service` to process inbound requests on that new
/// connection.
///
/// `Config` is a service factory configuration type.
///
/// Simple factories may be able to use [`fn_factory`] or [`fn_factory_with_config`] to
/// reduce boilerplate.
pub trait ServiceFactory<Req> {
/// Responses given by the created services.
type Response;
@ -143,13 +152,14 @@ pub trait ServiceFactory<Req> {
/// Errors potentially raised while building a service.
type InitError;
    /// The future of the `Service` instance.
type Future: Future<Output = Result<Self::Service, Self::InitError>>;
/// Create and return a new service asynchronously.
fn new_service(&self, cfg: Self::Config) -> Self::Future;
}
// TODO: remove the impl for mutable references.
impl<'a, S, Req> Service<Req> for &'a mut S
where
S: Service<Req> + 'a,
@ -167,6 +177,23 @@ where
}
}
impl<'a, S, Req> Service<Req> for &'a S
where
S: Service<Req> + 'a,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
impl<S, Req> Service<Req> for Box<S>
where
S: Service<Req> + ?Sized,
@ -184,24 +211,25 @@ where
}
}
impl<S, Req> Service<Req> for RefCell<S>
impl<S, Req> Service<Req> for Rc<S>
where
S: Service<Req>,
S: Service<Req> + ?Sized,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.borrow().poll_ready(ctx)
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
self.borrow().call(request)
(**self).call(request)
}
}
impl<S, Req> Service<Req> for Rc<RefCell<S>>
/// This impl is deprecated since v2 because the `Service` trait now receives shared reference.
impl<S, Req> Service<Req> for RefCell<S>
where
S: Service<Req>,
{
@ -294,44 +322,3 @@ where
{
tp.into_service()
}
pub mod dev {
pub use crate::apply::{Apply, ApplyFactory};
pub use crate::fn_service::{
FnService, FnServiceConfig, FnServiceFactory, FnServiceNoConfig,
};
pub use crate::map::{Map, MapServiceFactory};
pub use crate::map_config::{MapConfig, UnitConfig};
pub use crate::map_err::{MapErr, MapErrServiceFactory};
pub use crate::map_init_err::MapInitErr;
pub use crate::transform::ApplyTransform;
pub use crate::transform_err::TransformMapInitErr;
}
#[macro_export]
macro_rules! always_ready {
() => {
#[inline]
fn poll_ready(
&self,
_: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
};
}
#[macro_export]
macro_rules! forward_ready {
($field:ident) => {
#[inline]
fn poll_ready(
&self,
cx: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
self.$field
.poll_ready(cx)
.map_err(::core::convert::Into::into)
}
};
}

181
actix-service/src/macros.rs Normal file
View File

@ -0,0 +1,181 @@
/// An implementation of [`poll_ready`]() that always signals readiness.
///
/// [`poll_ready`]: crate::Service::poll_ready
///
/// # Examples
/// ```no_run
/// use actix_service::Service;
/// use futures_util::future::{ready, Ready};
///
/// struct IdentityService;
///
/// impl Service<u32> for IdentityService {
/// type Response = u32;
/// type Error = ();
/// type Future = Ready<Result<Self::Response, Self::Error>>;
///
/// actix_service::always_ready!();
///
/// fn call(&self, req: u32) -> Self::Future {
/// ready(Ok(req))
/// }
/// }
/// ```
#[macro_export]
macro_rules! always_ready {
() => {
#[inline]
fn poll_ready(
&self,
_: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
::core::task::Poll::Ready(Ok(()))
}
};
}
/// An implementation of [`poll_ready`] that forwards readiness checks to a
/// named struct field.
///
/// Tuple structs are not supported.
///
/// [`poll_ready`]: crate::Service::poll_ready
///
/// # Examples
/// ```no_run
/// use actix_service::Service;
/// use futures_util::future::{ready, Ready};
///
/// struct WrapperService<S> {
/// inner: S,
/// }
///
/// impl<S> Service<()> for WrapperService<S>
/// where
/// S: Service<()>,
/// {
/// type Response = S::Response;
/// type Error = S::Error;
/// type Future = S::Future;
///
/// actix_service::forward_ready!(inner);
///
/// fn call(&self, req: ()) -> Self::Future {
/// self.inner.call(req)
/// }
/// }
/// ```
#[macro_export]
macro_rules! forward_ready {
($field:ident) => {
#[inline]
fn poll_ready(
&self,
cx: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
self.$field
.poll_ready(cx)
.map_err(::core::convert::Into::into)
}
};
}
#[cfg(test)]
mod tests {
use core::{
cell::Cell,
convert::Infallible,
task::{self, Context, Poll},
};
use futures_util::{
future::{ready, Ready},
task::noop_waker,
};
use crate::Service;
struct IdentityService;
impl Service<u32> for IdentityService {
type Response = u32;
type Error = Infallible;
type Future = Ready<Result<Self::Response, Self::Error>>;
always_ready!();
fn call(&self, req: u32) -> Self::Future {
ready(Ok(req))
}
}
struct CountdownService(Cell<u32>);
impl Service<()> for CountdownService {
type Response = ();
type Error = Infallible;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let count = self.0.get();
if count == 0 {
Poll::Ready(Ok(()))
} else {
self.0.set(count - 1);
cx.waker().wake_by_ref();
Poll::Pending
}
}
fn call(&self, _: ()) -> Self::Future {
ready(Ok(()))
}
}
struct WrapperService<S> {
inner: S,
}
impl<S> Service<()> for WrapperService<S>
where
S: Service<()>,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
forward_ready!(inner);
fn call(&self, _: ()) -> Self::Future {
self.inner.call(())
}
}
#[test]
fn test_always_ready_macro() {
let waker = noop_waker();
let mut cx = task::Context::from_waker(&waker);
let svc = IdentityService;
assert!(svc.poll_ready(&mut cx).is_ready());
assert!(svc.poll_ready(&mut cx).is_ready());
assert!(svc.poll_ready(&mut cx).is_ready());
}
#[test]
fn test_forward_ready_macro() {
let waker = noop_waker();
let mut cx = task::Context::from_waker(&waker);
let svc = WrapperService {
inner: CountdownService(Cell::new(3)),
};
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_ready());
}
}

View File

@ -180,7 +180,7 @@ where
F: Fn(A::Error) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapErrServiceFuture { f, fut }
MapErrServiceFuture { fut, f }
}
}

View File

@ -1,3 +1,6 @@
// TODO: see if pipeline is necessary
#![allow(dead_code)]
use core::{
marker::PhantomData,
task::{Context, Poll},
@ -11,7 +14,7 @@ use crate::then::{ThenService, ThenServiceFactory};
use crate::{IntoService, IntoServiceFactory, Service, ServiceFactory};
/// Construct new pipeline with one service in pipeline chain.
pub fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
pub(crate) fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
where
I: IntoService<S, Req>,
S: Service<Req>,
@ -23,7 +26,7 @@ where
}
/// Construct new pipeline factory with one service factory.
pub fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
pub(crate) fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
where
I: IntoServiceFactory<SF, Req>,
SF: ServiceFactory<Req>,
@ -35,7 +38,7 @@ where
}
/// Pipeline service - allows composing multiple services into one service.
pub struct Pipeline<S, Req> {
pub(crate) struct Pipeline<S, Req> {
service: S,
_phantom: PhantomData<Req>,
}
@ -157,7 +160,7 @@ impl<S: Service<Req>, Req> Service<Req> for Pipeline<S, Req> {
}
/// Pipeline factory
pub struct PipelineFactory<SF, Req> {
pub(crate) struct PipelineFactory<SF, Req> {
factory: SF,
_phantom: PhantomData<Req>,
}

View File

@ -246,7 +246,11 @@ mod tests {
use futures_util::future::lazy;
use crate::{err, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory};
use crate::{
err, ok,
pipeline::{pipeline, pipeline_factory},
ready, Ready, Service, ServiceFactory,
};
#[derive(Clone)]
struct Srv1(Rc<Cell<usize>>);

View File

@ -9,10 +9,9 @@ use core::{
use futures_core::ready;
use pin_project_lite::pin_project;
use crate::transform_err::TransformMapInitErr;
use crate::{IntoServiceFactory, Service, ServiceFactory};
/// Apply transform to a service.
/// Apply a [`Transform`] to a [`Service`].
pub fn apply<T, S, I, Req>(t: T, factory: I) -> ApplyTransform<T, S, Req>
where
I: IntoServiceFactory<S, Req>,
@ -22,14 +21,12 @@ where
ApplyTransform::new(t, factory.into_factory())
}
/// The `Transform` trait defines the interface of a service factory that wraps inner service
/// during construction.
/// Defines the interface of a service factory that wraps an inner service during construction.
///
/// Transform(middleware) wraps inner service and runs during
/// inbound and/or outbound processing in the request/response lifecycle.
/// It may modify request and/or response.
/// Transformers wrap an inner service and run during inbound and/or outbound processing in the
/// service lifecycle. They may modify the request and/or response.
///
/// For example, timeout transform:
/// For example, a timeout service wrapper:
///
/// ```ignore
/// pub struct Timeout<S> {
@ -37,11 +34,7 @@ where
/// timeout: Duration,
/// }
///
/// impl<S> Service for Timeout<S>
/// where
/// S: Service,
/// {
/// type Request = S::Request;
/// impl<S: Service<Req>, Req> Service<Req> for Timeout<S> {
/// type Response = S::Response;
/// type Error = TimeoutError<S::Error>;
/// type Future = TimeoutServiceResponse<S>;
@ -51,33 +44,28 @@ where
/// fn call(&self, req: S::Request) -> Self::Future {
/// TimeoutServiceResponse {
/// fut: self.service.call(req),
/// sleep: Delay::new(clock::now() + self.timeout),
/// sleep: Sleep::new(clock::now() + self.timeout),
/// }
/// }
/// }
/// ```
///
/// Timeout service in above example is decoupled from underlying service implementation
/// and could be applied to any service.
/// This wrapper service is decoupled from the underlying service implementation and can be
/// applied to any service.
///
/// The `Transform` trait defines the interface of a Service factory. `Transform`
/// is often implemented for middleware, defining how to construct a
/// middleware Service. A Service that is constructed by the factory takes
/// the Service that follows it during execution as a parameter, assuming
/// ownership of the next Service.
/// The `Transform` trait defines the interface of a service wrapper. `Transform` is often
/// implemented for middleware, defining how to construct a middleware Service. A Service that is
/// constructed by the factory takes the Service that follows it during execution as a parameter,
/// assuming ownership of the next Service.
///
/// Factory for `Timeout` middleware from the above example could look like this:
/// A transform for the `Timeout` middleware could look like this:
///
/// ```ignore
/// pub struct TimeoutTransform {
/// timeout: Duration,
/// }
///
/// impl<S> Transform<S> for TimeoutTransform
/// where
/// S: Service,
/// {
/// type Request = S::Request;
/// impl<S: Service<Req>, Req> Transform<S, Req> for TimeoutTransform {
/// type Response = S::Response;
/// type Error = TimeoutError<S::Error>;
/// type InitError = S::Error;
@ -85,15 +73,15 @@ where
/// type Future = Ready<Result<Self::Transform, Self::InitError>>;
///
/// fn new_transform(&self, service: S) -> Self::Future {
/// ok(TimeoutService {
/// ready(Ok(Timeout {
/// service,
/// timeout: self.timeout,
/// })
/// }))
/// }
/// }
/// ```
pub trait Transform<S, Req> {
/// Responses given by the service.
/// Responses produced by the service.
type Response;
/// Errors produced by the service.
@ -110,16 +98,6 @@ pub trait Transform<S, Req> {
/// Creates and returns a new Transform component, asynchronously
fn new_transform(&self, service: S) -> Self::Future;
/// Map this transform's factory error to a different error,
/// returning a new transform service factory.
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
TransformMapInitErr::new(self, f)
}
}
impl<T, S, Req> Transform<S, Req> for Rc<T>
@ -152,7 +130,7 @@ where
}
}
/// `Apply` transform to new service
/// Apply a [`Transform`] to a [`Service`].
pub struct ApplyTransform<T, S, Req>(Rc<(T, S)>, PhantomData<Req>);
impl<T, S, Req> ApplyTransform<T, S, Req>
@ -240,3 +218,53 @@ where
}
}
}
#[cfg(test)]
mod tests {
use core::time::Duration;
use actix_utils::future::{ready, Ready};
use super::*;
use crate::Service;
// pseudo-doctest for Transform trait
pub struct TimeoutTransform {
timeout: Duration,
}
// pseudo-doctest for Transform trait
impl<S: Service<Req>, Req> Transform<S, Req> for TimeoutTransform {
type Response = S::Response;
type Error = S::Error;
type InitError = S::Error;
type Transform = Timeout<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(Timeout {
service,
_timeout: self.timeout,
}))
}
}
// pseudo-doctest for Transform trait
pub struct Timeout<S> {
service: S,
_timeout: Duration,
}
// pseudo-doctest for Transform trait
impl<S: Service<Req>, Req> Service<Req> for Timeout<S> {
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
crate::forward_ready!(service);
fn call(&self, req: Req) -> Self::Future {
self.service.call(req)
}
}
}
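To complement the `ignore`d doctests above, here is a hedged, self-contained sketch (the `Echo`, `NoopTransform`, and `Noop` names are illustrative, not part of the crate) showing how a wrapped service is obtained from a `Transform` via `new_transform` and then called; in factory pipelines the `apply` function above performs this step for every service the factory builds.

```rust
use actix_service::{Service, Transform};
use futures_util::future::{ready, Ready};

// Trivial inner service: echoes its input.
struct Echo;

impl Service<u32> for Echo {
    type Response = u32;
    type Error = ();
    type Future = Ready<Result<u32, ()>>;

    actix_service::always_ready!();

    fn call(&self, req: u32) -> Self::Future {
        ready(Ok(req))
    }
}

// A transform that wraps the inner service without changing its behaviour.
struct NoopTransform;

struct Noop<S> {
    service: S,
}

impl<S: Service<Req>, Req> Transform<S, Req> for NoopTransform {
    type Response = S::Response;
    type Error = S::Error;
    type InitError = ();
    type Transform = Noop<S>;
    type Future = Ready<Result<Self::Transform, Self::InitError>>;

    fn new_transform(&self, service: S) -> Self::Future {
        ready(Ok(Noop { service }))
    }
}

impl<S: Service<Req>, Req> Service<Req> for Noop<S> {
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    actix_service::forward_ready!(service);

    fn call(&self, req: Req) -> Self::Future {
        self.service.call(req)
    }
}

async fn demo() {
    // `new_transform` builds the wrapper around a concrete service; `apply`
    // (above) does the same for every service produced by a `ServiceFactory`.
    let svc = NoopTransform.new_transform(Echo).await.unwrap();
    assert_eq!(svc.call(7).await, Ok(7));
}
```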

View File

@ -9,10 +9,8 @@ use pin_project_lite::pin_project;
use super::Transform;
/// Transform for the `map_init_err` combinator, changing the type of a new
/// transform's init error.
///
/// This is created by the `Transform::map_init_err` method.
/// Transform for the [`TransformExt::map_init_err`] combinator, changing the type of a new
/// [`Transform`]'s initialization error.
pub struct TransformMapInitErr<T, S, Req, F, E> {
transform: T,
mapper: F,

View File

@ -3,6 +3,30 @@
## Unreleased - 2021-xx-xx
## 3.0.0-beta.5 - 2021-03-29
* Changed `connect::ssl::rustls::RustlsConnectorService` to return error when `DNSNameRef`
generation failed instead of panic. [#296]
* Remove `connect::ssl::openssl::OpensslConnectServiceFactory`. [#297]
* Remove `connect::ssl::openssl::OpensslConnectService`. [#297]
* Add `connect::ssl::native_tls` module for native tls support. [#295]
* Rename `accept::{nativetls => native_tls}`. [#295]
* Remove `connect::TcpConnectService` type. Service callers that expect a `TcpStream` should use
`connect::ConnectService` instead and call `Connection<T, TcpStream>::into_parts` (see the sketch below). [#299]
[#295]: https://github.com/actix/actix-net/pull/295
[#296]: https://github.com/actix/actix-net/pull/296
[#297]: https://github.com/actix/actix-net/pull/297
[#299]: https://github.com/actix/actix-net/pull/299
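A hedged sketch of the migration described above; the hostname and address are placeholders, and `default_connector` is the helper exposed by `actix_tls::connect` elsewhere in this diff.

```rust
use std::net::SocketAddr;

use actix_service::Service;
use actix_tls::connect::{self, Connect};

async fn raw_tcp(addr: SocketAddr) -> std::io::Result<()> {
    // The plain connect service replaces the removed `TcpConnectService`.
    let connector = connect::default_connector();

    let connection = connector
        .call(Connect::with_addr("example.org", addr))
        .await
        .expect("TCP connect failed");

    // `into_parts` splits the `Connection` into the raw `TcpStream` and the
    // original request value.
    let (stream, _req) = connection.into_parts();
    println!("connected via {:?}", stream.local_addr()?);
    Ok(())
}
```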
## 3.0.0-beta.4 - 2021-02-24
* Rename `accept::openssl::{SslStream => TlsStream}`.
* Add `connect::Connect::set_local_addr` to attach local `IpAddr`. [#282]
* `connector::TcpConnector` service will try to bind to local_addr of `IpAddr` when given. [#282]
[#282]: https://github.com/actix/actix-net/pull/282
## 3.0.0-beta.3 - 2021-02-06
* Remove `trust-dns-proto` and `trust-dns-resolver`. [#248]
* Use `std::net::ToSocketAddrs` as simple and basic default resolver. [#248]

View File

@ -1,6 +1,6 @@
[package]
name = "actix-tls"
version = "3.0.0-beta.3"
version = "3.0.0-beta.5"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "TLS acceptor and connector services for Actix ecosystem"
keywords = ["network", "tls", "ssl", "async", "transport"]
@ -41,9 +41,9 @@ uri = ["http"]
[dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0-beta.4"
actix-utils = "3.0.0-beta.2"
actix-rt = { version = "2.2.0", default-features = false }
actix-service = "2.0.0"
actix-utils = "3.0.0"
derive_more = "0.99.5"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
@ -52,7 +52,7 @@ log = "0.4"
tokio-util = { version = "0.6.3", default-features = false }
# openssl
tls-openssl = { package = "openssl", version = "0.10", optional = true }
tls-openssl = { package = "openssl", version = "0.10.9", optional = true }
tokio-openssl = { version = "0.6", optional = true }
# rustls
@ -63,8 +63,8 @@ webpki-roots = { version = "0.21", optional = true }
tokio-native-tls = { version = "0.3", optional = true }
[dev-dependencies]
actix-rt = "2.0.0"
actix-server = "2.0.0-beta.3"
actix-rt = "2.2.0"
actix-server = "2.0.0-beta.5"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
@ -72,5 +72,5 @@ log = "0.4"
trust-dns-resolver = "0.20.0"
[[example]]
name = "basic"
name = "tcp-rustls"
required-features = ["accept", "rustls"]

View File

@ -29,23 +29,19 @@ use std::{
},
};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::pipeline_factory;
use actix_tls::accept::rustls::Acceptor as RustlsAcceptor;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::rustls::{Acceptor as RustlsAcceptor, TlsStream};
use futures_util::future::ok;
use log::info;
use rustls::{
internal::pemfile::certs, internal::pemfile::rsa_private_keys, NoClientAuth, ServerConfig,
};
#[derive(Debug)]
struct ServiceState {
num: Arc<AtomicUsize>,
}
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "actix=trace,basic=trace");
env::set_var("RUST_LOG", "info");
env_logger::init();
let mut tls_config = ServerConfig::new(NoClientAuth::new());
@ -72,11 +68,12 @@ async fn main() -> io::Result<()> {
let count = Arc::clone(&count);
// Set up TLS service factory
pipeline_factory(tls_acceptor.clone())
tls_acceptor
.clone()
.map_err(|err| println!("Rustls error: {:?}", err))
.and_then(move |stream| {
.and_then(move |stream: TlsStream<TcpStream>| {
let num = count.fetch_add(1, Ordering::Relaxed);
info!("[{}] Got TLS connection: {:?}", num, stream);
info!("[{}] Got TLS connection: {:?}", num, &*stream);
ok(())
})
})?

View File

@ -16,7 +16,7 @@ pub mod openssl;
pub mod rustls;
#[cfg(feature = "native-tls")]
pub mod nativetls;
pub mod native_tls;
pub(crate) static MAX_CONN: AtomicUsize = AtomicUsize::new(256);

View File

@ -0,0 +1,163 @@
use std::{
io::{self, IoSlice},
ops::{Deref, DerefMut},
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
use actix_rt::net::{ActixStream, Ready};
use actix_service::{Service, ServiceFactory};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
pub use tokio_native_tls::native_tls::Error;
pub use tokio_native_tls::TlsAcceptor;
use super::MAX_CONN_COUNTER;
/// Wrapper type for `tokio_native_tls::TlsStream` in order to impl `ActixStream` trait.
pub struct TlsStream<T>(tokio_native_tls::TlsStream<T>);
impl<T> From<tokio_native_tls::TlsStream<T>> for TlsStream<T> {
fn from(stream: tokio_native_tls::TlsStream<T>) -> Self {
Self(stream)
}
}
impl<T: ActixStream> Deref for TlsStream<T> {
type Target = tokio_native_tls::TlsStream<T>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T: ActixStream> DerefMut for TlsStream<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<T: ActixStream> AsyncRead for TlsStream<T> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_read(cx, buf)
}
}
impl<T: ActixStream> AsyncWrite for TlsStream<T> {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut **self.get_mut()).poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_shutdown(cx)
}
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> Poll<io::Result<usize>> {
Pin::new(&mut **self.get_mut()).poll_write_vectored(cx, bufs)
}
fn is_write_vectored(&self) -> bool {
(&**self).is_write_vectored()
}
}
impl<T: ActixStream> ActixStream for TlsStream<T> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
T::poll_read_ready((&**self).get_ref().get_ref().get_ref(), cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
T::poll_write_ready((&**self).get_ref().get_ref().get_ref(), cx)
}
}
/// Accept TLS connections via `native-tls` package.
///
/// `native-tls` feature enables this `Acceptor` type.
pub struct Acceptor {
acceptor: TlsAcceptor,
}
impl Acceptor {
/// Create `native-tls` based `Acceptor` service factory.
#[inline]
pub fn new(acceptor: TlsAcceptor) -> Self {
Acceptor { acceptor }
}
}
impl Clone for Acceptor {
#[inline]
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
}
}
}
impl<T: ActixStream + 'static> ServiceFactory<T> for Acceptor {
type Response = TlsStream<T>;
type Error = Error;
type Config = ();
type Service = NativeTlsAcceptorService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let res = MAX_CONN_COUNTER.with(|conns| {
Ok(NativeTlsAcceptorService {
acceptor: self.acceptor.clone(),
conns: conns.clone(),
})
});
Box::pin(async { res })
}
}
pub struct NativeTlsAcceptorService {
acceptor: TlsAcceptor,
conns: Counter,
}
impl<T: ActixStream + 'static> Service<T> for NativeTlsAcceptorService {
type Response = TlsStream<T>;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<TlsStream<T>, Error>>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.conns.available(cx) {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn call(&self, io: T) -> Self::Future {
let guard = self.conns.get();
let acceptor = self.acceptor.clone();
Box::pin(async move {
let io = acceptor.accept(io).await;
drop(guard);
io.map(Into::into)
})
}
}
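For orientation, a hedged sketch of wiring this acceptor into an `actix-server` listener, mirroring the rustls example earlier in this diff; the identity path, password, and port are placeholders and error handling is simplified.

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::{fn_service, ServiceFactoryExt as _};
use actix_tls::accept::native_tls::{Acceptor, TlsAcceptor, TlsStream};
use tokio_native_tls::native_tls;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // Placeholder identity; load your own PKCS#12 bundle here.
    let pkcs12 = std::fs::read("identity.p12")?;
    let identity = native_tls::Identity::from_pkcs12(&pkcs12, "password")
        .expect("invalid identity");
    let acceptor = Acceptor::new(TlsAcceptor::from(
        native_tls::TlsAcceptor::new(identity).expect("acceptor setup failed"),
    ));

    Server::build()
        .bind("tls-echo", ("127.0.0.1", 8443), move || {
            // Accept the TLS handshake, then hand the decrypted stream to a
            // trivial service.
            acceptor
                .clone()
                .map_err(|err| println!("native-tls error: {:?}", err))
                .and_then(fn_service(|_stream: TlsStream<TcpStream>| async {
                    // Handle the decrypted stream here.
                    Ok::<_, ()>(())
                }))
        })?
        .run()
        .await
}
```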

View File

@ -1,99 +0,0 @@
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_service::{Service, ServiceFactory};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
pub use tokio_native_tls::native_tls::Error;
pub use tokio_native_tls::{TlsAcceptor, TlsStream};
use super::MAX_CONN_COUNTER;
/// Accept TLS connections via `native-tls` package.
///
/// `native-tls` feature enables this `Acceptor` type.
pub struct Acceptor {
acceptor: TlsAcceptor,
}
impl Acceptor {
/// Create `native-tls` based `Acceptor` service factory.
#[inline]
pub fn new(acceptor: TlsAcceptor) -> Self {
Acceptor { acceptor }
}
}
impl Clone for Acceptor {
#[inline]
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
}
}
}
impl<T> ServiceFactory<T> for Acceptor
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Response = TlsStream<T>;
type Error = Error;
type Config = ();
type Service = NativeTlsAcceptorService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let res = MAX_CONN_COUNTER.with(|conns| {
Ok(NativeTlsAcceptorService {
acceptor: self.acceptor.clone(),
conns: conns.clone(),
})
});
Box::pin(async { res })
}
}
pub struct NativeTlsAcceptorService {
acceptor: TlsAcceptor,
conns: Counter,
}
impl Clone for NativeTlsAcceptorService {
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
conns: self.conns.clone(),
}
}
}
impl<T> Service<T> for NativeTlsAcceptorService
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Response = TlsStream<T>;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<TlsStream<T>, Error>>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.conns.available(cx) {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn call(&self, io: T) -> Self::Future {
let guard = self.conns.get();
let this = self.clone();
Box::pin(async move {
let io = this.acceptor.accept(io).await;
drop(guard);
io
})
}
}

View File

@ -1,10 +1,13 @@
use std::{
future::Future,
io::{self, IoSlice},
ops::{Deref, DerefMut},
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
use actix_rt::net::{ActixStream, Ready};
use actix_service::{Service, ServiceFactory};
use actix_utils::counter::{Counter, CounterGuard};
use futures_core::{future::LocalBoxFuture, ready};
@ -12,10 +15,82 @@ use futures_core::{future::LocalBoxFuture, ready};
pub use openssl::ssl::{
AlpnError, Error as SslError, HandshakeError, Ssl, SslAcceptor, SslAcceptorBuilder,
};
pub use tokio_openssl::SslStream;
use super::MAX_CONN_COUNTER;
/// Wrapper type for `tokio_openssl::SslStream` in order to impl `ActixStream` trait.
pub struct TlsStream<T>(tokio_openssl::SslStream<T>);
impl<T> From<tokio_openssl::SslStream<T>> for TlsStream<T> {
fn from(stream: tokio_openssl::SslStream<T>) -> Self {
Self(stream)
}
}
impl<T> Deref for TlsStream<T> {
type Target = tokio_openssl::SslStream<T>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for TlsStream<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<T: ActixStream> AsyncRead for TlsStream<T> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_read(cx, buf)
}
}
impl<T: ActixStream> AsyncWrite for TlsStream<T> {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut **self.get_mut()).poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_shutdown(cx)
}
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> Poll<io::Result<usize>> {
Pin::new(&mut **self.get_mut()).poll_write_vectored(cx, bufs)
}
fn is_write_vectored(&self) -> bool {
(&**self).is_write_vectored()
}
}
impl<T: ActixStream> ActixStream for TlsStream<T> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
T::poll_read_ready((&**self).get_ref(), cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
T::poll_write_ready((&**self).get_ref(), cx)
}
}
/// Accept TLS connections via `openssl` package.
///
/// `openssl` feature enables this `Acceptor` type.
@ -40,11 +115,8 @@ impl Clone for Acceptor {
}
}
impl<T> ServiceFactory<T> for Acceptor
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Response = SslStream<T>;
impl<T: ActixStream> ServiceFactory<T> for Acceptor {
type Response = TlsStream<T>;
type Error = SslError;
type Config = ();
type Service = AcceptorService;
@ -67,11 +139,8 @@ pub struct AcceptorService {
conns: Counter,
}
impl<T> Service<T> for AcceptorService
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Response = SslStream<T>;
impl<T: ActixStream> Service<T> for AcceptorService {
type Response = TlsStream<T>;
type Error = SslError;
type Future = AcceptorServiceResponse<T>;
@ -88,24 +157,25 @@ where
let ssl = Ssl::new(ssl_ctx).expect("Provided SSL acceptor was invalid.");
AcceptorServiceResponse {
_guard: self.conns.get(),
stream: Some(SslStream::new(ssl, io).unwrap()),
stream: Some(tokio_openssl::SslStream::new(ssl, io).unwrap()),
}
}
}
pub struct AcceptorServiceResponse<T>
where
T: AsyncRead + AsyncWrite,
{
stream: Option<SslStream<T>>,
pub struct AcceptorServiceResponse<T: ActixStream> {
stream: Option<tokio_openssl::SslStream<T>>,
_guard: CounterGuard,
}
impl<T: AsyncRead + AsyncWrite + Unpin> Future for AcceptorServiceResponse<T> {
type Output = Result<SslStream<T>, SslError>;
impl<T: ActixStream> Future for AcceptorServiceResponse<T> {
type Output = Result<TlsStream<T>, SslError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
ready!(Pin::new(self.stream.as_mut().unwrap()).poll_accept(cx))?;
Poll::Ready(Ok(self.stream.take().expect("SSL connect has resolved.")))
Poll::Ready(Ok(self
.stream
.take()
.expect("SSL connect has resolved.")
.into()))
}
}

View File

@ -1,22 +1,96 @@
use std::{
future::Future,
io,
io::{self, IoSlice},
ops::{Deref, DerefMut},
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
use actix_rt::net::{ActixStream, Ready};
use actix_service::{Service, ServiceFactory};
use actix_utils::counter::{Counter, CounterGuard};
use futures_core::future::LocalBoxFuture;
use tokio_rustls::{Accept, TlsAcceptor};
pub use tokio_rustls::rustls::{ServerConfig, Session};
pub use tokio_rustls::server::TlsStream;
use super::MAX_CONN_COUNTER;
/// Wrapper type for `tokio_rustls::server::TlsStream` in order to impl `ActixStream` trait.
pub struct TlsStream<T>(tokio_rustls::server::TlsStream<T>);
impl<T> From<tokio_rustls::server::TlsStream<T>> for TlsStream<T> {
fn from(stream: tokio_rustls::server::TlsStream<T>) -> Self {
Self(stream)
}
}
impl<T> Deref for TlsStream<T> {
type Target = tokio_rustls::server::TlsStream<T>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for TlsStream<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<T: ActixStream> AsyncRead for TlsStream<T> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_read(cx, buf)
}
}
impl<T: ActixStream> AsyncWrite for TlsStream<T> {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut **self.get_mut()).poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_shutdown(cx)
}
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> Poll<io::Result<usize>> {
Pin::new(&mut **self.get_mut()).poll_write_vectored(cx, bufs)
}
fn is_write_vectored(&self) -> bool {
(&**self).is_write_vectored()
}
}
impl<T: ActixStream> ActixStream for TlsStream<T> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
T::poll_read_ready((&**self).get_ref().0, cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
T::poll_write_ready((&**self).get_ref().0, cx)
}
}
/// Accept TLS connections via `rustls` package.
///
/// `rustls` feature enables this `Acceptor` type.
@ -43,10 +117,7 @@ impl Clone for Acceptor {
}
}
impl<T> ServiceFactory<T> for Acceptor
where
T: AsyncRead + AsyncWrite + Unpin,
{
impl<T: ActixStream> ServiceFactory<T> for Acceptor {
type Response = TlsStream<T>;
type Error = io::Error;
type Config = ();
@ -72,10 +143,7 @@ pub struct AcceptorService {
conns: Counter,
}
impl<T> Service<T> for AcceptorService
where
T: AsyncRead + AsyncWrite + Unpin,
{
impl<T: ActixStream> Service<T> for AcceptorService {
type Response = TlsStream<T>;
type Error = io::Error;
type Future = AcceptorServiceFut<T>;
@ -96,22 +164,16 @@ where
}
}
pub struct AcceptorServiceFut<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
pub struct AcceptorServiceFut<T: ActixStream> {
fut: Accept<T>,
_guard: CounterGuard,
}
impl<T> Future for AcceptorServiceFut<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
impl<T: ActixStream> Future for AcceptorServiceFut<T> {
type Output = Result<TlsStream<T>, io::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
Pin::new(&mut this.fut).poll(cx)
Pin::new(&mut this.fut).poll(cx).map_ok(TlsStream)
}
}

View File

@ -3,7 +3,7 @@ use std::{
fmt,
iter::{self, FromIterator as _},
mem,
net::SocketAddr,
net::{IpAddr, SocketAddr},
};
/// Parse a host into parts (hostname and port).
@ -67,6 +67,7 @@ pub struct Connect<T> {
pub(crate) req: T,
pub(crate) port: u16,
pub(crate) addr: ConnectAddrs,
pub(crate) local_addr: Option<IpAddr>,
}
impl<T: Address> Connect<T> {
@ -78,6 +79,7 @@ impl<T: Address> Connect<T> {
req,
port: port.unwrap_or(0),
addr: ConnectAddrs::None,
local_addr: None,
}
}
@ -88,6 +90,7 @@ impl<T: Address> Connect<T> {
req,
port: 0,
addr: ConnectAddrs::One(addr),
local_addr: None,
}
}
@ -119,6 +122,12 @@ impl<T: Address> Connect<T> {
self
}
/// Set the local address that the connection will bind to.
pub fn set_local_addr(mut self, addr: impl Into<IpAddr>) -> Self {
self.local_addr = Some(addr.into());
self
}
/// Get hostname.
pub fn hostname(&self) -> &str {
self.req.hostname()
@ -285,7 +294,7 @@ fn parse_host(host: &str) -> (&str, Option<u16>) {
#[cfg(test)]
mod tests {
use std::net::{IpAddr, Ipv4Addr};
use std::net::Ipv4Addr;
use super::*;
@ -329,4 +338,13 @@ mod tests {
let mut iter = ConnectAddrsIter::None;
assert_eq!(iter.next(), None);
}
#[test]
fn test_local_addr() {
let conn = Connect::new("hello").set_local_addr([127, 0, 0, 1]);
assert_eq!(
conn.local_addr.unwrap(),
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))
)
}
}

View File

@ -2,12 +2,12 @@ use std::{
collections::VecDeque,
future::Future,
io,
net::SocketAddr,
net::{IpAddr, SocketAddr, SocketAddrV4, SocketAddrV6},
pin::Pin,
task::{Context, Poll},
};
use actix_rt::net::TcpStream;
use actix_rt::net::{TcpSocket, TcpStream};
use actix_service::{Service, ServiceFactory};
use futures_core::{future::LocalBoxFuture, ready};
use log::{error, trace};
@ -54,9 +54,14 @@ impl<T: Address> Service<Connect<T>> for TcpConnector {
fn call(&self, req: Connect<T>) -> Self::Future {
let port = req.port();
let Connect { req, addr, .. } = req;
let Connect {
req,
addr,
local_addr,
..
} = req;
TcpConnectorResponse::new(req, port, addr)
TcpConnectorResponse::new(req, port, local_addr, addr)
}
}
@ -65,14 +70,20 @@ pub enum TcpConnectorResponse<T> {
Response {
req: Option<T>,
port: u16,
local_addr: Option<IpAddr>,
addrs: Option<VecDeque<SocketAddr>>,
stream: Option<ReusableBoxFuture<Result<TcpStream, io::Error>>>,
stream: ReusableBoxFuture<Result<TcpStream, io::Error>>,
},
Error(Option<ConnectError>),
}
impl<T: Address> TcpConnectorResponse<T> {
pub(crate) fn new(req: T, port: u16, addr: ConnectAddrs) -> TcpConnectorResponse<T> {
pub(crate) fn new(
req: T,
port: u16,
local_addr: Option<IpAddr>,
addr: ConnectAddrs,
) -> TcpConnectorResponse<T> {
if addr.is_none() {
error!("TCP connector: unresolved connection address");
return TcpConnectorResponse::Error(Some(ConnectError::Unresolved));
@ -90,18 +101,24 @@ impl<T: Address> TcpConnectorResponse<T> {
ConnectAddrs::One(addr) => TcpConnectorResponse::Response {
req: Some(req),
port,
local_addr,
addrs: None,
stream: Some(ReusableBoxFuture::new(TcpStream::connect(addr))),
stream: ReusableBoxFuture::new(connect(addr, local_addr)),
},
// when the resolver returns multiple socket addresses they are popped from the
// front of the queue until the first successful TCP connection is made.
ConnectAddrs::Multi(addrs) => TcpConnectorResponse::Response {
req: Some(req),
port,
addrs: Some(addrs),
stream: None,
},
ConnectAddrs::Multi(mut addrs) => {
let addr = addrs.pop_front().unwrap();
TcpConnectorResponse::Response {
req: Some(req),
port,
local_addr,
addrs: Some(addrs),
stream: ReusableBoxFuture::new(connect(addr, local_addr)),
}
}
}
}
}
@ -116,43 +133,62 @@ impl<T: Address> Future for TcpConnectorResponse<T> {
TcpConnectorResponse::Response {
req,
port,
local_addr,
addrs,
stream,
} => loop {
if let Some(new) = stream.as_mut() {
match ready!(new.poll(cx)) {
Ok(sock) => {
let req = req.take().unwrap();
trace!(
"TCP connector: successfully connected to {:?} - {:?}",
req.hostname(),
sock.peer_addr()
);
return Poll::Ready(Ok(Connection::new(sock, req)));
}
match ready!(stream.poll(cx)) {
Ok(sock) => {
let req = req.take().unwrap();
trace!(
"TCP connector: successfully connected to {:?} - {:?}",
req.hostname(),
sock.peer_addr()
);
return Poll::Ready(Ok(Connection::new(sock, req)));
}
Err(err) => {
trace!(
"TCP connector: failed to connect to {:?} port: {}",
req.as_ref().unwrap().hostname(),
port,
);
Err(err) => {
trace!(
"TCP connector: failed to connect to {:?} port: {}",
req.as_ref().unwrap().hostname(),
port,
);
if addrs.is_none() || addrs.as_ref().unwrap().is_empty() {
return Poll::Ready(Err(ConnectError::Io(err)));
}
if let Some(addr) = addrs.as_mut().and_then(|addrs| addrs.pop_front()) {
stream.set(connect(addr, *local_addr));
} else {
return Poll::Ready(Err(ConnectError::Io(err)));
}
}
}
// try to connect
let addr = addrs.as_mut().unwrap().pop_front().unwrap();
match stream {
Some(rbf) => rbf.set(TcpStream::connect(addr)),
None => *stream = Some(ReusableBoxFuture::new(TcpStream::connect(addr))),
}
},
}
}
}
async fn connect(addr: SocketAddr, local_addr: Option<IpAddr>) -> io::Result<TcpStream> {
// use local addr if connect asks for it.
match local_addr {
Some(ip_addr) => {
let socket = match ip_addr {
IpAddr::V4(ip_addr) => {
let socket = TcpSocket::new_v4()?;
let addr = SocketAddr::V4(SocketAddrV4::new(ip_addr, 0));
socket.bind(addr)?;
socket
}
IpAddr::V6(ip_addr) => {
let socket = TcpSocket::new_v6()?;
let addr = SocketAddr::V6(SocketAddrV6::new(ip_addr, 0, 0, 0));
socket.bind(addr)?;
socket
}
};
socket.connect(addr).await
}
None => TcpStream::connect(addr).await,
}
}

View File

@ -26,20 +26,20 @@ pub mod ssl;
mod uri;
use actix_rt::net::TcpStream;
use actix_service::{pipeline, pipeline_factory, Service, ServiceFactory};
use actix_service::{Service, ServiceFactory};
pub use self::connect::{Address, Connect, Connection};
pub use self::connector::{TcpConnector, TcpConnectorFactory};
pub use self::error::ConnectError;
pub use self::resolve::{Resolve, Resolver, ResolverFactory};
pub use self::service::{ConnectService, ConnectServiceFactory, TcpConnectService};
pub use self::service::{ConnectService, ConnectServiceFactory};
/// Create TCP connector service.
pub fn new_connector<T: Address + 'static>(
resolver: Resolver,
) -> impl Service<Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError> + Clone
{
pipeline(resolver).and_then(TcpConnector)
ConnectServiceFactory::new(resolver).service()
}
/// Create TCP connector service factory.
@ -52,7 +52,7 @@ pub fn new_connector_factory<T: Address + 'static>(
Error = ConnectError,
InitError = (),
> + Clone {
pipeline_factory(ResolverFactory::new(resolver)).and_then(TcpConnectorFactory)
ConnectServiceFactory::new(resolver)
}
/// Create connector service with default parameters.

View File

@ -56,7 +56,7 @@ pub enum Resolver {
/// An interface for custom async DNS resolvers.
///
/// # Usage
/// ```rust
/// ```
/// use std::net::SocketAddr;
///
/// use actix_tls::connect::{Resolve, Resolver};

View File

@ -34,14 +34,6 @@ impl ConnectServiceFactory {
resolver: self.resolver.service(),
}
}
/// Construct new tcp stream service
pub fn tcp_service(&self) -> TcpConnectService {
TcpConnectService {
tcp: self.tcp.service(),
resolver: self.resolver.service(),
}
}
}
impl Clone for ConnectServiceFactory {
@ -63,7 +55,7 @@ impl<T: Address> ServiceFactory<Connect<T>> for ConnectServiceFactory {
fn new_service(&self, _: ()) -> Self::Future {
let service = self.service();
Box::pin(async move { Ok(service) })
Box::pin(async { Ok(service) })
}
}
@ -135,44 +127,3 @@ impl<T: Address> Future for ConnectServiceResponse<T> {
}
}
}
#[derive(Clone)]
pub struct TcpConnectService {
tcp: TcpConnector,
resolver: Resolver,
}
impl<T: Address> Service<Connect<T>> for TcpConnectService {
type Response = TcpStream;
type Error = ConnectError;
type Future = TcpConnectServiceResponse<T>;
actix_service::always_ready!();
fn call(&self, req: Connect<T>) -> Self::Future {
TcpConnectServiceResponse {
fut: ConnectFuture::Resolve(self.resolver.call(req)),
tcp: self.tcp,
}
}
}
pub struct TcpConnectServiceResponse<T: Address> {
fut: ConnectFuture<T>,
tcp: TcpConnector,
}
impl<T: Address> Future for TcpConnectServiceResponse<T> {
type Output = Result<TcpStream, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match ready!(self.fut.poll_connect(cx))? {
ConnectOutput::Resolved(res) => {
self.fut = ConnectFuture::Connect(self.tcp.call(res));
}
ConnectOutput::Connected(conn) => return Poll::Ready(Ok(conn.into_parts().0)),
}
}
}
}

View File

@ -5,3 +5,6 @@ pub mod openssl;
#[cfg(feature = "rustls")]
pub mod rustls;
#[cfg(feature = "native-tls")]
pub mod native_tls;

View File

@ -0,0 +1,88 @@
use std::io;
use actix_rt::net::ActixStream;
use actix_service::{Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
use log::trace;
use tokio_native_tls::{TlsConnector as TokioNativetlsConnector, TlsStream};
pub use tokio_native_tls::native_tls::TlsConnector;
use crate::connect::{Address, Connection};
/// Native-tls connector factory and service
pub struct NativetlsConnector {
connector: TokioNativetlsConnector,
}
impl NativetlsConnector {
pub fn new(connector: TlsConnector) -> Self {
Self {
connector: TokioNativetlsConnector::from(connector),
}
}
}
impl NativetlsConnector {
pub fn service(connector: TlsConnector) -> Self {
Self::new(connector)
}
}
impl Clone for NativetlsConnector {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
}
}
}
impl<T: Address, U> ServiceFactory<Connection<T, U>> for NativetlsConnector
where
U: ActixStream + 'static,
{
type Response = Connection<T, TlsStream<U>>;
type Error = io::Error;
type Config = ();
type Service = Self;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let connector = self.clone();
Box::pin(async { Ok(connector) })
}
}
// `NativetlsConnector` is both the `ServiceFactory` and `Service` impl type,
// as the factory and the service share the same type and state.
impl<T, U> Service<Connection<T, U>> for NativetlsConnector
where
T: Address,
U: ActixStream + 'static,
{
type Response = Connection<T, TlsStream<U>>;
type Error = io::Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, stream: Connection<T, U>) -> Self::Future {
let (io, stream) = stream.replace_io(());
let connector = self.connector.clone();
Box::pin(async move {
trace!("SSL Handshake start for: {:?}", stream.host());
connector
.connect(stream.host(), io)
.await
.map(|res| {
trace!("SSL Handshake success: {:?}", stream.host());
stream.replace_io(res).1
})
.map_err(|e| {
trace!("SSL Handshake error: {:?}", e);
io::Error::new(io::ErrorKind::Other, format!("{}", e))
})
})
}
}
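A hedged usage sketch (hostname and address are placeholders): since `NativetlsConnector` is its own `Service` type, it can be called directly on a `Connection` produced by the plain TCP connector from this crate.

```rust
use std::net::SocketAddr;

use actix_service::Service;
use actix_tls::connect::{
    self as connect_mod,
    ssl::native_tls::{NativetlsConnector, TlsConnector},
    Connect,
};

async fn tls_connect(addr: SocketAddr) -> std::io::Result<()> {
    // Plain TCP connect first.
    let tcp = connect_mod::default_connector();
    let conn = tcp
        .call(Connect::with_addr("example.org", addr))
        .await
        .expect("TCP connect failed");

    // Then upgrade the established stream; the connector is used as a service
    // directly, no `new_service` step required.
    let tls = NativetlsConnector::new(
        TlsConnector::builder().build().expect("TLS setup failed"),
    );
    let _secured = tls.call(conn).await?;
    Ok(())
}
```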

View File

@ -1,13 +1,11 @@
use std::{
fmt,
future::Future,
io,
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::net::TcpStream;
use actix_rt::net::ActixStream;
use actix_service::{Service, ServiceFactory};
use futures_core::{future::LocalBoxFuture, ready};
use log::trace;
@ -15,10 +13,7 @@ use log::trace;
pub use openssl::ssl::{Error as SslError, HandshakeError, SslConnector, SslMethod};
pub use tokio_openssl::SslStream;
use crate::connect::resolve::Resolve;
use crate::connect::{
Address, Connect, ConnectError, ConnectService, ConnectServiceFactory, Connection, Resolver,
};
use crate::connect::{Address, Connection};
/// OpenSSL connector factory
pub struct OpensslConnector {
@ -45,8 +40,8 @@ impl Clone for OpensslConnector {
impl<T, U> ServiceFactory<Connection<T, U>> for OpensslConnector
where
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Address,
U: ActixStream + 'static,
{
type Response = Connection<T, SslStream<U>>;
type Error = io::Error;
@ -75,8 +70,8 @@ impl Clone for OpensslConnectorService {
impl<T, U> Service<Connection<T, U>> for OpensslConnectorService
where
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Address,
U: ActixStream,
{
type Response = Connection<T, SslStream<U>>;
type Error = io::Error;
@ -112,7 +107,8 @@ pub struct ConnectAsyncExt<T, U> {
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Address,
U: ActixStream,
{
type Output = Result<Connection<T, SslStream<U>>, io::Error>;
@ -132,115 +128,3 @@ where
}
}
}
pub struct OpensslConnectServiceFactory {
tcp: ConnectServiceFactory,
openssl: OpensslConnector,
}
impl OpensslConnectServiceFactory {
/// Construct new OpensslConnectService factory
pub fn new(connector: SslConnector) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::new(Resolver::Default),
openssl: OpensslConnector::new(connector),
}
}
/// Construct new connect service with custom DNS resolver
pub fn with_resolver(connector: SslConnector, resolver: impl Resolve + 'static) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::new(Resolver::new_custom(resolver)),
openssl: OpensslConnector::new(connector),
}
}
/// Construct OpenSSL connect service
pub fn service(&self) -> OpensslConnectService {
OpensslConnectService {
tcp: self.tcp.service(),
openssl: OpensslConnectorService {
connector: self.openssl.connector.clone(),
},
}
}
}
impl Clone for OpensslConnectServiceFactory {
fn clone(&self) -> Self {
OpensslConnectServiceFactory {
tcp: self.tcp.clone(),
openssl: self.openssl.clone(),
}
}
}
impl<T: Address + 'static> ServiceFactory<Connect<T>> for OpensslConnectServiceFactory {
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = OpensslConnectService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let service = self.service();
Box::pin(async { Ok(service) })
}
}
#[derive(Clone)]
pub struct OpensslConnectService {
tcp: ConnectService,
openssl: OpensslConnectorService,
}
impl<T: Address + 'static> Service<Connect<T>> for OpensslConnectService {
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Future = OpensslConnectServiceResponse<T>;
actix_service::always_ready!();
fn call(&self, req: Connect<T>) -> Self::Future {
OpensslConnectServiceResponse {
fut1: Some(self.tcp.call(req)),
fut2: None,
openssl: self.openssl.clone(),
}
}
}
pub struct OpensslConnectServiceResponse<T: Address + 'static> {
fut1: Option<<ConnectService as Service<Connect<T>>>::Future>,
fut2: Option<<OpensslConnectorService as Service<Connection<T, TcpStream>>>::Future>,
openssl: OpensslConnectorService,
}
impl<T: Address> Future for OpensslConnectServiceResponse<T> {
type Output = Result<SslStream<TcpStream>, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if let Some(ref mut fut) = self.fut1 {
match ready!(Pin::new(fut).poll(cx)) {
Ok(res) => {
let _ = self.fut1.take();
self.fut2 = Some(self.openssl.call(res));
}
Err(e) => return Poll::Ready(Err(e)),
}
}
if let Some(ref mut fut) = self.fut2 {
match ready!(Pin::new(fut).poll(cx)) {
Ok(connect) => Poll::Ready(Ok(connect.into_parts().0)),
Err(e) => Poll::Ready(Err(ConnectError::Io(io::Error::new(
io::ErrorKind::Other,
e,
)))),
}
} else {
Poll::Pending
}
}
}

View File

@ -1,6 +1,6 @@
use std::{
fmt,
future::Future,
io,
pin::Pin,
sync::Arc,
task::{Context, Poll},
@ -10,7 +10,7 @@ pub use tokio_rustls::rustls::Session;
pub use tokio_rustls::{client::TlsStream, rustls::ClientConfig};
pub use webpki_roots::TLS_SERVER_ROOTS;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::net::ActixStream;
use actix_service::{Service, ServiceFactory};
use futures_core::{future::LocalBoxFuture, ready};
use log::trace;
@ -44,12 +44,13 @@ impl Clone for RustlsConnector {
}
}
impl<T: Address, U> ServiceFactory<Connection<T, U>> for RustlsConnector
impl<T, U> ServiceFactory<Connection<T, U>> for RustlsConnector
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
T: Address,
U: ActixStream + 'static,
{
type Response = Connection<T, TlsStream<U>>;
type Error = std::io::Error;
type Error = io::Error;
type Config = ();
type Service = RustlsConnectorService;
type InitError = ();
@ -76,43 +77,55 @@ impl Clone for RustlsConnectorService {
impl<T, U> Service<Connection<T, U>> for RustlsConnectorService
where
T: Address,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
U: ActixStream,
{
type Response = Connection<T, TlsStream<U>>;
type Error = std::io::Error;
type Future = ConnectAsyncExt<T, U>;
type Error = io::Error;
type Future = RustlsConnectorServiceFuture<T, U>;
actix_service::always_ready!();
fn call(&self, stream: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", stream.host());
let (io, stream) = stream.replace_io(());
let host = DNSNameRef::try_from_ascii_str(stream.host())
.expect("rustls currently only handles hostname-based connections. See https://github.com/briansmith/webpki/issues/54");
ConnectAsyncExt {
fut: TlsConnector::from(self.connector.clone()).connect(host, io),
stream: Some(stream),
fn call(&self, connection: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", connection.host());
let (stream, connection) = connection.replace_io(());
match DNSNameRef::try_from_ascii_str(connection.host()) {
Ok(host) => RustlsConnectorServiceFuture::Future {
connect: TlsConnector::from(self.connector.clone()).connect(host, stream),
connection: Some(connection),
},
Err(_) => RustlsConnectorServiceFuture::InvalidDns,
}
}
}
pub struct ConnectAsyncExt<T, U> {
fut: Connect<U>,
stream: Option<Connection<T, ()>>,
pub enum RustlsConnectorServiceFuture<T, U> {
/// See issue https://github.com/briansmith/webpki/issues/54
InvalidDns,
Future {
connect: Connect<U>,
connection: Option<Connection<T, ()>>,
},
}
impl<T, U> Future for ConnectAsyncExt<T, U>
impl<T, U> Future for RustlsConnectorServiceFuture<T, U>
where
T: Address,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
U: ActixStream,
{
type Output = Result<Connection<T, TlsStream<U>>, std::io::Error>;
type Output = Result<Connection<T, TlsStream<U>>, io::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
let stream = ready!(Pin::new(&mut this.fut).poll(cx))?;
let s = this.stream.take().unwrap();
trace!("SSL Handshake success: {:?}", s.host());
Poll::Ready(Ok(s.replace_io(stream).1))
match self.get_mut() {
Self::InvalidDns => Poll::Ready(Err(
io::Error::new(io::ErrorKind::Other, "rustls currently only handles hostname-based connections. See https://github.com/briansmith/webpki/issues/54")
)),
Self::Future { connect, connection } => {
let stream = ready!(Pin::new(connect).poll(cx))?;
let connection = connection.take().unwrap();
trace!("SSL Handshake success: {:?}", connection.host());
Poll::Ready(Ok(connection.replace_io(stream).1))
}
}
}
}

View File

@ -1,4 +1,9 @@
use std::io;
#![cfg(feature = "connect")]
use std::{
io,
net::{IpAddr, Ipv4Addr},
};
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
@ -9,7 +14,7 @@ use futures_util::sink::SinkExt;
use actix_tls::connect::{self as actix_connect, Connect};
#[cfg(all(feature = "connect", feature = "openssl"))]
#[cfg(feature = "openssl")]
#[actix_rt::test]
async fn test_string() {
let srv = TestServer::with(|| {
@ -125,3 +130,25 @@ async fn test_rustls_uri() {
let con = conn.call(addr.into()).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[actix_rt::test]
async fn test_local_addr() {
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
})
});
let conn = actix_connect::default_connector();
let local = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 3));
let (con, _) = conn
.call(Connect::with_addr("10", srv.addr()).set_local_addr(local))
.await
.unwrap()
.into_parts();
assert_eq!(con.local_addr().unwrap().ip(), local)
}

View File

@ -1,3 +1,5 @@
#![cfg(feature = "connect")]
use std::{
io,
net::{Ipv4Addr, SocketAddr},

View File

@ -16,9 +16,9 @@ name = "actix_tracing"
path = "src/lib.rs"
[dependencies]
actix-service = "2.0.0-beta.4"
actix-service = "2.0.0"
actix-utils = "3.0.0"
futures-util = { version = "0.3.4", default-features = false }
tracing = "0.1"
tracing-futures = "0.2"

View File

@ -7,9 +7,9 @@
use core::marker::PhantomData;
use actix_service::{
apply, dev::ApplyTransform, IntoServiceFactory, Service, ServiceFactory, Transform,
apply, ApplyTransform, IntoServiceFactory, Service, ServiceFactory, Transform,
};
use futures_util::future::{ok, Either, Ready};
use actix_utils::future::{ok, Either, Ready};
use tracing_futures::{Instrument, Instrumented};
/// A `Service` implementation that automatically enters/exits tracing spans
@ -48,9 +48,9 @@ where
.clone()
.map(|span| tracing::span!(parent: &span, tracing::Level::INFO, "future"))
{
Either::Right(fut.instrument(span))
Either::right(fut.instrument(span))
} else {
Either::Left(fut)
Either::left(fut)
}
}
}

View File

@ -3,6 +3,26 @@
## Unreleased - 2021-xx-xx
## 3.0.0 - 2021-04-16
* No significant changes from `3.0.0-beta.4`.
## 3.0.0-beta.4 - 2021-04-01
* Add `future::Either` type; see the sketch after this entry. [#305]
[#305]: https://github.com/actix/actix-net/pull/305
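A hedged sketch of the new helpers (`maybe_double` and `demo` are illustrative names): `Either` selects between two futures with the same output type, and `ready` builds an immediately-resolved future.

```rust
use actix_utils::future::{ready, Either, Ready};

// Pick one of two ready futures; both sides resolve to the same output type.
fn maybe_double(input: Option<u32>) -> Either<Ready<u32>, Ready<u32>> {
    match input {
        Some(n) => Either::left(ready(n * 2)),
        None => Either::right(ready(0)),
    }
}

async fn demo() {
    assert_eq!(maybe_double(Some(21)).await, 42);
    assert_eq!(maybe_double(None).await, 0);
}
```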
## 3.0.0-beta.3 - 2021-04-01
* Moved `mpsc` to own crate `local-channel`. [#301]
* Moved `task::LocalWaker` to own crate `local-waker`. [#301]
* Remove `timeout` module. [#301]
* Remove `dispatcher` module. [#301]
* Expose `future` mod with `ready` and `poll_fn` helpers. [#301]
[#301]: https://github.com/actix/actix-net/pull/301
## 3.0.0-beta.2 - 2021-02-06
* Update `actix-rt` to `2.0.0`. [#273]

View File

@ -1,13 +1,14 @@
[package]
name = "actix-utils"
version = "3.0.0-beta.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various network related services and utilities for the Actix ecosystem"
version = "3.0.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Various utilities used in the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-utils"
categories = ["network-programming", "asynchronous"]
repository = "https://github.com/actix/actix-net"
license = "MIT OR Apache-2.0"
edition = "2018"
@ -16,14 +17,8 @@ name = "actix_utils"
path = "src/lib.rs"
[dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0-beta.4"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
log = "0.4"
pin-project-lite = "0.2.0"
pin-project-lite = "0.2"
local-waker = "0.1"
[dev-dependencies]
actix-rt = "2.0.0"

View File

@ -1,24 +1,18 @@
use core::cell::Cell;
use core::task;
//! Task-notifying counter.
use core::{cell::Cell, fmt, task};
use std::rc::Rc;
use crate::task::LocalWaker;
use local_waker::LocalWaker;
#[derive(Clone)]
/// Simple counter with ability to notify task on reaching specific number
///
/// Counter could be cloned, total n-count is shared across all clones.
#[derive(Debug, Clone)]
pub struct Counter(Rc<CounterInner>);
struct CounterInner {
count: Cell<usize>,
capacity: usize,
task: LocalWaker,
}
impl Counter {
/// Create `Counter` instance and set max value.
/// Create `Counter` instance with max value.
pub fn new(capacity: usize) -> Self {
Counter(Rc::new(CounterInner {
capacity,
@ -27,38 +21,26 @@ impl Counter {
}))
}
/// Get counter guard.
/// Create new counter guard, incrementing the counter.
pub fn get(&self) -> CounterGuard {
CounterGuard::new(self.0.clone())
}
/// Check if counter is not at capacity. If counter at capacity
/// it registers notification for current task.
/// Returns true if the counter is below capacity; otherwise, registers the current task to be
/// notified when capacity becomes available.
pub fn available(&self, cx: &mut task::Context<'_>) -> bool {
self.0.available(cx)
}
/// Get total number of acquired counts
/// Get total number of acquired guards.
pub fn total(&self) -> usize {
self.0.count.get()
}
}
pub struct CounterGuard(Rc<CounterInner>);
impl CounterGuard {
fn new(inner: Rc<CounterInner>) -> Self {
inner.inc();
CounterGuard(inner)
}
}
impl Unpin for CounterGuard {}
impl Drop for CounterGuard {
fn drop(&mut self) {
self.0.dec();
}
struct CounterInner {
count: Cell<usize>,
capacity: usize,
task: LocalWaker,
}
impl CounterInner {
@ -83,3 +65,32 @@ impl CounterInner {
}
}
}
impl fmt::Debug for CounterInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Counter")
.field("count", &self.count.get())
.field("capacity", &self.capacity)
.field("task", &self.task)
.finish()
}
}
/// An RAII structure that keeps the underlying counter incremented until this guard is dropped.
#[derive(Debug)]
pub struct CounterGuard(Rc<CounterInner>);
impl CounterGuard {
fn new(inner: Rc<CounterInner>) -> Self {
inner.inc();
CounterGuard(inner)
}
}
impl Unpin for CounterGuard {}
impl Drop for CounterGuard {
fn drop(&mut self) {
self.0.dec();
}
}
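A hedged sketch of how the counter is meant to be used, matching the acceptor services earlier in this diff; the no-op waker from `futures-util` stands in for a real task context.

```rust
use std::task::Context;

use actix_utils::counter::Counter;
use futures_util::task::noop_waker;

fn demo() {
    // Allow at most two outstanding guards.
    let counter = Counter::new(2);
    let waker = noop_waker();
    let mut cx = Context::from_waker(&waker);

    let g1 = counter.get();
    let _g2 = counter.get();
    assert_eq!(counter.total(), 2);

    // At capacity: `available` returns false and registers the task's waker
    // so it is notified when a guard is dropped.
    assert!(!counter.available(&mut cx));

    drop(g1);
    assert!(counter.available(&mut cx));
}
```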

View File

@ -1,338 +0,0 @@
//! Framed dispatcher service and related utilities.
#![allow(type_alias_bounds)]
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};
use core::{fmt, mem};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use actix_service::{IntoService, Service};
use futures_core::stream::Stream;
use log::debug;
use pin_project_lite::pin_project;
use crate::mpsc;
/// Framed transport errors
pub enum DispatcherError<E, U: Encoder<I> + Decoder, I> {
Service(E),
Encoder(<U as Encoder<I>>::Error),
Decoder(<U as Decoder>::Error),
}
impl<E, U: Encoder<I> + Decoder, I> From<E> for DispatcherError<E, U, I> {
fn from(err: E) -> Self {
DispatcherError::Service(err)
}
}
impl<E, U: Encoder<I> + Decoder, I> fmt::Debug for DispatcherError<E, U, I>
where
E: fmt::Debug,
<U as Encoder<I>>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DispatcherError::Service(ref e) => write!(fmt, "DispatcherError::Service({:?})", e),
DispatcherError::Encoder(ref e) => write!(fmt, "DispatcherError::Encoder({:?})", e),
DispatcherError::Decoder(ref e) => write!(fmt, "DispatcherError::Decoder({:?})", e),
}
}
}
impl<E, U: Encoder<I> + Decoder, I> fmt::Display for DispatcherError<E, U, I>
where
E: fmt::Display,
<U as Encoder<I>>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DispatcherError::Service(ref e) => write!(fmt, "{}", e),
DispatcherError::Encoder(ref e) => write!(fmt, "{:?}", e),
DispatcherError::Decoder(ref e) => write!(fmt, "{:?}", e),
}
}
}
pub enum Message<T> {
Item(T),
Close,
}
pin_project! {
/// Dispatcher is a future that reads frames from Framed object
/// and passes them to the service.
pub struct Dispatcher<S, T, U, I>
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead,
T: AsyncWrite,
U: Encoder<I>,
U: Decoder,
I: 'static,
<U as Encoder<I>>::Error: fmt::Debug,
{
service: S,
state: State<S, U, I>,
#[pin]
framed: Framed<T, U>,
rx: mpsc::Receiver<Result<Message<I>, S::Error>>,
tx: mpsc::Sender<Result<Message<I>, S::Error>>,
}
}
enum State<S, U, I>
where
S: Service<<U as Decoder>::Item>,
U: Encoder<I> + Decoder,
{
Processing,
Error(DispatcherError<S::Error, U, I>),
FramedError(DispatcherError<S::Error, U, I>),
FlushAndStop,
Stopping,
}
impl<S, U, I> State<S, U, I>
where
S: Service<<U as Decoder>::Item>,
U: Encoder<I> + Decoder,
{
fn take_error(&mut self) -> DispatcherError<S::Error, U, I> {
match mem::replace(self, State::Processing) {
State::Error(err) => err,
_ => panic!(),
}
}
fn take_framed_error(&mut self) -> DispatcherError<S::Error, U, I> {
match mem::replace(self, State::Processing) {
State::FramedError(err) => err,
_ => panic!(),
}
}
}
impl<S, T, U, I> Dispatcher<S, T, U, I>
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder<I>,
I: 'static,
<U as Decoder>::Error: fmt::Debug,
<U as Encoder<I>>::Error: fmt::Debug,
{
pub fn new<F>(framed: Framed<T, U>, service: F) -> Self
where
F: IntoService<S, <U as Decoder>::Item>,
{
let (tx, rx) = mpsc::channel();
Dispatcher {
framed,
rx,
tx,
service: service.into_service(),
state: State::Processing,
}
}
/// Construct new `Dispatcher` instance with customer `mpsc::Receiver`
pub fn with_rx<F>(
framed: Framed<T, U>,
service: F,
rx: mpsc::Receiver<Result<Message<I>, S::Error>>,
) -> Self
where
F: IntoService<S, <U as Decoder>::Item>,
{
let tx = rx.sender();
Dispatcher {
framed,
rx,
tx,
service: service.into_service(),
state: State::Processing,
}
}
/// Get sink
pub fn get_sink(&self) -> mpsc::Sender<Result<Message<I>, S::Error>> {
self.tx.clone()
}
/// Get reference to a service wrapped by `Dispatcher` instance.
pub fn get_ref(&self) -> &S {
&self.service
}
/// Get mutable reference to a service wrapped by `Dispatcher` instance.
pub fn get_mut(&mut self) -> &mut S {
&mut self.service
}
/// Get reference to a framed instance wrapped by `Dispatcher`
/// instance.
pub fn get_framed(&self) -> &Framed<T, U> {
&self.framed
}
/// Get mutable reference to a framed instance wrapped by `Dispatcher` instance.
pub fn get_framed_mut(&mut self) -> &mut Framed<T, U> {
&mut self.framed
}
fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> bool
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder<I>,
I: 'static,
<U as Encoder<I>>::Error: fmt::Debug,
{
loop {
let this = self.as_mut().project();
match this.service.poll_ready(cx) {
Poll::Ready(Ok(_)) => {
let item = match this.framed.next_item(cx) {
Poll::Ready(Some(Ok(el))) => el,
Poll::Ready(Some(Err(err))) => {
*this.state = State::FramedError(DispatcherError::Decoder(err));
return true;
}
Poll::Pending => return false,
Poll::Ready(None) => {
*this.state = State::Stopping;
return true;
}
};
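// Run the service call as a separate task; its result is forwarded through the
// channel and written out to the framed transport by `poll_write`.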
let tx = this.tx.clone();
let fut = this.service.call(item);
actix_rt::spawn(async move {
let item = fut.await;
let _ = tx.send(item.map(Message::Item));
});
}
Poll::Pending => return false,
Poll::Ready(Err(err)) => {
*this.state = State::Error(DispatcherError::Service(err));
return true;
}
}
}
}
/// Write queued messages to the framed object and flush it.
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> bool
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder<I>,
I: 'static,
<U as Encoder<I>>::Error: fmt::Debug,
{
loop {
let mut this = self.as_mut().project();
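// Fill the write buffer with as many queued messages as it will hold, then
// flush whatever has been buffered so far.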
while !this.framed.is_write_buf_full() {
match Pin::new(&mut this.rx).poll_next(cx) {
Poll::Ready(Some(Ok(Message::Item(msg)))) => {
if let Err(err) = this.framed.as_mut().write(msg) {
*this.state = State::FramedError(DispatcherError::Encoder(err));
return true;
}
}
Poll::Ready(Some(Ok(Message::Close))) => {
*this.state = State::FlushAndStop;
return true;
}
Poll::Ready(Some(Err(err))) => {
*this.state = State::Error(DispatcherError::Service(err));
return true;
}
Poll::Ready(None) | Poll::Pending => break,
}
}
if !this.framed.is_write_buf_empty() {
match this.framed.flush(cx) {
Poll::Pending => break,
Poll::Ready(Ok(_)) => (),
Poll::Ready(Err(err)) => {
debug!("Error sending data: {:?}", err);
*this.state = State::FramedError(DispatcherError::Encoder(err));
return true;
}
}
} else {
break;
}
}
false
}
}
impl<S, T, U, I> Future for Dispatcher<S, T, U, I>
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder<I>,
I: 'static,
<U as Encoder<I>>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
type Output = Result<(), DispatcherError<S::Error, U, I>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
let this = self.as_mut().project();
return match this.state {
State::Processing => {
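// poll_read/poll_write return true when they have changed the state, in
// which case loop again so the new state is handled immediately.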
if self.as_mut().poll_read(cx) || self.as_mut().poll_write(cx) {
continue;
} else {
Poll::Pending
}
}
State::Error(_) => {
// flush write buffer
if !this.framed.is_write_buf_empty() && this.framed.flush(cx).is_pending() {
return Poll::Pending;
}
Poll::Ready(Err(this.state.take_error()))
}
State::FlushAndStop => {
if !this.framed.is_write_buf_empty() {
match this.framed.flush(cx) {
Poll::Ready(Err(err)) => {
debug!("Error sending data: {:?}", err);
Poll::Ready(Ok(()))
}
Poll::Pending => Poll::Pending,
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
}
} else {
Poll::Ready(Ok(()))
}
}
State::FramedError(_) => Poll::Ready(Err(this.state.take_framed_error())),
State::Stopping => Poll::Ready(Ok(())),
};
}
}
}
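// A minimal usage sketch for `Dispatcher`, assuming `BytesCodec`/`Framed` from
// actix-codec, `TcpStream` from actix-rt, the actix-service traits, `bytes`,
// and `futures-core` are available as they are elsewhere in this workspace;
// `EchoService` and `handle_connection` are hypothetical names.
#[allow(dead_code)]
mod usage_sketch {
    use actix_codec::{BytesCodec, Framed};
    use actix_rt::net::TcpStream;
    use actix_service::Service;
    use bytes::{Bytes, BytesMut};
    use futures_core::future::LocalBoxFuture;

    use super::{Dispatcher, Message};

    struct EchoService;

    impl Service<BytesMut> for EchoService {
        type Response = Bytes;
        type Error = ();
        type Future = LocalBoxFuture<'static, Result<Bytes, ()>>;

        actix_service::always_ready!();

        // Echo each decoded frame back to the peer unchanged.
        fn call(&self, frame: BytesMut) -> Self::Future {
            Box::pin(async move { Ok(frame.freeze()) })
        }
    }

    async fn handle_connection(stream: TcpStream) {
        // Frame the raw stream with a codec; the dispatcher then reads frames,
        // calls the service for each one, and writes the responses back.
        let framed = Framed::new(stream, BytesCodec);
        let dispatcher = Dispatcher::new(framed, EchoService);

        // Frames can also be pushed from outside the service through the sink;
        // sending `Message::Close` asks the dispatcher to flush and stop.
        let sink = dispatcher.get_sink();
        let _ = sink.send(Ok(Message::Item(Bytes::from_static(b"hello"))));

        // Resolves once the peer disconnects, `Message::Close` is processed,
        // or an error occurs.
        let _ = dispatcher.await;
    }
}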

View File

@ -0,0 +1,91 @@
//! A symmetric either future.
use core::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
pin_project! {
/// Combines two different futures that have the same output type.
///
/// Construct variants with [`Either::left`] and [`Either::right`].
///
/// # Examples
/// ```
/// use actix_utils::future::{ready, Ready, Either};
///
/// # async fn run() {
/// let res = Either::<_, Ready<usize>>::left(ready(42));
/// assert_eq!(res.await, 42);
///
/// let res = Either::<Ready<usize>, _>::right(ready(43));
/// assert_eq!(res.await, 43);
/// # }
/// ```
#[project = EitherProj]
#[derive(Debug, Clone)]
pub enum Either<L, R> {
/// A value of type `L`.
#[allow(missing_docs)]
Left { #[pin] value: L },
/// A value of type `R`.
#[allow(missing_docs)]
Right { #[pin] value: R },
}
}
impl<L, R> Either<L, R> {
/// Creates a new `Either` using the left variant.
pub fn left(value: L) -> Either<L, R> {
Either::Left { value }
}
/// Creates a new `Either` using the right variant.
pub fn right(value: R) -> Either<L, R> {
Either::Right { value }
}
}
impl<T> Either<T, T> {
/// Unwraps into the inner value when left and right have a common type.
pub fn into_inner(self) -> T {
match self {
Either::Left { value } => value,
Either::Right { value } => value,
}
}
}
impl<L, R> Future for Either<L, R>
where
L: Future,
R: Future<Output = L::Output>,
{
type Output = L::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.project() {
EitherProj::Left { value } => value.poll(cx),
EitherProj::Right { value } => value.poll(cx),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::future::{ready, Ready};
#[actix_rt::test]
async fn test_either() {
let res = Either::<_, Ready<usize>>::left(ready(42));
assert_eq!(res.await, 42);
let res = Either::<Ready<usize>, _>::right(ready(43));
assert_eq!(res.await, 43);
}
}
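// A minimal sketch of the typical use of `Either`: returning one of two
// differently-typed futures from a branch without boxing them. Assumes this
// crate's `ready` helper; `lookup` and `compute` are hypothetical names.
#[allow(dead_code)]
mod usage_sketch {
    use core::future::Future;

    use super::Either;
    use crate::future::ready;

    async fn compute(x: u32) -> u32 {
        x * 2
    }

    // Without `Either`, the two branches would have different concrete future
    // types and could not share a single `impl Future` return type.
    fn lookup(x: u32, cached: bool) -> impl Future<Output = u32> {
        if cached {
            Either::left(ready(x))
        } else {
            Either::right(compute(x))
        }
    }
}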

View File

@ -0,0 +1,9 @@
//! Asynchronous values.
mod either;
mod poll_fn;
mod ready;
pub use self::either::Either;
pub use self::poll_fn::{poll_fn, PollFn};
pub use self::ready::{err, ok, ready, Ready};

View File

@ -0,0 +1,65 @@
//! Simple "poll function" future and factory.
use core::{
fmt,
future::Future,
pin::Pin,
task::{Context, Poll},
};
/// Creates a future driven by the provided function that receives a task context.
pub fn poll_fn<F, T>(f: F) -> PollFn<F>
where
F: FnMut(&mut Context<'_>) -> Poll<T>,
{
PollFn { f }
}
/// A Future driven by the inner function.
pub struct PollFn<F> {
f: F,
}
impl<F> Unpin for PollFn<F> {}
impl<F> fmt::Debug for PollFn<F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PollFn").finish()
}
}
impl<F, T> Future for PollFn<F>
where
F: FnMut(&mut Context<'_>) -> Poll<T>,
{
type Output = T;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
(self.f)(cx)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[actix_rt::test]
async fn test_poll_fn() {
let res = poll_fn(|_| Poll::Ready(42)).await;
assert_eq!(res, 42);
let mut i = 5;
let res = poll_fn(|cx| {
i -= 1;
if i > 0 {
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(42)
}
})
.await;
assert_eq!(res, 42);
}
}
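// A minimal sketch of a one-off future built with `poll_fn`: a cooperative
// yield point. `yield_now` is a hypothetical helper name.
#[allow(dead_code)]
async fn yield_now() {
    let mut yielded = false;
    poll_fn(|cx| {
        if yielded {
            Poll::Ready(())
        } else {
            // Report Pending once, but wake immediately so the task is
            // rescheduled rather than parked.
            yielded = true;
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    })
    .await
}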

View File

@ -0,0 +1,122 @@
//! When MSRV is 1.48, replace with `core::future::Ready` and `core::future::ready()`.
use core::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
/// Future for the [`ready`](ready()) function.
///
/// Panics if polled more than once.
///
/// # Examples
/// ```
/// use actix_utils::future::ready;
///
/// // async
/// # async fn run() {
/// let a = ready(1);
/// assert_eq!(a.await, 1);
/// # }
///
/// // sync
/// let a = ready(1);
/// assert_eq!(a.into_inner(), 1);
/// ```
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Ready<T> {
val: Option<T>,
}
impl<T> Ready<T> {
/// Unwraps the value from this immediately ready future.
#[inline]
pub fn into_inner(mut self) -> T {
self.val.take().unwrap()
}
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
#[inline]
fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<T> {
let val = self.val.take().expect("Ready polled after completion");
Poll::Ready(val)
}
}
/// Creates a future that is immediately ready with a value.
///
/// # Examples
/// ```no_run
/// use actix_utils::future::ready;
///
/// # async fn run() {
/// let a = ready(1);
/// assert_eq!(a.await, 1);
/// # }
///
/// // sync
/// let a = ready(1);
/// assert_eq!(a.into_inner(), 1);
/// ```
pub fn ready<T>(val: T) -> Ready<T> {
Ready { val: Some(val) }
}
/// Creates a future that is immediately ready with a success value.
///
/// # Examples
/// ```no_run
/// use actix_utils::future::ok;
///
/// # async fn run() {
/// let a = ok::<_, ()>(1);
/// assert_eq!(a.await, Ok(1));
/// # }
/// ```
pub fn ok<T, E>(val: T) -> Ready<Result<T, E>> {
Ready { val: Some(Ok(val)) }
}
/// Creates a future that is immediately ready with an error value.
///
/// # Examples
/// ```no_run
/// use actix_utils::future::err;
///
/// # async fn run() {
/// let a = err::<(), _>(1);
/// assert_eq!(a.await, Err(1));
/// # }
/// ```
pub fn err<T, E>(err: E) -> Ready<Result<T, E>> {
Ready {
val: Some(Err(err)),
}
}
#[cfg(test)]
mod tests {
use futures_util::task::noop_waker;
use super::*;
#[test]
#[should_panic]
fn multiple_poll_panics() {
let waker = noop_waker();
let mut cx = Context::from_waker(&waker);
let mut ready = ready(1);
assert_eq!(Pin::new(&mut ready).poll(&mut cx), Poll::Ready(1));
// panic!
let _ = Pin::new(&mut ready).poll(&mut cx);
}
}

View File

@ -1,12 +1,9 @@
//! Various network related services and utilities for the Actix ecosystem.
//! Various utilities used in the Actix ecosystem.
#![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
pub mod counter;
pub mod dispatcher;
pub mod mpsc;
pub mod task;
pub mod timeout;
pub mod future;

View File

@ -1,84 +0,0 @@
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::task::Waker;
/// A synchronization primitive for task wakeup.
///
/// Sometimes the task interested in a given event will change over time.
/// An `LocalWaker` can coordinate concurrent notifications with the consumer
/// potentially "updating" the underlying task to wake up. This is useful in
/// scenarios where a computation completes in another task and wants to
/// notify the consumer, but the consumer is in the process of being migrated to
/// a new logical task.
///
/// Consumers should call `register` before checking the result of a computation
/// and producers should call `wake` after producing the computation (this
/// differs from the usual `thread::park` pattern). It is also permitted for
/// `wake` to be called **before** `register`. This results in a no-op.
///
/// A single `AtomicWaker` may be reused for any number of calls to `register` or
/// `wake`.
// TODO: Refactor to Cell when remove deprecated methods (@botika)
#[derive(Default)]
pub struct LocalWaker {
pub(crate) waker: UnsafeCell<Option<Waker>>,
// mark LocalWaker as a !Send type.
_t: PhantomData<*const ()>,
}
impl LocalWaker {
/// Create an `LocalWaker`.
pub fn new() -> Self {
LocalWaker {
waker: UnsafeCell::new(None),
_t: PhantomData,
}
}
#[deprecated(
since = "2.1.0",
note = "In favor of `wake`. State of the register doesn't matter at `wake` up"
)]
/// Check if waker has been registered.
#[inline]
pub fn is_registered(&self) -> bool {
unsafe { (*self.waker.get()).is_some() }
}
/// Registers the waker to be notified on calls to `wake`.
///
/// Returns `true` if waker was registered before.
#[inline]
pub fn register(&self, waker: &Waker) -> bool {
unsafe {
let w = self.waker.get();
let last_waker = w.replace(Some(waker.clone()));
last_waker.is_some()
}
}
/// Calls `wake` on the last `Waker` passed to `register`.
///
/// If `register` has not been called yet, then this does nothing.
#[inline]
pub fn wake(&self) {
if let Some(waker) = self.take() {
waker.wake();
}
}
/// Returns the last `Waker` passed to `register`, so that the user can wake it.
///
/// If a waker has not been registered, this returns `None`.
#[inline]
pub fn take(&self) -> Option<Waker> {
unsafe { (*self.waker.get()).take() }
}
}
impl fmt::Debug for LocalWaker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "LocalWaker")
}
}

View File

@ -1,256 +0,0 @@
//! Service that applies a timeout to requests.
//!
//! If the response does not complete within the specified timeout, the response will be aborted.
use core::future::Future;
use core::marker::PhantomData;
use core::pin::Pin;
use core::task::{Context, Poll};
use core::{fmt, time};
use actix_rt::time::{sleep, Sleep};
use actix_service::{IntoService, Service, Transform};
use pin_project_lite::pin_project;
/// Applies a timeout to requests.
#[derive(Debug)]
pub struct Timeout<E = ()> {
timeout: time::Duration,
_t: PhantomData<E>,
}
/// Timeout error
pub enum TimeoutError<E> {
/// Service error
Service(E),
/// Service call timeout
Timeout,
}
impl<E> From<E> for TimeoutError<E> {
fn from(err: E) -> Self {
TimeoutError::Service(err)
}
}
impl<E: fmt::Debug> fmt::Debug for TimeoutError<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimeoutError::Service(e) => write!(f, "TimeoutError::Service({:?})", e),
TimeoutError::Timeout => write!(f, "TimeoutError::Timeout"),
}
}
}
impl<E: fmt::Display> fmt::Display for TimeoutError<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimeoutError::Service(e) => e.fmt(f),
TimeoutError::Timeout => write!(f, "Service call timeout"),
}
}
}
impl<E: PartialEq> PartialEq for TimeoutError<E> {
fn eq(&self, other: &TimeoutError<E>) -> bool {
match self {
TimeoutError::Service(e1) => match other {
TimeoutError::Service(e2) => e1 == e2,
TimeoutError::Timeout => false,
},
TimeoutError::Timeout => matches!(other, TimeoutError::Timeout),
}
}
}
impl<E> Timeout<E> {
pub fn new(timeout: time::Duration) -> Self {
Timeout {
timeout,
_t: PhantomData,
}
}
}
impl<E> Clone for Timeout<E> {
fn clone(&self) -> Self {
Timeout::new(self.timeout)
}
}
impl<S, E, Req> Transform<S, Req> for Timeout<E>
where
S: Service<Req>,
{
type Response = S::Response;
type Error = TimeoutError<S::Error>;
type Transform = TimeoutService<S, Req>;
type InitError = E;
type Future = TimeoutFuture<Self::Transform, Self::InitError>;
fn new_transform(&self, service: S) -> Self::Future {
let service = TimeoutService {
service,
timeout: self.timeout,
_phantom: PhantomData,
};
TimeoutFuture {
service: Some(service),
_err: PhantomData,
}
}
}
pub struct TimeoutFuture<T, E> {
service: Option<T>,
_err: PhantomData<E>,
}
impl<T, E> Unpin for TimeoutFuture<T, E> {}
impl<T, E> Future for TimeoutFuture<T, E> {
type Output = Result<T, E>;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(Ok(self.get_mut().service.take().unwrap()))
}
}
/// Applies a timeout to requests.
#[derive(Debug, Clone)]
pub struct TimeoutService<S, Req> {
service: S,
timeout: time::Duration,
_phantom: PhantomData<Req>,
}
impl<S, Req> TimeoutService<S, Req>
where
S: Service<Req>,
{
pub fn new<U>(timeout: time::Duration, service: U) -> Self
where
U: IntoService<S, Req>,
{
TimeoutService {
timeout,
service: service.into_service(),
_phantom: PhantomData,
}
}
}
impl<S, Req> Service<Req> for TimeoutService<S, Req>
where
S: Service<Req>,
{
type Response = S::Response;
type Error = TimeoutError<S::Error>;
type Future = TimeoutServiceResponse<S, Req>;
actix_service::forward_ready!(service);
fn call(&self, request: Req) -> Self::Future {
TimeoutServiceResponse {
fut: self.service.call(request),
sleep: sleep(self.timeout),
}
}
}
pin_project! {
/// `TimeoutService` response future
#[derive(Debug)]
pub struct TimeoutServiceResponse<S, Req>
where
S: Service<Req>
{
#[pin]
fut: S::Future,
#[pin]
sleep: Sleep,
}
}
impl<S, Req> Future for TimeoutServiceResponse<S, Req>
where
S: Service<Req>,
{
type Output = Result<S::Response, TimeoutError<S::Error>>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
// First, try polling the future
if let Poll::Ready(res) = this.fut.poll(cx) {
return match res {
Ok(v) => Poll::Ready(Ok(v)),
Err(e) => Poll::Ready(Err(TimeoutError::Service(e))),
};
}
// Now check the sleep
this.sleep.poll(cx).map(|_| Err(TimeoutError::Timeout))
}
}
#[cfg(test)]
mod tests {
use core::task::Poll;
use core::time::Duration;
use super::*;
use actix_service::{apply, fn_factory, Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
struct SleepService(Duration);
impl Service<()> for SleepService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<(), ()>>;
actix_service::always_ready!();
fn call(&self, _: ()) -> Self::Future {
let sleep = actix_rt::time::sleep(self.0);
Box::pin(async move {
sleep.await;
Ok(())
})
}
}
#[actix_rt::test]
async fn test_success() {
let resolution = Duration::from_millis(100);
let wait_time = Duration::from_millis(50);
let timeout = TimeoutService::new(resolution, SleepService(wait_time));
assert_eq!(timeout.call(()).await, Ok(()));
}
#[actix_rt::test]
async fn test_timeout() {
let resolution = Duration::from_millis(100);
let wait_time = Duration::from_millis(500);
let timeout = TimeoutService::new(resolution, SleepService(wait_time));
assert_eq!(timeout.call(()).await, Err(TimeoutError::Timeout));
}
#[actix_rt::test]
async fn test_timeout_new_service() {
let resolution = Duration::from_millis(100);
let wait_time = Duration::from_millis(500);
let timeout = apply(
Timeout::new(resolution),
fn_factory(|| async { Ok::<_, ()>(SleepService(wait_time)) }),
);
let srv = timeout.new_service(&()).await.unwrap();
assert_eq!(srv.call(()).await, Err(TimeoutError::Timeout));
}
}

7
local-channel/CHANGES.md Normal file
View File

@ -0,0 +1,7 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.1.1 - 2021-03-29
* Move local mpsc channel to its own crate.

21
local-channel/Cargo.toml Normal file
View File

@ -0,0 +1,21 @@
[package]
name = "local-channel"
version = "0.1.2"
description = "A non-threadsafe multi-producer, single-consumer, futures-aware, FIFO queue"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
repository = "https://github.com/actix/actix-net.git"
keywords = ["channel", "local", "futures"]
license = "MIT OR Apache-2.0"
edition = "2018"
[dependencies]
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
local-waker = "0.1"
[dev-dependencies]
tokio = { version = "1", features = ["rt", "macros"] }

3
local-channel/src/lib.rs Normal file
View File

@ -0,0 +1,3 @@
//! Non-thread-safe channels.
pub mod mpsc;

View File

@ -1,31 +1,35 @@
//! A multi-producer, single-consumer, futures-aware, FIFO queue.
//! A non-thread-safe multi-producer, single-consumer, futures-aware, FIFO queue.
use core::any::Any;
use core::cell::RefCell;
use core::fmt;
use core::pin::Pin;
use core::task::{Context, Poll};
use core::{
cell::RefCell,
fmt,
pin::Pin,
task::{Context, Poll},
};
use std::collections::VecDeque;
use std::error::Error;
use std::rc::Rc;
use std::{collections::VecDeque, error::Error, rc::Rc};
use futures_core::stream::Stream;
use futures_sink::Sink;
use crate::task::LocalWaker;
use futures_util::future::poll_fn;
use local_waker::LocalWaker;
/// Creates an unbounded in-memory channel with buffered storage.
///
/// [Sender]s and [Receiver]s are `!Send`.
pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
let shared = Rc::new(RefCell::new(Shared {
has_receiver: true,
buffer: VecDeque::new(),
blocked_recv: LocalWaker::new(),
}));
let sender = Sender {
shared: shared.clone(),
};
let receiver = Receiver { shared };
(sender, receiver)
}
@ -50,18 +54,22 @@ impl<T> Sender<T> {
/// Sends the provided message along this channel.
pub fn send(&self, item: T) -> Result<(), SendError<T>> {
let mut shared = self.shared.borrow_mut();
if !shared.has_receiver {
return Err(SendError(item)); // receiver was dropped
// receiver was dropped
return Err(SendError(item));
};
shared.buffer.push_back(item);
shared.blocked_recv.wake();
Ok(())
}
/// Closes the sender half
/// Closes the sender half.
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
/// This prevents any further messages from being sent on the channel, by any sender, while
/// still enabling the receiver to drain messages that are already buffered.
pub fn close(&mut self) {
self.shared.borrow_mut().has_receiver = false;
}
@ -110,14 +118,24 @@ impl<T> Drop for Sender<T> {
/// The receiving end of a channel which implements the `Stream` trait.
///
/// This is created by the `channel` function.
/// This is created by the [`channel`] function.
#[derive(Debug)]
pub struct Receiver<T> {
shared: Rc<RefCell<Shared<T>>>,
}
impl<T> Receiver<T> {
/// Create Sender
/// Receive the next value.
///
/// Returns `None` if the channel is empty and has been [closed](Sender::close) explicitly or
/// when all senders have been dropped and, therefore, no more values can ever be sent through
/// this channel.
pub async fn recv(&mut self) -> Option<T> {
let mut this = Pin::new(self);
poll_fn(|cx| this.as_mut().poll_next(cx)).await
}
/// Create an associated [Sender].
pub fn sender(&self) -> Sender<T> {
Sender {
shared: self.shared.clone(),
@ -132,11 +150,13 @@ impl<T> Stream for Receiver<T> {
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut shared = self.shared.borrow_mut();
if Rc::strong_count(&self.shared) == 1 {
// All senders have been dropped, so drain the buffer and end the
// stream.
Poll::Ready(shared.buffer.pop_front())
} else if let Some(msg) = shared.buffer.pop_front() {
// All senders have been dropped, so drain the buffer and end the stream.
return Poll::Ready(shared.buffer.pop_front());
}
if let Some(msg) = shared.buffer.pop_front() {
Poll::Ready(Some(msg))
} else {
shared.blocked_recv.register(cx.waker());
@ -153,9 +173,17 @@ impl<T> Drop for Receiver<T> {
}
}
/// Error type for sending, used when the receiving end of a channel is
/// dropped
pub struct SendError<T>(T);
/// Error returned when attempting to send after the channel's [Receiver] is dropped or closed.
///
/// Allows access to the message that failed to send via [`into_inner`](Self::into_inner).
pub struct SendError<T>(pub T);
impl<T> SendError<T> {
/// Returns the message that was attempted to be sent but failed.
pub fn into_inner(self) -> T {
self.0
}
}
impl<T> fmt::Debug for SendError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -169,26 +197,15 @@ impl<T> fmt::Display for SendError<T> {
}
}
impl<T: Any> Error for SendError<T> {
fn description(&self) -> &str {
"send failed because receiver is gone"
}
}
impl<T> SendError<T> {
/// Returns the message that was attempted to be sent but failed.
pub fn into_inner(self) -> T {
self.0
}
}
impl<T> Error for SendError<T> {}
#[cfg(test)]
mod tests {
use super::*;
use futures_util::future::lazy;
use futures_util::{stream::Stream, StreamExt};
use futures_util::{future::lazy, StreamExt as _};
#[actix_rt::test]
use super::*;
#[tokio::test]
async fn test_mpsc() {
let (tx, mut rx) = channel();
tx.send("test").unwrap();
@ -221,4 +238,18 @@ mod tests {
assert!(tx.send("test").is_err());
assert!(tx2.send("test").is_err());
}
#[tokio::test]
async fn test_recv() {
let (tx, mut rx) = channel();
tx.send("test").unwrap();
assert_eq!(rx.recv().await.unwrap(), "test");
drop(tx);
let (tx, mut rx) = channel();
tx.send("test").unwrap();
assert_eq!(rx.recv().await.unwrap(), "test");
drop(tx);
assert!(rx.recv().await.is_none());
}
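// A minimal sketch of the documented close semantics: sends fail after
// `close`, the failed message is recoverable, and already-buffered
// messages still drain.
#[tokio::test]
async fn test_close_then_drain() {
    let (mut tx, mut rx) = channel();
    tx.send(1).unwrap();
    tx.close();

    // Sending after close fails and the message can be taken back.
    assert_eq!(tx.send(2).unwrap_err().into_inner(), 2);

    // The message buffered before the close is still delivered; once the
    // last sender is gone the stream ends.
    assert_eq!(rx.recv().await, Some(1));
    drop(tx);
    assert_eq!(rx.recv().await, None);
}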
}

11
local-waker/CHANGES.md Normal file
View File

@ -0,0 +1,11 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.1.2 - 2021-04-01
* Fix crate metadata.
## 0.1.1 - 2021-03-29
* Move `LocalWaker` to its own crate.

15
local-waker/Cargo.toml Normal file
View File

@ -0,0 +1,15 @@
[package]
name = "local-waker"
version = "0.1.1"
description = "A synchronization primitive for thread-local task wakeup"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
keywords = ["waker", "local", "futures", "no-std"]
repository = "https://github.com/actix/actix-net.git"
categories = ["asynchronous", "no-std"]
license = "MIT OR Apache-2.0"
edition = "2018"
[dependencies]

71
local-waker/src/lib.rs Normal file
View File

@ -0,0 +1,71 @@
//! A synchronization primitive for thread-local task wakeup.
//!
//! See docs for [`LocalWaker`].
#![no_std]
use core::{cell::Cell, fmt, marker::PhantomData, task::Waker};
/// A synchronization primitive for task wakeup.
///
/// Sometimes the task interested in a given event will change over time. A `LocalWaker` can
/// coordinate concurrent notifications with the consumer, potentially "updating" the underlying
/// task to wake up. This is useful in scenarios where a computation completes in another task and
/// wants to notify the consumer, but the consumer is in the process of being migrated to a new
/// logical task.
///
/// Consumers should call [`register`] before checking the result of a computation and producers
/// should call [`wake`] after producing the computation (this differs from the usual `thread::park`
/// pattern). It is also permitted for [`wake`] to be called _before_ [`register`]. This results in
/// a no-op.
///
/// A single `LocalWaker` may be reused for any number of calls to [`register`] or [`wake`].
///
/// [`register`]: LocalWaker::register
/// [`wake`]: LocalWaker::wake
#[derive(Default)]
pub struct LocalWaker {
pub(crate) waker: Cell<Option<Waker>>,
// mark LocalWaker as a !Send type.
_phantom: PhantomData<*const ()>,
}
impl LocalWaker {
/// Creates a new, empty `LocalWaker`.
pub fn new() -> Self {
LocalWaker::default()
}
/// Registers the waker to be notified on calls to `wake`.
///
/// Returns `true` if a waker was previously registered.
#[inline]
pub fn register(&self, waker: &Waker) -> bool {
let last_waker = self.waker.replace(Some(waker.clone()));
last_waker.is_some()
}
/// Calls `wake` on the last `Waker` passed to `register`.
///
/// If `register` has not been called yet, then this does nothing.
#[inline]
pub fn wake(&self) {
if let Some(waker) = self.take() {
waker.wake();
}
}
/// Returns the last `Waker` passed to `register`, so that the user can wake it.
///
/// If a waker has not been registered, this returns `None`.
#[inline]
pub fn take(&self) -> Option<Waker> {
self.waker.take()
}
}
impl fmt::Debug for LocalWaker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "LocalWaker")
}
}
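// A minimal sketch of the register-then-check pattern described above, for use
// on a single-threaded executor; `Signal` and `SignalFuture` are hypothetical names.
#[allow(dead_code)]
mod usage_sketch {
    use core::{
        cell::Cell,
        future::Future,
        pin::Pin,
        task::{Context, Poll},
    };

    use super::LocalWaker;

    /// One-shot signal: the producer sets the flag and wakes the consumer.
    #[derive(Default)]
    struct Signal {
        set: Cell<bool>,
        waker: LocalWaker,
    }

    impl Signal {
        fn notify(&self) {
            self.set.set(true);
            // Wake whichever task most recently registered interest.
            self.waker.wake();
        }
    }

    struct SignalFuture<'a>(&'a Signal);

    impl Future for SignalFuture<'_> {
        type Output = ();

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
            // Register interest first, as prescribed above, then check the flag.
            self.0.waker.register(cx.waker());
            if self.0.set.get() {
                Poll::Ready(())
            } else {
                Poll::Pending
            }
        }
    }
}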