From 8cd02c14f38a58029f105eb78ef3af566092b157 Mon Sep 17 00:00:00 2001 From: Maksym Vorobiov Date: Tue, 21 Jan 2020 18:59:05 +0200 Subject: [PATCH] add direct service benchmarks --- Cargo.toml | 4 ++ benches/server.rs | 18 ++++----- benches/service.rs | 96 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 108 insertions(+), 10 deletions(-) create mode 100644 benches/service.rs diff --git a/Cargo.toml b/Cargo.toml index e0be64b1b..d366defe1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,4 +120,8 @@ awc = { path = "awc" } [[bench]] name = "server" +harness = false + +[[bench]] +name = "service" harness = false \ No newline at end of file diff --git a/benches/server.rs b/benches/server.rs index a1261a73c..93079a223 100644 --- a/benches/server.rs +++ b/benches/server.rs @@ -1,7 +1,7 @@ -use actix_web::{web, test, App, HttpResponse}; +use actix_web::{test, web, App, HttpResponse}; +use awc::Client; use criterion::{criterion_group, criterion_main, Criterion}; use futures::future::join_all; -use awc::Client; const STR: &str = "Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ @@ -25,7 +25,6 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World \ Hello World Hello World Hello World Hello World Hello World"; - // benchmark sending all requests at the same time fn bench_async_burst(c: &mut Criterion) { let srv = test::start(|| { @@ -38,29 +37,28 @@ fn bench_async_burst(c: &mut Criterion) { let url = srv.url("/"); let mut rt = actix_rt::System::new("test"); - c.bench_function("get_body_async_burst", move |b| + c.bench_function("get_body_async_burst", move |b| { b.iter_custom(|iters| { - let client = Client::new().get(url.clone()).freeze().unwrap(); let start = std::time::Instant::now(); // benchmark body let resps = rt.block_on(async move { - let burst = (0..iters).map( |_| client.send() ); - 
join_all( burst ).await + let burst = (0..iters).map(|_| client.send()); + join_all(burst).await }); let elapsed = start.elapsed(); // if there are failed requests that might be an issue let failed = resps.iter().filter(|r| r.is_err()).count(); - if failed >0 { + if failed > 0 { eprintln!("failed {} requests (might be bench timeout)", failed); }; elapsed }) - ); + }); } criterion_group!(server_benches, bench_async_burst); -criterion_main!(server_benches); \ No newline at end of file +criterion_main!(server_benches); diff --git a/benches/service.rs b/benches/service.rs new file mode 100644 index 000000000..2fb4ca03d --- /dev/null +++ b/benches/service.rs @@ -0,0 +1,96 @@ +use actix_service::Service; +use actix_web::dev::{ServiceRequest, ServiceResponse}; +use actix_web::{test, web, App, Error, HttpResponse}; +use criterion::{criterion_group, criterion_main, Criterion}; +use std::cell::RefCell; +use std::rc::Rc; + +use crate::test::{init_service, ok_service, TestRequest}; + +// TODO: probably convert to macro? + +// Following approach is usable for benching Service wrappers +// Using minimum service code implementation we first measure +// time to run minimum service, then measure time with wrapper. 
+// Sample results on MacBook Pro '14 +// async_service_direct time: [1.0908 us 1.1656 us 1.2613 us] +fn async_cloneable_wrapper_service(c: &mut Criterion) { + let mut rt = actix_rt::System::new("test"); + let srv = Rc::new(RefCell::new(ok_service())); + + let req = TestRequest::default().to_srv_request(); + assert!(rt + .block_on(srv.borrow_mut().call(req)) + .unwrap() + .status() + .is_success()); + + // start benchmark loops + c.bench_function("async_service_direct", move |b| { + b.iter_custom(|iters| { + let srv = srv.clone(); + // exclude request generation, it appears it takes significant time vs call (3us vs 1us) + let reqs: Vec<_> = (0..iters) + .map(|_| TestRequest::default().to_srv_request()) + .collect(); + let start = std::time::Instant::now(); + // benchmark body + rt.block_on(async move { + for req in reqs { + srv.borrow_mut().call(req).await.unwrap(); + } + }); + let elapsed = start.elapsed(); + // check that at least first request succeeded + elapsed + }) + }); +} + +async fn index(req: ServiceRequest) -> Result<ServiceResponse, Error> { + Ok(req.into_response(HttpResponse::Ok().finish())) +} + +// Benchmark basic WebService directly +// this approach is usable for benching WebService, though it adds some time to direct service call: +// Sample results on MacBook Pro '14 +// time: [2.0724 us 2.1345 us 2.2074 us] +fn async_cloneable_wrapper_web_service(c: &mut Criterion) { + let mut rt = actix_rt::System::new("test"); + let srv = Rc::new(RefCell::new(rt.block_on(init_service( + App::new().service(web::service("/").finish(index)), + )))); + + let req = TestRequest::get().uri("/").to_request(); + assert!(rt + .block_on(srv.borrow_mut().call(req)) + .unwrap() + .status() + .is_success()); + + // start benchmark loops + c.bench_function("async_web_service_direct", move |b| { + b.iter_custom(|iters| { + let srv = srv.clone(); + let reqs = (0..iters).map(|_| TestRequest::get().uri("/").to_request()); + + let start = std::time::Instant::now(); + // benchmark body + 
rt.block_on(async move { + for req in reqs { + srv.borrow_mut().call(req).await.unwrap(); + } + }); + let elapsed = start.elapsed(); + // check that at least first request succeeded + elapsed + }) + }); +} + +criterion_group!( + service_benches, + async_cloneable_wrapper_service, + async_cloneable_wrapper_web_service +); +criterion_main!(service_benches);