mirror of https://github.com/fafhrd91/actix-net
get rtless working with manual localset
commit c56265d9aa
parent 81d7295486
@@ -240,6 +240,15 @@ impl Arbiter {
            })
        })
    }

+    /// Try to get current running arbiter handle.
+    ///
+    /// Returns `None` if no Arbiter has been started.
+    ///
+    /// Contrary to [`current`](Self::current), this never panics.
+    pub fn try_current() -> Option<ArbiterHandle> {
+        HANDLE.with(|cell| cell.borrow().clone())
+    }
+
    /// Stop Arbiter from continuing it's event loop.
    ///
    /// Returns true if stop message was sent successfully and false if the Arbiter has been dropped.
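The new `try_current` mirrors `Arbiter::current` but returns an `Option` instead of panicking when no Arbiter is registered for the thread. A minimal usage sketch, assuming the actix-rt 2.x API this diff targets:

```rust
use actix_rt::Arbiter;

fn main() {
    // Outside of any actix System/Arbiter, `Arbiter::current()` panics,
    // while `try_current()` lets the caller fall back gracefully.
    match Arbiter::try_current() {
        Some(hnd) => {
            hnd.spawn(async { println!("spawned on the current arbiter") });
        }
        None => println!("no arbiter running on this thread"),
    }
}
```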
@@ -130,7 +130,7 @@ impl System {
     ///
     /// Returns `None` if no System has been started.
     ///
-    /// Contrary to `current`, this never panics.
+    /// Contrary to [`current`](Self::current), this never panics.
     pub fn try_current() -> Option<System> {
         CURRENT.with(|cell| cell.borrow().clone())
     }
@@ -10,7 +10,7 @@
 //! the length of each line it echos and the total size of data sent when the connection is closed.
 
 use std::{
-    env, io,
+    io,
     sync::{
         atomic::{AtomicUsize, Ordering},
         Arc,
@@ -25,10 +25,8 @@ use futures_util::future::ok;
 use log::{error, info};
 use tokio::io::{AsyncReadExt, AsyncWriteExt};
 
-#[actix_rt::main]
-async fn main() -> io::Result<()> {
-    env::set_var("RUST_LOG", "info");
-    env_logger::init();
+async fn run() -> io::Result<()> {
+    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
 
     let count = Arc::new(AtomicUsize::new(0));
 
@@ -85,6 +83,25 @@ async fn main() -> io::Result<()> {
             })
         })?
         .workers(1)
+        // .system_exit()
         .run()
         .await
 }
+
+fn main() -> io::Result<()> {
+    let rt = tokio::runtime::Builder::new_current_thread()
+        .enable_all()
+        .build()
+        .unwrap();
+
+    let ls = tokio::task::LocalSet::new();
+    rt.block_on(ls.run_until(run()))?;
+
+    Ok(())
+}
+
+// #[actix_rt::main]
+// async fn main() -> io::Result<()> {
+//     run().await?;
+//     Ok(())
+// }
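The rewritten example drops `#[actix_rt::main]` and drives the server from a plain Tokio runtime. The `LocalSet` is the important part: actix-rt's `spawn` is a local spawn (`tokio::task::spawn_local`), which is only legal inside a `LocalSet`, a context that the actix `System`/`Arbiter` normally provides. A minimal sketch of that Tokio requirement, independent of actix:

```rust
fn main() {
    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap();

    let local = tokio::task::LocalSet::new();

    rt.block_on(local.run_until(async {
        // spawn_local only works inside a LocalSet (or an actix Arbiter);
        // it accepts !Send futures, e.g. ones holding an Rc.
        let handle = tokio::task::spawn_local(async {
            let data = std::rc::Rc::new(42);
            *data
        });

        let value = handle.await.unwrap();
        println!("local task returned {}", value);
    }));
}
```

Running the server future inside `ls.run_until(..)` gives it that same context without ever constructing a `System`.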
@@ -156,13 +156,17 @@ impl Accept {
         srv: ServerHandle,
         handles: Vec<WorkerHandleAccept>,
     ) {
-        // Accept runs in its own thread and would want to spawn additional futures to current
-        // actix system.
-        let sys = System::current();
+        // Accept runs in its own thread and might spawn additional futures to current system
+        let sys = System::try_current();
+
         thread::Builder::new()
             .name("actix-server accept loop".to_owned())
             .spawn(move || {
+                // forward existing actix system context
+                if let Some(sys) = sys {
                     System::set_current(sys);
+                }
+
                 let (mut accept, mut sockets) =
                     Accept::new_with_sockets(poll, waker, socks, handles, srv);
 
@@ -479,10 +483,23 @@ impl Accept {
 
             // after the sleep a Timer interest is sent to Accept Poll
             let waker = self.waker.clone();
-            System::current().arbiter().spawn(async move {
-                sleep(Duration::from_millis(510)).await;
-                waker.wake(WakerInterest::Timer);
-            });
+
+            match System::try_current() {
+                Some(sys) => {
+                    sys.arbiter().spawn(async move {
+                        sleep(Duration::from_millis(510)).await;
+                        waker.wake(WakerInterest::Timer);
+                    });
+                }
+
+                None => {
+                    let rt = tokio::runtime::Handle::current();
+                    rt.spawn(async move {
+                        sleep(Duration::from_millis(510)).await;
+                        waker.wake(WakerInterest::Timer);
+                    });
+                }
+            }
 
             return;
         }
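The accept loop now picks its spawn target at runtime: the System's Arbiter when one exists, otherwise the ambient Tokio runtime. The same fallback can be factored into a small helper; `spawn_on_system_or_tokio` below is a hypothetical name, not part of the crate, and `Handle::current()` still assumes some Tokio runtime is reachable:

```rust
use std::future::Future;

use actix_rt::System;

// Hypothetical helper mirroring the match above: prefer the actix System's
// arbiter, fall back to whatever Tokio runtime the caller is running under.
fn spawn_on_system_or_tokio<F>(fut: F)
where
    F: Future<Output = ()> + Send + 'static,
{
    match System::try_current() {
        Some(sys) => {
            sys.arbiter().spawn(fut);
        }
        None => {
            // panics if called outside of any Tokio runtime context
            tokio::runtime::Handle::current().spawn(fut);
        }
    }
}
```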
@@ -7,7 +7,7 @@ use std::{
 };
 
 use actix_rt::{self as rt, net::TcpStream, time::sleep, System};
-use log::{error, info};
+use log::{error, info, trace};
 use tokio::sync::{
     mpsc::{unbounded_channel, UnboundedReceiver},
     oneshot,
@@ -160,6 +160,8 @@ impl ServerBuilder {
     {
         let sockets = bind_addr(addr, self.backlog)?;
 
+        trace!("binding server to: {:?}", &sockets);
+
         for lst in sockets {
             let token = self.next_token();
             self.services.push(StreamNewService::create(
@@ -171,6 +173,7 @@ impl ServerBuilder {
             self.sockets
                 .push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
         }
 
         Ok(self)
     }
+
@@ -255,6 +258,8 @@ impl ServerBuilder {
         if self.sockets.is_empty() {
             panic!("Server should have at least one bound socket");
         } else {
+            trace!("start running server");
+
             for (_, name, lst) in &self.sockets {
                 info!(
                     r#"Starting service: "{}", workers: {}, listening on: {}"#,
@@ -264,6 +269,8 @@ impl ServerBuilder {
                 );
             }
 
+            trace!("run server");
+
             // start workers
             let handles = (0..self.threads)
                 .map(|idx| {
@@ -301,8 +308,8 @@ impl ServerBuilder {
         idx: usize,
         waker_queue: WakerQueue,
     ) -> (WorkerHandleAccept, WorkerHandleServer) {
-
+        trace!("start server worker {}", idx);
         let services = self.services.iter().map(|v| v.clone_factory()).collect();
 
         ServerWorker::start(idx, services, waker_queue, self.worker_config)
     }
@@ -384,7 +391,7 @@ impl ServerBuilder {
 
             if exit {
                 sleep(Duration::from_millis(300)).await;
-                System::current().stop();
+                System::try_current().as_ref().map(System::stop);
             }
         });
     }
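`System::try_current().as_ref().map(System::stop)` is a compact way of saying "stop the System if there is one"; when no System is registered it becomes a no-op instead of the panic that `System::current().stop()` produced. Spelled out, the call is roughly equivalent to:

```rust
// desugared form of the stop call above
if let Some(sys) = actix_rt::System::try_current() {
    sys.stop();
}
```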
@@ -14,7 +14,7 @@ use std::{
 use actix_rt::{
     spawn,
     time::{sleep, Instant, Sleep},
-    Arbiter,
+    Arbiter, ArbiterHandle, System,
 };
 use futures_core::{future::LocalBoxFuture, ready};
 use log::{error, info, trace};
@@ -23,12 +23,14 @@ use tokio::sync::{
     oneshot,
 };
 
-use crate::join_all;
-use crate::service::{BoxedServerService, InternalServiceFactory};
-use crate::socket::MioStream;
-use crate::waker_queue::{WakerInterest, WakerQueue};
+use crate::{
+    join_all,
+    service::{BoxedServerService, InternalServiceFactory},
+    socket::MioStream,
+    waker_queue::{WakerInterest, WakerQueue},
+};
 
-/// Stop worker message. Returns `true` on successful graceful shutdown.
+/// Stop worker message. Returns `true` on successful graceful shutdown
 /// and `false` if some connections still alive when shutdown execute.
 pub(crate) struct Stop {
     graceful: bool,
@@ -273,6 +275,8 @@ impl ServerWorker {
         waker_queue: WakerQueue,
         config: ServerWorkerConfig,
     ) -> (WorkerHandleAccept, WorkerHandleServer) {
+        trace!("starting server worker {}", idx);
+
         let (tx1, rx) = unbounded_channel();
         let (tx2, rx2) = unbounded_channel();
 
@@ -289,16 +293,24 @@ impl ServerWorker {
             Arbiter::new()
         };
 
-        #[cfg(not(all(target_os = "linux", feature = "io-uring")))]
-        let arbiter = Arbiter::with_tokio_rt(move || {
-            tokio::runtime::Builder::new_current_thread()
-                .enable_all()
-                .max_blocking_threads(config.max_blocking_threads)
-                .build()
-                .unwrap()
-        });
+        // get actix system context if it is set
+        let sys = System::try_current();
+
+        std::thread::Builder::new()
+            .name("eofibef".to_owned())
+            .spawn(move || {
+                // forward existing actix system context
+                if let Some(sys) = sys {
+                    System::set_current(sys);
+                }
+
+                let rt = tokio::runtime::Builder::new_current_thread()
+                    .enable_all()
+                    .max_blocking_threads(config.max_blocking_threads)
+                    .build()
+                    .unwrap();
 
-        arbiter.spawn(async move {
+                rt.block_on(tokio::task::LocalSet::new().run_until(async move {
             let fut = factories
                 .iter()
                 .enumerate()
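This replaces `Arbiter::with_tokio_rt` with a hand-rolled worker thread so that workers can start even when no actix System exists: capture `System::try_current()` before spawning, restore it inside the new thread when present, then drive a `LocalSet` on a single-threaded runtime. The shape of the pattern, reduced to a sketch (the thread name, function name, and the future body are placeholders):

```rust
use actix_rt::System;

fn spawn_worker_thread() -> std::thread::JoinHandle<()> {
    // capture the current System handle, if any, on the spawning thread
    let sys = System::try_current();

    std::thread::Builder::new()
        .name("worker".to_owned())
        .spawn(move || {
            // forward the System context into the new thread when present
            if let Some(sys) = sys {
                System::set_current(sys);
            }

            let rt = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .unwrap();

            // a LocalSet gives spawn_local (and thus actix_rt::spawn) a home
            rt.block_on(tokio::task::LocalSet::new().run_until(async {
                // worker future runs here
            }));
        })
        .expect("failed to spawn worker thread")
}
```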
@@ -329,7 +341,7 @@ impl ServerWorker {
                     .into_boxed_slice(),
                 Err(e) => {
                     error!("Can not start worker: {:?}", e);
-                    Arbiter::current().stop();
+                    Arbiter::try_current().as_ref().map(ArbiterHandle::stop);
                     return;
                 }
             };
@@ -343,9 +355,15 @@ impl ServerWorker {
                 factories: factories.into_boxed_slice(),
                 state: Default::default(),
                 shutdown_timeout: config.shutdown_timeout,
-            });
-        });
-        });
+                })
+                .await
+                .expect("task 3 panic");
+            })
+            .await
+            .expect("task 2 panic");
+            }))
+        })
+        .expect("worker thread error/panic");
 
         handle_pair(idx, tx1, tx2, counter)
     }
@@ -450,8 +468,9 @@ impl Default for WorkerState {
 
 impl Drop for ServerWorker {
     fn drop(&mut self) {
+        trace!("dropping ServerWorker");
         // Stop the Arbiter ServerWorker runs on on drop.
-        Arbiter::current().stop();
+        Arbiter::try_current().as_ref().map(ArbiterHandle::stop);
     }
 }
 