Hello,
I was wondering if there are any suggestions on optimising the Tokio TcpListener section to better handle the number of conenctions that it holds.
As you can see below, the connection count is just rising, the requests are processed normally, however if I'm comparing this to nginx which was running previously it kept the connection count around 2k .
This is the listener:
async fn https_listener(
redis_connection: Option<redis::aio::ConnectionManager>,
settings: Arc<tokio::sync::RwLock<config::LolzaConfiguration>>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let conf = match config::load_configuration() {
Ok(json) => json,
Err(e) => {
panic!("problem parsing configuration: {}", e);
}
};
// configures the rustls accptor
let tls_sni_certificates = configure_tls_sni(conf.server.tls_certificates.unwrap()).unwrap();
// start the listener
let listen_port = format!(
"{}:{}",
conf.server.tls_ipaddress,
conf.server.tls_port.unwrap()
);
info!(target: "https_listener", "starting HTTPS listener: {}", listen_port);
let listener = TcpListener::bind(listen_port).await?;
loop {
let (tcp_stream, remote_addr) = listener.accept().await?;
// clone things we want to do
let tls_sni_certificates = tls_sni_certificates.clone();
let settings = settings.clone();
let redis_connection = redis_connection.clone();
tokio::spawn(async move {
let tls_stream = match tls_sni_certificates.accept(tcp_stream).await {
Ok(tls_stream) => tls_stream,
Err(err) => {
error!(
target: "tls_acceptor",
"Failed to perform TLS handshake for {}: {:#?}",
remote_addr, err
);
return;
}
};
let response_service = service_fn(|req: Request<Incoming>| {
let settings = settings.clone();
let redis_connection = redis_connection.clone();
async move {
service::http_service_2(
req,
&settings,
redis_connection,
remote_addr,
)
.await;
}
});
// Start serving
if let Err(err) = Builder::new(TokioExecutor::new())
.serve_connection(TokioIo::new(tls_stream), response_service)
.await
{
error!(target: "https_listener", "TCP listener: {:?}", err)
}
drop(settings);
});
}
}