I'm working on high-performance data indexing and trying to optimize my block fetching and parsing pipeline. I'm currently comparing my implementation against a similar crate that can process blocks in roughly 250 ms, while my implementation is significantly slower.
```rust
use std::time::Duration;

use async_trait::async_trait;
use hyper::{Body, Client, Method, Request};
use hyper_tls::HttpsConnector;

use crate::hyper_rpc::Transport;
use crate::RpcError;
#[derive(Debug)]
pub struct HyperTransport {
    client: Client<HttpsConnector<hyper::client::HttpConnector>>,
    url: String,
}

// Keep idle pooled connections alive so repeated requests to the same host
// can reuse an existing TCP/TLS connection instead of reconnecting.
const POOL_IDLE_TIMEOUT: Duration = Duration::from_secs(60);
const CONTENT_TYPE_JSON: &str = "application/json";
impl HyperTransport {
    pub fn new(url: String) -> Self {
        let https = HttpsConnector::new();
        // Build the hyper client with a shared connection pool.
        let client = Client::builder()
            .pool_idle_timeout(POOL_IDLE_TIMEOUT)
            .pool_max_idle_per_host(32)
            .build::<_, Body>(https);
        Self { client, url }
    }
}
#[async_trait]
impl Transport for HyperTransport {
    async fn hyper_execute(&self, request: String) -> Result<String, RpcError> {
        let req = Request::builder()
            .method(Method::POST)
            .uri(self.url.as_str())
            .header("Content-Type", CONTENT_TYPE_JSON)
            .body(Body::from(request))
            .map_err(|e| RpcError::Transport(format!("Failed to build request: {}", e)))?;

        let response = self
            .client
            .request(req)
            .await
            .map_err(|e| RpcError::Transport(format!("Request failed: {}", e)))?;

        // Buffer the entire response body into memory before decoding it.
        let body_bytes = hyper::body::to_bytes(response.into_body())
            .await
            .map_err(|e| RpcError::Transport(format!("Failed to read response body: {}", e)))?;

        String::from_utf8(body_bytes.to_vec())
            .map_err(|e| RpcError::Response(format!("Invalid UTF-8 in response: {}", e)))
    }
    async fn hyper_execute_raw(&self, request: String) -> Result<Vec<u8>, RpcError> {
        let req = Request::builder()
            .method(Method::POST)
            .uri(self.url.as_str())
            .header("Content-Type", CONTENT_TYPE_JSON)
            .body(Body::from(request))
            .map_err(|e| RpcError::Transport(format!("Failed to build request: {}", e)))?;

        let response = self
            .client
            .request(req)
            .await
            .map_err(|e| RpcError::Transport(format!("Request failed: {}", e)))?;

        let body_bytes = hyper::body::to_bytes(response.into_body())
            .await
            .map_err(|e| RpcError::Transport(format!("Failed to read response body: {}", e)))?;

        // The body is already fully buffered; `to_vec` copies it into a Vec
        // with exactly the right capacity.
        Ok(body_bytes.to_vec())
    }
}
```
I've built the fetching side with the Hyper HTTP client, calling an RPC endpoint, and to avoid serde overhead I parse the responses directly from the raw bytes. I'm new to networking, though, and when I benchmarked, parsing only took about 20 ms, so no matter how much I optimize the parsing, networking will eat the majority of the time.
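Since parsing is only ~20 ms, the main thing I can think of to hide the round-trip latency is keeping several requests in flight at once over the pooled client instead of awaiting each block sequentially. A minimal sketch of what I mean, built on the transport above (the make_block_request helper, the getblock method name, and the concurrency value are just placeholders, not my real RPC):

```rust
use futures::stream::{self, StreamExt};

// Placeholder helper: builds the JSON-RPC body for one block height.
// The method name and params stand in for whatever the node actually expects.
fn make_block_request(height: u64) -> String {
    format!(r#"{{"jsonrpc":"2.0","id":{height},"method":"getblock","params":[{height}]}}"#)
}

// Fetch many blocks while keeping up to `concurrency` requests in flight,
// so round-trip latency overlaps instead of being paid once per block.
// Assumes the `Transport` trait from above is in scope for `hyper_execute_raw`.
async fn fetch_blocks(
    transport: &HyperTransport,
    heights: &[u64],
    concurrency: usize,
) -> Vec<Result<Vec<u8>, RpcError>> {
    stream::iter(heights.iter().copied())
        .map(|height| async move {
            transport
                .hyper_execute_raw(make_block_request(height))
                .await
        })
        .buffer_unordered(concurrency)
        .collect()
        .await
}
```

buffer_unordered yields responses as they complete, so results come back out of order; buffered(concurrency) would preserve request order at the cost of some head-of-line blocking.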