30 changes: 30 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

12 changes: 10 additions & 2 deletions Cargo.toml
@@ -1,7 +1,10 @@
[package]
name = "kprf"
version = "0.1.0"
authors = ["Aleksandr Petrukhin <[email protected]>"]
authors = [
"Aleksandr Petrukhin <[email protected]>",
"Artyom Belyankin <[email protected]>",
]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -21,7 +24,12 @@ slog-term = "2.8.0"
prometheus = "0.12.0"
lazy_static = "1.4.0"
uuid = { version = "0.8", features = ["serde", "v4"] }
r2d2 = "0.8.9"
r2d2_kafka = { path = "r2d2_kafka" }

[features]
default = ["std"]
std = [ "serde" ]
std = [ "serde" ]

[workspace]
members = ["r2d2_kafka"]
Empty file modified Makefile
100644 → 100755
Empty file.
11 changes: 11 additions & 0 deletions r2d2_kafka/Cargo.toml
@@ -0,0 +1,11 @@
[package]
name = "r2d2_kafka"
version = "0.1.0"
description = "Apache Kafka support for the r2d2 connection pool"
keywords = ["kafka", "apache-kafka", "apache", "pool", "r2d2"]
authors = ["Artyom Belyankin <[email protected]>"]
edition = "2018"

[dependencies]
r2d2 = "0.8.9"
rdkafka = { version = "0.25", features = ["cmake-build"] }
46 changes: 46 additions & 0 deletions r2d2_kafka/src/lib.rs
@@ -0,0 +1,46 @@
use rdkafka::config::FromClientConfig;
use rdkafka::error::{KafkaError, KafkaResult};
use rdkafka::producer::{FutureProducer, Producer};
use rdkafka::ClientConfig;
use std::time::Duration;

const DEFAULT_VALIDATION_DURATION: u64 = 100; // milliseconds

#[derive(Clone, Debug)]
pub struct KafkaConnectorManager {
config: ClientConfig,
validation_duration: Duration,
}

impl KafkaConnectorManager {
pub fn new(config: ClientConfig) -> KafkaConnectorManager {
let validation_duration = Duration::from_millis(DEFAULT_VALIDATION_DURATION);
KafkaConnectorManager {
config,
validation_duration,
}
}
}

impl r2d2::ManageConnection for KafkaConnectorManager {
type Connection = FutureProducer;
type Error = KafkaError;

fn connect(&self) -> KafkaResult<FutureProducer> {
// Producer creation only validates the config; librdkafka connects to
// the brokers lazily, so failures surface in `is_valid` instead.
FutureProducer::from_config(&self.config)
}

fn is_valid(&self, conn: &mut Self::Connection) -> Result<(), Self::Error> {
// Probe the cluster by fetching metadata within the validation timeout;
// any error marks the pooled producer as invalid.
conn.client()
.fetch_metadata(Some(""), self.validation_duration)
.map(|_| ())
}

fn has_broken(&self, conn: &mut Self::Connection) -> bool {
self.is_valid(conn).is_err()
}
}
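
For context, a minimal usage sketch of the manager above (not part of this PR; the broker address and pool size are illustrative placeholders):

use r2d2::Pool;
use r2d2_kafka::KafkaConnectorManager;
use rdkafka::ClientConfig;

fn main() {
    let mut config = ClientConfig::new();
    config.set("bootstrap.servers", "localhost:9092"); // placeholder broker

    let manager = KafkaConnectorManager::new(config);
    let pool: Pool<KafkaConnectorManager> = Pool::builder()
        .max_size(4) // illustrative; the PR uses 15 for the producer pool
        .build(manager)
        .expect("failed to build producer pool");

    // Each checkout yields a pooled FutureProducer; dropping the guard
    // returns the producer to the pool.
    let producer = pool.get().expect("no producer available within timeout");
    drop(producer);
}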
3 changes: 1 addition & 2 deletions src/http/handlers.rs
@@ -54,7 +54,6 @@ mod handler {
use crate::kafka::kafka::producer::Producer;
use crate::log::kflog;
use rdkafka::error::KafkaError;
use rdkafka::producer::future_producer::OwnedDeliveryResult;
use std::convert::Infallible;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
@@ -95,7 +94,7 @@
&record.topic,
&record.data,
record.key.as_ref(),
Duration::from_millis(100),
Duration::from_millis(300),
)
.await
})
98 changes: 60 additions & 38 deletions src/kafka/kafka.rs
@@ -201,15 +201,19 @@ pub mod config {
}

pub mod producer {
use rdkafka::client::DefaultClientContext;
use rdkafka::config::FromClientConfig;
use r2d2::Pool;
use r2d2_kafka::KafkaConnectorManager;
use rdkafka::producer::future_producer::OwnedDeliveryResult;
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::producer::FutureRecord;
use std::sync::Arc;
use std::time::{Duration, SystemTime};

const DEFAULT_PRODUCERS_POOL_SIZE: u32 = 15;
const DEFAULT_PRODUCERS_CONNECTION_TIMEOUT: Duration = Duration::from_millis(100);
const DEFAULT_PRODUCERS_CONNECTION_LIFETIME: Option<Duration> = Some(Duration::from_secs(300));

pub struct Producer {
producer: FutureProducer<DefaultClientContext>,
pool: Arc<Pool<KafkaConnectorManager>>,
sent_messages_counter: prometheus::IntCounterVec,
queue_size_gauge: prometheus::IntGaugeVec,
error_counter: prometheus::IntCounterVec,
@@ -238,7 +242,20 @@
};

let start = SystemTime::now();
let result = self.producer.send(record, timeout).await;
// Check a producer out of the pool; this panics if no connection
// becomes available within the pool's connection timeout.
let conn = self
.pool
.get()
.map_err(|err| {
println!(
"kafka: failed to get producer connection from pool: {:?}",
err
)
})
.unwrap();

let result = conn.send(record, timeout).await;
self.message_send_duration
.with_label_values(&[&topic])
.observe(
@@ -260,43 +277,48 @@
}

pub fn new(cfg: super::config::Config) -> Arc<Producer> {
let cf = cfg.to_hash();
let mut client_config = rdkafka::ClientConfig::new();
for (k, v) in cf.iter() {
for (k, v) in cfg.to_hash().iter() {
client_config.set(k, v);
}

let result = FutureProducer::from_config(&client_config);
match result {
Err(err) => panic!("Failed to create threaded producer: {}", err.to_string()),
Ok(producer) => Arc::new(Producer {
producer,
queue_size_gauge: prometheus::register_int_gauge_vec!(
"kafka_internal_queue_size",
"Kafka internal queue size",
&["topic"]
)
.unwrap(),
error_counter: prometheus::register_int_counter_vec!(
"kafka_errors_count",
"Kafka internal errors count",
&["topic", "error_code"]
)
let manager = KafkaConnectorManager::new(client_config);
let pool = Arc::new(
r2d2::Pool::builder()
.max_size(DEFAULT_PRODUCERS_POOL_SIZE)
.connection_timeout(DEFAULT_PRODUCERS_CONNECTION_TIMEOUT)
.max_lifetime(DEFAULT_PRODUCERS_CONNECTION_LIFETIME)
.build(manager)
.unwrap(),
sent_messages_counter: prometheus::register_int_counter_vec!(
"kafka_sent_messages",
"Kafka sent messages count",
&["topic"]
)
.unwrap(),
message_send_duration: prometheus::register_histogram_vec!(
"kafka_message_send_duration",
"Kafka message send duration",
&["topic"],
prometheus::exponential_buckets(5.0, 2.0, 5).unwrap()
)
.unwrap(),
}),
}
);

Arc::new(Producer {
pool,
queue_size_gauge: prometheus::register_int_gauge_vec!(
"kafka_internal_queue_size",
"Kafka internal queue size",
&["topic"]
)
.unwrap(),
error_counter: prometheus::register_int_counter_vec!(
"kafka_errors_count",
"Kafka internal errors count",
&["topic", "error_code"]
)
.unwrap(),
sent_messages_counter: prometheus::register_int_counter_vec!(
"kafka_sent_messages",
"Kafka sent messages count",
&["topic"]
)
.unwrap(),
message_send_duration: prometheus::register_histogram_vec!(
"kafka_message_send_duration",
"Kafka message send duration",
&["topic"],
prometheus::exponential_buckets(5.0, 2.0, 5).unwrap()
)
.unwrap(),
})
}
}
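
Taken together, the send path now checks a producer out of the pool per request. A self-contained sketch of that flow (not part of the diff; the helper name send_one and its arguments are illustrative, while the 100 ms checkout and 300 ms send timeouts mirror the constants used in this PR):

use r2d2::Pool;
use r2d2_kafka::KafkaConnectorManager;
use rdkafka::producer::future_producer::OwnedDeliveryResult;
use rdkafka::producer::FutureRecord;
use std::time::Duration;

async fn send_one(
    pool: &Pool<KafkaConnectorManager>,
    topic: &str,
    payload: &str,
) -> OwnedDeliveryResult {
    // Checkout blocks up to the pool's connection_timeout (100 ms here)
    // and panics if every producer stays busy past that deadline.
    let conn = pool.get().expect("producer pool exhausted");
    let record = FutureRecord::<str, str>::to(topic).payload(payload);
    // 300 ms matches the enqueue timeout set in src/http/handlers.rs.
    let result = conn.send(record, Duration::from_millis(300)).await;
    // `conn` drops here, returning the producer to the pool.
    result
}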
2 changes: 2 additions & 0 deletions src/main.rs
@@ -70,6 +70,8 @@ async fn main() {

let metrics_shutdown_rx = start_metrics_server(logger.clone(), shutdown_metrics_rx);

println!("Running http server");

// TODO(shmel1k): improve graceful shutdown behavior.
let main_server_shutdown_rx =
server.start_server(logger.clone(), kafka_producer.clone(), shutdown_rx);