producer: avoid lookup every time #28

Merged · 2 commits · Sep 8, 2022
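The change: instead of issuing a `LookupShard` RPC for every flush, the producer now caches resolved shard URLs in a new `shard_urls: HashMap<ShardId, String>` field, and `lookup_shard` goes to the server only on a cache miss. Below is a minimal sketch of that caching pattern in isolation; `lookup_shard_rpc` and the URL format are hypothetical stand-ins for the client's actual gRPC call:

```rust
use std::collections::HashMap;

type ShardId = u64;

// Hypothetical stand-in for the real gRPC LookupShard round trip.
async fn lookup_shard_rpc(shard_id: ShardId) -> Result<String, String> {
    Ok(format!("hstream://node-for-shard-{shard_id}"))
}

// Resolve a shard's URL, hitting the RPC only on a cache miss:
// the same get-then-insert pattern the diff adds to `Producer::start`.
async fn resolve_shard_url(
    cache: &mut HashMap<ShardId, String>,
    shard_id: ShardId,
) -> Result<String, String> {
    if let Some(url) = cache.get(&shard_id) {
        return Ok(url.clone());
    }
    let url = lookup_shard_rpc(shard_id).await?;
    cache.insert(shard_id, url.clone());
    Ok(url)
}
```

Note that nothing in this diff evicts cache entries, so a shard that migrates to another server node would presumably keep resolving to the stale URL for the lifetime of the producer.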
src/hstreamdb/src/producer.rs (98 additions, 62 deletions)
@@ -28,6 +28,7 @@ pub struct Producer {
     tasks: Vec<JoinHandle<()>>,
     shard_buffer: HashMap<ShardId, Vec<Record>>,
     shard_buffer_state: HashMap<ShardId, BufferState>,
+    shard_urls: HashMap<ShardId, String>,
     request_receiver: tokio::sync::mpsc::UnboundedReceiver<Request>,
     channels: Channels,
     url_scheme: String,
@@ -84,6 +85,7 @@ impl Producer {
             tasks: Vec::new(),
             shard_buffer: HashMap::new(),
             shard_buffer_state: HashMap::new(),
+            shard_urls: HashMap::new(),
             request_receiver,
             channels,
             url_scheme,
@@ -98,50 +100,94 @@ impl Producer {
     pub async fn start(&mut self) {
         while let Some(Request(record)) = self.request_receiver.recv().await {
             let partition_key = record.partition_key.clone();
-            match partition_key_to_shard_id(&self.shards, partition_key) {
+            match partition_key_to_shard_id(&self.shards, partition_key.clone()) {
                 Err(err) => {
-                    log::error!("get shard id by partition key error: {:?}", err)
+                    log::error!(
+                        "get shard id by partition key error: partition_key = {partition_key}, {err}"
+                    )
                 }
-                Ok(shard_id) => match self.shard_buffer.get_mut(&shard_id) {
-                    None => {
-                        let mut buffer_state: BufferState = default();
-                        buffer_state.modify(&record);
-                        self.shard_buffer_state.insert(shard_id, buffer_state);
-                        self.shard_buffer.insert(shard_id, vec![record]);
-                    }
-                    Some(buffer) => {
-                        let buffer_state = self.shard_buffer_state.get_mut(&shard_id).unwrap();
-                        buffer_state.modify(&record);
-                        buffer.push(record);
-                        if buffer_state.check(&self.flush_settings) {
-                            let buffer = clear_shard_buffer(&mut self.shard_buffer, shard_id);
-                            self.shard_buffer_state.insert(shard_id, default());
-                            let task = tokio::spawn(flush_(
-                                self.channels.clone(),
-                                self.url_scheme.clone(),
-                                self.stream_name.clone(),
-                                shard_id,
-                                self.compression_type,
-                                buffer,
-                            ));
-                            self.tasks.push(task);
-                        }
-                    }
-                },
+                Ok(shard_id) => {
+                    let shard_url = self.shard_urls.get(&shard_id);
+                    let shard_url_is_none = shard_url.is_none();
+                    match lookup_shard(
+                        &mut self.channels.channel().await,
+                        &self.url_scheme,
+                        shard_id,
+                        shard_url,
+                    )
+                    .await
+                    {
+                        Err(err) => {
+                            log::error!("lookup shard error: shard_id = {shard_id}, {err}")
+                        }
+                        Ok(shard_url) => {
+                            if shard_url_is_none {
+                                self.shard_urls.insert(shard_id, shard_url.clone());
+                            };
+                            match self.shard_buffer.get_mut(&shard_id) {
+                                None => {
+                                    let mut buffer_state: BufferState = default();
+                                    buffer_state.modify(&record);
+                                    self.shard_buffer_state.insert(shard_id, buffer_state);
+                                    self.shard_buffer.insert(shard_id, vec![record]);
+                                }
+                                Some(buffer) => {
+                                    let buffer_state =
+                                        self.shard_buffer_state.get_mut(&shard_id).unwrap();
+                                    buffer_state.modify(&record);
+                                    buffer.push(record);
+                                    if buffer_state.check(&self.flush_settings) {
+                                        let buffer =
+                                            clear_shard_buffer(&mut self.shard_buffer, shard_id);
+                                        self.shard_buffer_state.insert(shard_id, default());
+                                        let task = tokio::spawn(flush_(
+                                            self.channels.clone(),
+                                            self.stream_name.clone(),
+                                            shard_id,
+                                            shard_url,
+                                            self.compression_type,
+                                            buffer,
+                                        ));
+                                        self.tasks.push(task);
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
             }
         }
 
         let mut shard_buffer = mem::take(&mut self.shard_buffer);
         for (shard_id, buffer) in shard_buffer.iter_mut() {
-            let task = tokio::spawn(flush_(
-                self.channels.clone(),
-                self.url_scheme.clone(),
-                self.stream_name.clone(),
-                *shard_id,
-                self.compression_type,
-                mem::take(buffer),
-            ));
-            self.tasks.push(task);
+            let shard_url = self.shard_urls.get(shard_id);
+            let shard_url_is_none = shard_url.is_none();
+            match lookup_shard(
+                &mut self.channels.channel().await,
+                &self.url_scheme,
+                *shard_id,
+                shard_url,
+            )
+            .await
+            {
+                Err(err) => {
+                    log::error!("lookup shard error: shard_id = {shard_id}, {err}")
+                }
+                Ok(shard_url) => {
+                    if shard_url_is_none {
+                        self.shard_urls.insert(*shard_id, shard_url.clone());
+                    };
+                    let task = tokio::spawn(flush_(
+                        self.channels.clone(),
+                        self.stream_name.clone(),
+                        *shard_id,
+                        shard_url,
+                        self.compression_type,
+                        mem::take(buffer),
+                    ));
+                    self.tasks.push(task);
+                }
+            }
         }
 
         let tasks = std::mem::take(&mut self.tasks);
@@ -155,56 +201,46 @@ impl Producer {
 
 async fn flush(
     channels: Channels,
-    url_scheme: String,
     stream_name: String,
     shard_id: ShardId,
+    shard_url: String,
     compression_type: CompressionType,
     buffer: Vec<Record>,
 ) -> Result<(), String> {
     if !buffer.is_empty() {
-        match lookup_shard(&mut channels.channel().await, &url_scheme, shard_id).await {
-            Err(err) => {
-                log::warn!("{err}");
-                Ok(())
-            }
-            Ok(server_node) => {
-                let channel = channels
-                    .channel_at(server_node.clone())
-                    .await
-                    .map_err(|err| {
-                        format!("producer connect error: url = {server_node}, {err:?}")
-                    })?;
-                append(
-                    channel,
-                    stream_name,
-                    shard_id,
-                    compression_type,
-                    buffer.to_vec(),
-                )
-                .await
-                .map_err(|err| format!("producer append error: addr = {server_node}, {err:?}"))
-                .map(|x| log::debug!("append succeed: len = {}", x.len()))?;
-                Ok(())
-            }
-        }
+        let channel = channels
+            .channel_at(shard_url.clone())
+            .await
+            .map_err(|err| format!("producer connect error: url = {shard_url}, {err}"))?;
+        append(
+            channel,
+            stream_name,
+            shard_id,
+            compression_type,
+            buffer.to_vec(),
+        )
+        .await
+        .map_err(|err| format!("producer append error: url = {shard_url}, {err}"))
+        .map(|x| log::debug!("append succeed: len = {}", x.len()))?;
+        Ok(())
     } else {
         Ok(())
     }
 }
 
 async fn flush_(
     channels: Channels,
-    url_scheme: String,
     stream_name: String,
     shard_id: ShardId,
+    shard_url: String,
     compression_type: CompressionType,
     buffer: Vec<Record>,
 ) {
     flush(
         channels,
-        url_scheme,
         stream_name,
         shard_id,
+        shard_url,
         compression_type,
         buffer,
     )
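With the lookup hoisted into `Producer::start`, `flush`/`flush_` no longer take `url_scheme` and resolve the node themselves; they receive the already-resolved `shard_url` and connect straight through `channels.channel_at`. One consequence visible above is that resolution must finish before the flush task is spawned, since the spawned future gets an owned `String` rather than shared access to the cache (sharing the map would require `Arc`/locking). A sketch of that resolve-then-spawn shape, with `flush_work` as a hypothetical stand-in for the crate's `flush_`:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for `flush_`: the spawned task only needs the
// owned, already-resolved URL, not access to the producer's cache.
async fn flush_work(shard_url: String) {
    let _ = shard_url; // connect + append would happen here
}

// Resolve first, then move the owned URL into the task.
fn spawn_flush(
    cache: &HashMap<u64, String>,
    shard_id: u64,
) -> Option<tokio::task::JoinHandle<()>> {
    let url = cache.get(&shard_id)?.clone();
    Some(tokio::spawn(flush_work(url)))
}
```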
src/hstreamdb/src/utils.rs (16 additions, 13 deletions)
@@ -9,7 +9,7 @@ use num_traits::Num;
 use tonic::transport::Channel;
 
 use crate::common::{self, PartitionKey, Record, ShardId};
-use crate::format_url;
+use crate::{format_url, Error};
 
 pub fn record_id_to_string(record_id: &RecordId) -> String {
     format!(
@@ -22,18 +22,21 @@ pub async fn lookup_shard(
     channel: &mut HStreamApiClient<Channel>,
     url_scheme: &str,
     shard_id: ShardId,
-) -> Result<String, String> {
-    let server_node = channel
-        .lookup_shard(LookupShardRequest { shard_id })
-        .await
-        .map_err(|err| format!("lookup shard error: shard_id = {shard_id}, {err}"))?
-        .into_inner()
-        .server_node
-        .ok_or_else(|| {
-            format!("lookup shard error: shard_id = {shard_id}, failed to unwrap `server_node`")
-        })?;
-    let server_node = format_url!(url_scheme, server_node.host, server_node.port);
-    Ok(server_node)
+    shard_url: Option<&String>,
+) -> common::Result<String> {
+    match shard_url {
+        Some(url) => Ok(url.to_string()),
+        None => {
+            let server_node = channel
+                .lookup_shard(LookupShardRequest { shard_id })
+                .await?
+                .into_inner()
+                .server_node
+                .ok_or_else(|| Error::PBUnwrapError("server_node".to_string()))?;
+            let server_node = format_url!(url_scheme, server_node.host, server_node.port);
+            Ok(server_node)
+        }
+    }
 }
 
 pub fn clear_shard_buffer(
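`lookup_shard` now accepts the cached URL as `Option<&String>` and short-circuits when it is `Some`; its error type also changes from `String` to the crate's `common::Result` with `Error::PBUnwrapError`. Back in `Producer::start`, the caller records `shard_url.is_none()` before awaiting because `self.shard_urls.get(&shard_id)` holds an immutable borrow of the map, and the insert can only happen after that borrow ends. The `HashMap` entry API would avoid the flag (a sketch, not what the PR does), but `or_insert_with` cannot await the RPC, so the value would have to be resolved eagerly, which is exactly the per-record lookup this PR removes:

```rust
use std::collections::HashMap;

// Entry-API variant: a single mutable borrow, but `looked_up` must already
// be resolved, because the entry closure cannot run async code.
fn cache_shard_url(cache: &mut HashMap<u64, String>, shard_id: u64, looked_up: String) -> String {
    cache.entry(shard_id).or_insert(looked_up).clone()
}
```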
src/x/hstreamdb-erl-nifs/src/lib.rs (44 additions, 13 deletions)
@@ -1,20 +1,29 @@
 use hstreamdb::client::Client;
 use hstreamdb::producer::FlushSettings;
 use hstreamdb::{CompressionType, Record, Stream};
+use rustler::types::atom::ok;
 use rustler::{resource, Atom, Env, ResourceArc, Term};
 use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
 
 mod runtime;
 
 rustler::atoms! {
-    none, gzip, zstd
+    none, gzip, zstd,
+    len, size
 }
 
+rustler::init!(
+    "hstreamdb",
+    [create_stream, start_producer, append],
+    load = load
+);
+
 #[derive(Clone)]
 pub struct NifAppender(UnboundedSender<Record>);
 
 fn load(env: Env, _: Term) -> bool {
     resource!(NifAppender, env);
+    env_logger::init();
     true
 }
 
@@ -25,7 +34,7 @@ pub fn create_stream(
     replication_factor: u32,
     backlog_duration: u32,
     shard_count: u32,
-) {
+) -> Atom {
     let future = async move {
         let mut client = Client::new(url).await.unwrap();
         client
@@ -38,20 +47,21 @@ pub fn create_stream(
             .await
             .unwrap()
     };
-    _ = runtime::spawn(future)
+    _ = runtime::spawn(future);
+    ok()
 }
 
 #[rustler::nif]
 pub fn start_producer(
     url: String,
     stream_name: String,
     compression_type: Atom,
+    flush_settings: Term,
 ) -> ResourceArc<NifAppender> {
     let (request_sender, request_receiver) = unbounded_channel::<Record>();
+    let compression_type = atom_to_compression_type(compression_type);
+    let flush_settings = new_flush_settings(flush_settings);
     let future = async move {
-        let compression_type = atom_to_compression_type(compression_type);
-        let flush_settings = FlushSettings { len: 0, size: 0 };
-
         let mut client = Client::new(url).await.unwrap();
         let (appender, mut producer) = client
             .new_producer(stream_name, compression_type, flush_settings)
@@ -72,16 +82,17 @@ pub fn start_producer(
 }
 
 #[rustler::nif]
-fn append(producer: ResourceArc<NifAppender>, partition_key: String, raw_payload: String) {
+fn append(producer: ResourceArc<NifAppender>, partition_key: String, raw_payload: String) -> Atom {
     let record = Record {
         partition_key,
         payload: hstreamdb::Payload::RawRecord(raw_payload.into_bytes()),
     };
     let producer = &producer.0;
     producer.send(record).unwrap();
+    ok()
 }
 
-pub fn atom_to_compression_type(compression_type: Atom) -> CompressionType {
+fn atom_to_compression_type(compression_type: Atom) -> CompressionType {
     if compression_type == none() {
         CompressionType::None
     } else if compression_type == gzip() {
@@ -93,8 +104,28 @@ pub fn atom_to_compression_type(compression_type: Atom) -> CompressionType {
     }
 }
 
-rustler::init!(
-    "hstreamdb",
-    [create_stream, start_producer, append],
-    load = load
-);
+fn new_flush_settings(proplists: Term) -> FlushSettings {
+    let proplists = proplists.into_list_iterator().unwrap();
+    let mut len_v = usize::MAX;
+    let mut size_v = usize::MAX;
+
+    for x in proplists {
+        if x.is_tuple() {
+            let (k, v): (Atom, usize) = x.decode().unwrap();
+            if k == len() {
+                len_v = v;
+            } else if k == size() {
+                size_v = v;
+            }
+        }
+    }
+
+    if len_v == usize::MAX && size_v == usize::MAX {
+        len_v = 0;
+        size_v = 0;
+    }
+    FlushSettings {
+        len: len_v,
+        size: size_v,
+    }
+}
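On the NIF side, `start_producer` now takes flush settings as an Erlang proplist (e.g. `[{len, 10}, {size, 65536}]`, a hypothetical call) instead of hardcoding `FlushSettings { len: 0, size: 0 }`; the exported functions return the `ok` atom, and `env_logger` is initialized at load. `new_flush_settings` reads `{len, N}` and `{size, N}` tuples; an absent key stays at `usize::MAX`, presumably meaning that threshold alone never triggers a flush, and if neither key is given both fall back to 0, the old hardcoded default. A pure-Rust sketch of just that defaulting logic, with `(&str, usize)` pairs standing in for the decoded `(Atom, usize)` tuples:

```rust
// Mirrors the defaulting in `new_flush_settings` (sketch only).
fn flush_thresholds(pairs: &[(&str, usize)]) -> (usize, usize) {
    let (mut len_v, mut size_v) = (usize::MAX, usize::MAX);
    for &(key, value) in pairs {
        match key {
            "len" => len_v = value,
            "size" => size_v = value,
            _ => {} // non-matching keys are ignored, as in the NIF
        }
    }
    if len_v == usize::MAX && size_v == usize::MAX {
        (0, 0) // neither threshold given: keep the old hardcoded default
    } else {
        (len_v, size_v)
    }
}

fn main() {
    assert_eq!(flush_thresholds(&[("len", 10)]), (10, usize::MAX));
    assert_eq!(flush_thresholds(&[]), (0, 0));
}
```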