Support redis max-attempts #99

Closed · wants to merge 4 commits
omniqueue/src/backends/redis/fallback.rs (62 additions, 31 deletions)
@@ -9,19 +9,24 @@
 use svix_ksuid::{KsuidLike as _, KsuidMs};
 use time::OffsetDateTime;
 use tracing::{error, trace};

-use super::{from_key, to_key, RawPayload, RedisConnection, RedisConsumer, RedisProducer};
+use super::{InternalPayload, RawPayload, RedisConnection, RedisConsumer, RedisProducer};
 use crate::{queue::Acker, Delivery, QueueError, Result};

 pub(super) async fn send_raw<R: RedisConnection>(
     producer: &RedisProducer<R>,
     payload: &[u8],
 ) -> Result<()> {
+    let payload = InternalPayload {
+        payload: payload.to_vec(),
+        num_receives: 0,
+    };
+
     producer
         .redis
         .get()
         .await
         .map_err(QueueError::generic)?
-        .lpush(&producer.queue_key, to_key(payload))
+        .lpush(&producer.queue_key, payload.into_list_payload())
         .await
         .map_err(QueueError::generic)
 }
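
The new code path relies on two helpers, InternalPayload::from_list_item and InternalPayload::into_list_payload, which live in the parent module and are not part of this diff. As a minimal sketch of the idea, assuming a simple delimiter-based encoding (the real wire format may differ), the receive counter rides along inside the Redis list item:

    // Hypothetical sketch only; the real InternalPayload lives in super::
    // and its encoding is not shown in this PR.
    struct InternalPayload {
        payload: Vec<u8>,
        num_receives: usize,
    }

    impl InternalPayload {
        // Serialize for LPUSH as "<payload>|<num_receives>".
        fn into_list_payload(self) -> Vec<u8> {
            let mut item = self.payload;
            item.push(b'|');
            item.extend_from_slice(self.num_receives.to_string().as_bytes());
            item
        }

        // Parse a list item back, splitting on the last delimiter so that
        // payload bytes containing b'|' still round-trip.
        fn from_list_item(item: &[u8]) -> Result<Self, &'static str> {
            let pos = item
                .iter()
                .rposition(|&b| b == b'|')
                .ok_or("missing delimiter")?;
            let num_receives = std::str::from_utf8(&item[pos + 1..])
                .map_err(|_| "counter is not utf-8")?
                .parse()
                .map_err(|_| "counter is not a number")?;
            Ok(Self {
                payload: item[..pos].to_vec(),
                num_receives,
            })
        }
    }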
@@ -45,7 +50,7 @@ async fn receive_with_timeout<R: RedisConnection>(
     consumer: &RedisConsumer<R>,
     timeout: Duration,
 ) -> Result<Option<Delivery>> {
-    let key: Option<Vec<u8>> = consumer
+    let payload: Option<Vec<u8>> = consumer
         .redis
         .get()
         .await
@@ -61,29 +66,31 @@
         .await
         .map_err(QueueError::generic)?;

-    key.map(|key| make_delivery(consumer, &key)).transpose()
+    payload
+        .and_then(|payload| {
+            if let Ok(new_payload) = InternalPayload::from_list_item(&payload) {
+                Some((payload, new_payload))
+            } else {
+                None
+            }
+        })
+        .map(|(old_payload, payload)| payload.into_fallback_delivery(consumer, &old_payload))
+        .transpose()
 }

-fn make_delivery<R: RedisConnection>(consumer: &RedisConsumer<R>, key: &[u8]) -> Result<Delivery> {
-    let (_, payload) = from_key(key)?;
-
-    Ok(Delivery::new(
-        payload.to_owned(),
-        RedisFallbackAcker {
-            redis: consumer.redis.clone(),
-            processing_queue_key: consumer.processing_queue_key.clone(),
-            key: key.to_owned(),
-            already_acked_or_nacked: false,
-        },
-    ))
-}
-
-struct RedisFallbackAcker<M: ManageConnection> {
-    redis: bb8::Pool<M>,
-    processing_queue_key: String,
-    key: RawPayload,
-    already_acked_or_nacked: bool,
+pub(super) struct RedisFallbackAcker<M: ManageConnection> {
+    pub(super) redis: bb8::Pool<M>,
+    pub(super) processing_queue_key: String,
+    // We delete by payload value (LREM), and `num_receives` is part of
+    // the payload, so the bytes change after a receive. We therefore keep
+    // the originally delivered bytes around as `old_payload`.
+    pub(super) old_payload: RawPayload,
+
+    pub(super) already_acked_or_nacked: bool,
+
+    pub(super) max_receives: usize,
+    pub(super) num_receives: usize,
 }

 impl<R: RedisConnection> Acker for RedisFallbackAcker<R> {
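
The comment on old_payload is the crux of the acker change: once the counter is baked into the stored bytes, re-serializing after a receive no longer matches what sits in the processing queue, so the acker must remember the exact bytes it was delivered with. Under the hypothetical encoding sketched above:

    // After one receive the counter differs, so the re-serialized item is
    // no longer byte-identical to what Redis holds.
    let before = InternalPayload { payload: b"job".to_vec(), num_receives: 0 }.into_list_payload();
    let after = InternalPayload { payload: b"job".to_vec(), num_receives: 1 }.into_list_payload();
    assert_ne!(before, after); // LREM must therefore target `old_payload`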
@@ -97,7 +104,7 @@ impl<R: RedisConnection> Acker for RedisFallbackAcker<R> {
             .get()
             .await
             .map_err(QueueError::generic)?
-            .lrem(&self.processing_queue_key, 1, &self.key)
+            .lrem(&self.processing_queue_key, 1, &self.old_payload)
             .await
             .map_err(QueueError::generic)?;

@@ -107,6 +114,11 @@ impl<R: RedisConnection> Acker for RedisFallbackAcker<R> {
     }

     async fn nack(&mut self) -> Result<()> {
+        if self.num_receives >= self.max_receives {
+            trace!("Maximum attempts reached");
+            return self.ack().await;
+        }
+
         if self.already_acked_or_nacked {
             return Err(QueueError::CannotAckOrNackTwice);
         }
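
Behaviorally, a nack on a delivery that has exhausted its attempts is converted into an ack, so the message is removed from the processing queue instead of being redelivered forever. A hedged consumer-side sketch (process is a stand-in for application code, not omniqueue API):

    loop {
        let delivery = consumer.receive().await?;
        if process(delivery.borrow_payload()).is_ok() {
            delivery.ack().await.map_err(|(e, _)| e)?;
        } else {
            // Once num_receives >= max_receives, this behaves like ack().
            delivery.nack().await.map_err(|(e, _)| e)?;
        }
    }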
@@ -144,13 +156,19 @@ pub(super) async fn background_task_processing<R: RedisConnection>(
     queue_key: String,
     processing_queue_key: String,
     ack_deadline_ms: i64,
+    max_receives: usize,
 ) -> Result<()> {
     // FIXME: ack_deadline_ms should be unsigned
     let ack_deadline = Duration::from_millis(ack_deadline_ms as _);
     loop {
-        if let Err(err) =
-            reenqueue_timed_out_messages(&pool, &queue_key, &processing_queue_key, ack_deadline)
-                .await
+        if let Err(err) = reenqueue_timed_out_messages(
+            &pool,
+            &queue_key,
+            &processing_queue_key,
+            ack_deadline,
+            max_receives,
+        )
+        .await
         {
             error!("{err}");
             tokio::time::sleep(Duration::from_millis(500)).await;
@@ -164,6 +182,7 @@ async fn reenqueue_timed_out_messages<R: RedisConnection>(
     queue_key: &str,
     processing_queue_key: &str,
     ack_deadline: Duration,
+    max_receives: usize,
 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
     const BATCH_SIZE: isize = 50;

@@ -180,10 +199,23 @@ async fn reenqueue_timed_out_messages<R: RedisConnection>(
     let keys: Vec<RawPayload> = conn.lrange(processing_queue_key, 0, BATCH_SIZE).await?;
     for key in keys {
         if key <= validity_limit {
+            let payload = InternalPayload::from_list_item(&key)?;
+            let num_receives = payload.num_receives;
+            let refreshed_key = payload.into_list_payload();
+            if num_receives >= max_receives {
+                trace!(
+                    num_receives = num_receives,
+                    "Maximum attempts reached for message, not reenqueuing",
+                );
+            } else {
+                trace!(
+                    num_receives = num_receives,
+                    "Pushing back overdue task to queue"
+                );
+                let _: () = conn.rpush(queue_key, &refreshed_key).await?;
+            }
+
             // We use LREM to be sure we only delete the keys we should be deleting
-            trace!("Pushing back overdue task to queue");
-            let refreshed_key = regenerate_key(&key)?;
-            let _: () = conn.rpush(queue_key, &refreshed_key).await?;
             let _: () = conn.lrem(processing_queue_key, 1, &key).await?;
         }
     }
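
For context, the key <= validity_limit guard is unchanged by this PR: list items begin with a time-ordered KSUID, so "overdue" reduces to a byte-wise comparison against a limit derived from the ack deadline. A sketch of how such a limit might be built, assuming the svix_ksuid and time imports at the top of the file (the actual construction is outside this diff):

    // Hypothetical: a synthetic KSUID stamped at (now - ack_deadline);
    // any item whose KSUID prefix sorts at or below it has timed out.
    let validity_limit = KsuidMs::new(Some(OffsetDateTime::now_utc() - ack_deadline), None)
        .to_string()
        .into_bytes();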
@@ -196,6 +228,5 @@ async fn reenqueue_timed_out_messages<R: RedisConnection>(
 }

 fn regenerate_key(key: &[u8]) -> Result<RawPayload> {
-    let (_, payload) = from_key(key)?;
-    Ok(to_key(payload))
+    Ok(InternalPayload::from_list_item(key)?.into_list_payload())
 }
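
Under the same hypothetical encoding, regenerate_key is now a pure parse-and-reserialize round trip, so the receive counter carried in the item survives regeneration:

    let item = InternalPayload { payload: b"job".to_vec(), num_receives: 2 }.into_list_payload();
    let regenerated = InternalPayload::from_list_item(&item)
        .unwrap()
        .into_list_payload();
    assert_eq!(item, regenerated); // num_receives == 2 is preserved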