Queues vs Event Streaming — Deliver Once vs Replay Forever
Queues: a message is consumed and then deleted. Streaming: messages are retained in a log; consumers can replay from any offset.
When to use
- Queues (SQS, RabbitMQ): task distribution; each message is handled by a single worker (competing consumers); delivery is at-least-once in practice, so handlers should be idempotent
- Streaming (Kafka, Kinesis): multiple independent consumers need the same events; replay is required; ordering matters within a partition (see the keyed-producer sketch after this list)
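Per-partition ordering is driven by the message key: records that share a key are routed to the same partition and are read back in write order. A minimal producer sketch, assuming the segmentio/kafka-go client; the broker address, topic, and key are illustrative:

// All events for one order share a key, so they land on one partition, in order.
func publishOrderEvents(ctx context.Context) error {
	w := &kafka.Writer{
		Addr:     kafka.TCP("localhost:9092"), // illustrative broker
		Topic:    "orders",                    // illustrative topic
		Balancer: &kafka.Hash{},               // same key -> same partition
	}
	defer w.Close()
	return w.WriteMessages(ctx,
		kafka.Message{Key: []byte("order-42"), Value: []byte(`{"event":"created"}`)},
		kafka.Message{Key: []byte("order-42"), Value: []byte(`{"event":"paid"}`)},
	)
}

Ordering only holds within a partition; events with different keys may interleave across partitions.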
Tradeoffs
- Streaming costs more: the retained log needs storage and the brokers add operational complexity; retention is a per-topic setting (see the sketch after this list)
- Queues lose message history after consumption
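The storage side of that tradeoff is a knob, not a fixed cost: Kafka retention is configured per topic. A minimal sketch, assuming the segmentio/kafka-go client and that the dialed broker is the cluster controller; the topic name, partition count, and retention value are illustrative:

// Create a topic that keeps roughly seven days of history.
func createTopicWithRetention(brokerAddr string) error {
	conn, err := kafka.Dial("tcp", brokerAddr) // assumes this broker is the controller
	if err != nil {
		return err
	}
	defer conn.Close()
	return conn.CreateTopics(kafka.TopicConfig{
		Topic:             "orders", // illustrative topic
		NumPartitions:     6,
		ReplicationFactor: 3,
		ConfigEntries: []kafka.ConfigEntry{
			// retention.ms bounds how long the log is kept; this is the storage knob
			{ConfigName: "retention.ms", ConfigValue: "604800000"},
		},
	})
}

Longer retention buys a longer replay window at the cost of more disk.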
Go
// Queue consumer (SQS): delete each message only after it was handled successfully.
// Uses github.com/aws/aws-sdk-go-v2/service/sqs and the standard "context" package.
func processQueue(ctx context.Context, svc *sqs.Client, queueURL string) {
	out, err := svc.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{QueueUrl: &queueURL})
	if err != nil {
		return // receive failed; let the caller decide whether to retry
	}
	for _, msg := range out.Messages {
		if err := handle(*msg.Body); err == nil {
			// Delete is permanent: once the message is gone there is no replay.
			svc.DeleteMessage(ctx, &sqs.DeleteMessageInput{
				QueueUrl:      &queueURL,
				ReceiptHandle: msg.ReceiptHandle,
			})
		}
	}
}
// Stream consumer (Kafka): commit the offset after processing; the record stays in
// the log and can be replayed later. Uses github.com/segmentio/kafka-go.
func processStream(ctx context.Context, reader *kafka.Reader) {
	for {
		msg, err := reader.FetchMessage(ctx)
		if err != nil {
			return // context cancelled or the reader failed
		}
		handle(string(msg.Value))
		// Offset committed, not deleted: resetting the group offset replays it.
		reader.CommitMessages(ctx, msg)
	}
}
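What "replay any time" looks like in practice: committing only moves a pointer, so a reader can be pointed back at any retained offset. A minimal sketch, assuming segmentio/kafka-go, with an illustrative broker and topic; it reads a single partition without a group ID so the offset can be set directly (for a consumer group you would instead reset the group's committed offsets or start a fresh group):

// Replay one partition from the oldest retained record.
func replayFromStart(ctx context.Context) error {
	r := kafka.NewReader(kafka.ReaderConfig{
		Brokers:   []string{"localhost:9092"}, // illustrative broker
		Topic:     "orders",                   // illustrative topic
		Partition: 0,
	})
	defer r.Close()
	if err := r.SetOffset(kafka.FirstOffset); err != nil { // jump back to the start of the log
		return err
	}
	for {
		msg, err := r.ReadMessage(ctx)
		if err != nil {
			return err // context cancelled or reader closed
		}
		handle(string(msg.Value)) // the same handler sees old events again
	}
}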
Python
# Queue consumer (SQS): delete after processing
def process_queue(sqs, queue_url: str) -> None:
    resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=10)
    for msg in resp.get("Messages", []):
        if handle(msg["Body"]):
            sqs.delete_message(
                QueueUrl=queue_url,
                ReceiptHandle=msg["ReceiptHandle"],
            )
# Stream consumer (Kafka): commit offset, replay any time
def process_stream(consumer) -> None:
    for msg in consumer:
        handle(msg.value)
        consumer.commit()  # offset committed, not deleted; replay by resetting the offset
Gotcha: Kafka is NOT a queue. If every message has exactly one consumer and you never need replay, you're fighting the tool and paying for a retained log you don't use; use SQS instead.