Skip to content

Commit

Permalink
test application
Browse files Browse the repository at this point in the history
  • Loading branch information
Tevinthuku committed Apr 23, 2024
1 parent 53bd46b commit 3fdc116
Show file tree
Hide file tree
Showing 6 changed files with 103 additions and 59 deletions.
20 changes: 19 additions & 1 deletion ratelimiter/README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,24 @@
## Running the dragonfly backed sliding window rate limiter
## How to run the two servers behind a shared rate limiter

### Running the dragonfly backed sliding window rate limiter

```bash
docker-compose up -d
```

### Run first server

```bash
cargo run --bin server_1
```

### Run second server

```bash
cargo run --bin server_2
```


- Using Postman or Insomnia, call `localhost:8080/limited` repeatedly (automate exactly 60 requests) until it returns `429`.

- Then call `localhost:8081/limited` on the second server — it also returns `429`, because both servers share the same rate-limit state.
22 changes: 22 additions & 0 deletions ratelimiter/src/bin/server_1.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
use actix_web::{web, App, HttpServer};
use anyhow::Context;
use ratelimiter::{
limited, rate_limiters::sliding_window_counter::distributed::DistributedSlidingWindowCounter,
AppStateWithIpRateLimiter,
};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // One limiter instance is created up front; the factory closure below
    // clones it per worker, so every worker shares the same backing store.
    let rate_limiter = DistributedSlidingWindowCounter::new().await?;

    // Bind address is overridable via env for deployment flexibility; the
    // defaults preserve the original behavior (first server on 127.0.0.1:8080).
    let host = std::env::var("SERVER_1_HOST").unwrap_or_else(|_| "127.0.0.1".to_owned());
    let port: u16 = std::env::var("SERVER_1_PORT")
        .ok()
        .and_then(|p| p.parse().ok())
        .unwrap_or(8080);

    HttpServer::new(move || {
        // Fresh app state per worker, wrapping a clone of the shared limiter.
        let rate_limiter = AppStateWithIpRateLimiter::new(rate_limiter.clone());
        App::new()
            .app_data(web::Data::new(rate_limiter))
            .service(limited)
    })
    .bind((host.as_str(), port))
    .with_context(|| format!("Failed to bind to {host}:{port}"))?
    .run()
    .await
    .context("Failed to run the server")
}
22 changes: 22 additions & 0 deletions ratelimiter/src/bin/server_2.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
use actix_web::{web, App, HttpServer};
use anyhow::Context;
use ratelimiter::{
limited, rate_limiters::sliding_window_counter::distributed::DistributedSlidingWindowCounter,
AppStateWithIpRateLimiter,
};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // One limiter instance is created up front; the factory closure below
    // clones it per worker, so every worker shares the same backing store.
    let rate_limiter = DistributedSlidingWindowCounter::new().await?;

    // Bind address is overridable via env for deployment flexibility; the
    // defaults preserve the original behavior (second server on 127.0.0.1:8081).
    let host = std::env::var("SERVER_2_HOST").unwrap_or_else(|_| "127.0.0.1".to_owned());
    let port: u16 = std::env::var("SERVER_2_PORT")
        .ok()
        .and_then(|p| p.parse().ok())
        .unwrap_or(8081);

    HttpServer::new(move || {
        // Fresh app state per worker, wrapping a clone of the shared limiter.
        let rate_limiter = AppStateWithIpRateLimiter::new(rate_limiter.clone());
        App::new()
            .app_data(web::Data::new(rate_limiter))
            .service(limited)
    })
    .bind((host.as_str(), port))
    .with_context(|| format!("Failed to bind to {host}:{port}"))?
    .run()
    .await
    .context("Failed to run the server")
}
33 changes: 33 additions & 0 deletions ratelimiter/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1 +1,34 @@
use actix_web::{get, web, HttpResponse, Responder};
use rate_limiters::sliding_window_counter::distributed::DistributedSlidingWindowCounter;
use tokio::sync::Mutex;

pub mod rate_limiters;

#[get("/limited")]
async fn limited(data: web::Data<AppStateWithIpRateLimiter>) -> impl Responder {
let limiter = &mut data.limiter.lock().await;
let consumed_result = limiter.consume_token().await;
let consumed = match consumed_result {
Ok(consumed) => consumed,
Err(err) => {
return HttpResponse::InternalServerError()
.body(format!("Internal Server Error: {err:?}"))
}
};
if consumed {
return HttpResponse::Ok().body("Limited, but ok for now, don't over use me!");
}
HttpResponse::TooManyRequests().body("Rate limit exceeded, try again later")
}

/// Shared actix application state: the rate limiter guarded by an async mutex.
pub struct AppStateWithIpRateLimiter {
    // tokio::sync::Mutex (not std) so handlers may hold the guard across
    // .await points while talking to the backing store.
    limiter: Mutex<DistributedSlidingWindowCounter>,
}

impl AppStateWithIpRateLimiter {
    /// Wraps a limiter so it can be shared mutably across handler invocations.
    pub fn new(limiter: DistributedSlidingWindowCounter) -> Self {
        let limiter = Mutex::new(limiter);
        Self { limiter }
    }
}
46 changes: 0 additions & 46 deletions ratelimiter/src/main.rs

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -3,30 +3,24 @@ use std::time::Duration;
use anyhow::{anyhow, Context};
use redis::aio::ConnectionManager;

/// Sliding-window rate limiter whose counters live in a shared redis-compatible
/// store, so multiple server processes enforce one combined limit.
#[derive(Clone)]
pub struct DistributedSlidingWindowCounter {
    // Async redis connection manager; cloned along with the struct so each
    // clone talks to the same backing store.
    client: ConnectionManager,
    // Length of one counting window.
    window_duration: Duration,
    // Maximum number of tokens permitted within a single window.
    max_window_tokens: usize,
}

impl DistributedSlidingWindowCounter {
/// Create a new DistributedSlidingWindowCounter
///
/// if `window_duration` is not provided, it defaults to 60 seconds
///
/// if `max_window_tokens` is not provided, it defaults to 60 tokens
pub async fn new(
window_duration: Option<Duration>,
max_window_tokens: Option<usize>,
) -> anyhow::Result<Self> {
pub async fn new() -> anyhow::Result<Self> {
let url = std::env::var("REDIS_URL").unwrap_or_else(|_| "redis://127.0.0.1/".to_owned());
let client = redis::Client::open(url).context("Failed to create redis client")?;
let conn_manager = ConnectionManager::new(client)
.await
.context("Failed to create connection manager")?;

let window_duration = window_duration.unwrap_or(Duration::from_secs(60));
let max_window_tokens = max_window_tokens.unwrap_or(60);
let window_duration = Duration::from_secs(60);
let max_window_tokens = 60;

Ok(Self {
client: conn_manager,
window_duration,
Expand Down Expand Up @@ -60,7 +54,8 @@ impl DistributedSlidingWindowCounter {
.ignore()
.set("previous_window_count", current_window_count.unwrap_or(0))
.ignore()
.mget(&["current_window_count", "previous_window_count"])
.get("current_window_count")
.get("previous_window_count")
.query_async(&mut self.client)
.await
.context("Failed to set values in the DB")?;
Expand Down

0 comments on commit 3fdc116

Please sign in to comment.