"""Examples demonstrating all available rate limiting algorithms."""

from __future__ import annotations

from contextlib import asynccontextmanager

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

from fastapi_traffic import (
    Algorithm,
    MemoryBackend,
    RateLimiter,
    RateLimitExceeded,
    rate_limit,
)
from fastapi_traffic.core.limiter import set_limiter
# In-process counter storage — presumably single-process only; verify and
# swap in a shared backend (e.g. Redis) for multi-worker deployments.
backend = MemoryBackend()

# Module-level limiter; registered via set_limiter() in the lifespan hook below.
limiter = RateLimiter(backend)
@asynccontextmanager
async def lifespan(_: FastAPI):
    """Tie the limiter's lifecycle to the application's.

    Startup: initialize the limiter's backend, then register the limiter
    globally so the ``@rate_limit`` decorator can resolve it.
    Shutdown: release the limiter's resources.
    """
    await limiter.initialize()
    set_limiter(limiter)  # must run before the first rate-limited request arrives
    yield
    await limiter.close()
# Application wired to the lifespan hook so the limiter is set up before
# requests are served and torn down afterwards.
app = FastAPI(title="Rate Limiting Algorithms", lifespan=lifespan)
@app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
    """Render a RateLimitExceeded error as a 429 JSON response.

    The response body carries a machine-readable error code, the limiter's
    message, and the retry delay; rate-limit headers are attached when the
    exception provides limit info.
    """
    extra_headers = exc.limit_info.to_headers() if exc.limit_info else {}
    body = {
        "error": "rate_limit_exceeded",
        "message": exc.message,
        "retry_after": exc.retry_after,
    }
    return JSONResponse(status_code=429, content=body, headers=extra_headers)
# 1. Fixed Window: the counter resets at hard interval boundaries.
#    Cheapest on memory, but a burst straddling a window edge can
#    briefly admit up to 2x the limit.
@app.get("/fixed-window")
@rate_limit(limit=10, window_size=60, algorithm=Algorithm.FIXED_WINDOW)
async def fixed_window(_: Request) -> dict[str, str]:
    """Serve a demo endpoint limited by the fixed-window algorithm."""
    return {
        "algorithm": "fixed_window",
        "description": "Counter resets every 60 seconds",
    }
# 2. Sliding Window Log: keeps every request timestamp, so limiting is
#    exact. The trade-off is memory proportional to the request rate.
@app.get("/sliding-window")
@rate_limit(limit=10, window_size=60, algorithm=Algorithm.SLIDING_WINDOW)
async def sliding_window(_: Request) -> dict[str, str]:
    """Serve a demo endpoint limited by the sliding-window-log algorithm."""
    return {
        "algorithm": "sliding_window",
        "description": "Precise tracking with timestamp log",
    }
# 3. Sliding Window Counter: weights the previous window's count against
#    the current one — near sliding-window accuracy at fixed-window cost.
#    A good default for most production traffic.
@app.get("/sliding-window-counter")
@rate_limit(limit=10, window_size=60, algorithm=Algorithm.SLIDING_WINDOW_COUNTER)
async def sliding_window_counter(_: Request) -> dict[str, str]:
    """Serve a demo endpoint limited by the sliding-window-counter algorithm."""
    return {
        "algorithm": "sliding_window_counter",
        "description": "Efficient approximation",
    }
# 4. Token Bucket: tokens refill steadily over the window; burst_size
#    caps how many may be spent at once. Use when occasional short
#    bursts are acceptable.
@app.get("/token-bucket")
@rate_limit(limit=10, window_size=60, algorithm=Algorithm.TOKEN_BUCKET, burst_size=5)
async def token_bucket(_: Request) -> dict[str, str]:
    """Serve a demo endpoint limited by the token-bucket algorithm."""
    return {"algorithm": "token_bucket", "description": "Allows controlled bursts"}
# 5. Leaky Bucket: drains requests at a constant rate, shielding
#    downstream services from spikes; burst_size is the queue capacity.
@app.get("/leaky-bucket")
@rate_limit(limit=10, window_size=60, algorithm=Algorithm.LEAKY_BUCKET, burst_size=5)
async def leaky_bucket(_: Request) -> dict[str, str]:
    """Serve a demo endpoint limited by the leaky-bucket algorithm."""
    return {"algorithm": "leaky_bucket", "description": "Constant output rate"}
if __name__ == "__main__":
    import uvicorn

    # 0.0.0.0 makes the demo reachable from other hosts/containers;
    # prefer a specific interface outside local experimentation.
    uvicorn.run(app, host="0.0.0.0", port=8000)