Compare commits

...

21 Commits

Author SHA1 Message Date
4f19c0b19e ci: remove PyPI publish job 2026-02-04 01:50:11 +00:00
fe07912040 docs: update install instructions and bump version to 0.2.0 2026-02-04 01:49:53 +00:00
6bdeab2b4e docs: update repo URL in development guide 2026-02-04 01:39:17 +00:00
bb07ac816f fix: resolve flaky test and JSON config validation 2026-02-04 01:33:44 +00:00
34e07f6b7e style: apply ruff formatting 2026-02-04 01:28:45 +00:00
2900fca30a fix: add ruff per-file-ignores for tests and re-exports 2026-02-04 01:26:06 +00:00
a8b3319d14 fix: disable pyright false positives for FastAPI route handlers 2026-02-04 01:23:02 +00:00
3e431927b9 fix: install libatomic1 for pyright Node.js dependency 2026-02-04 01:19:33 +00:00
5298df5e72 fix: use uv sync --extra dev to install pyright and other dev tools 2026-02-04 01:17:35 +00:00
c966fdfe21 fix: exclude .venv and .cache from source distribution 2026-02-04 01:13:34 +00:00
3e026866cb ci: add GitLab CI/CD pipeline for linting, testing, and publishing 2026-02-04 01:09:47 +00:00
a46e216902 chore: update lockfile 2026-02-04 01:09:08 +00:00
64c368907f release: bump version to 0.2.0 2026-02-04 01:08:54 +00:00
ddd51aab39 docs: fix markdown formatting in development guide 2026-02-04 01:08:42 +00:00
fc88f84f4a style: apply ruff formatting and move TYPE_CHECKING imports in tests 2026-02-04 01:08:32 +00:00
d7966f7e96 style: clean up unused parameters and imports in examples 2026-02-04 01:08:16 +00:00
6bc108078f style: minor code style improvements in core modules 2026-02-04 01:07:53 +00:00
064af30d0f refactor: replace algorithm dict lookup with match/case pattern 2026-02-04 01:07:43 +00:00
3510ea564a refactor: use contextlib.suppress and sort __slots__ in backends 2026-02-04 01:07:32 +00:00
ac90ac4141 chore: update project URLs to GitLab and add black to dev deps 2026-02-04 01:07:14 +00:00
997eda7a36 chore: update gitignore with editor and cache directories 2026-02-04 01:07:01 +00:00
33 changed files with 518 additions and 233 deletions

6
.gitignore vendored
View File

@@ -8,3 +8,9 @@ wheels/
# Virtual environments # Virtual environments
.venv .venv
scratchpad.txt
things-todo.md
.ruff_cache
.qodo
.pytest_cache
.vscode

109
.gitlab-ci.yml Normal file
View File

@@ -0,0 +1,109 @@
stages:
- lint
- test
- build
- publish
variables:
PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
UV_CACHE_DIR: "$CI_PROJECT_DIR/.cache/uv"
cache:
paths:
- .cache/pip
- .cache/uv
- .venv
.python-base:
image: python:3.12-slim
before_script:
- apt-get update && apt-get install -y --no-install-recommends libatomic1 && rm -rf /var/lib/apt/lists/*
- pip install uv
- uv sync --extra dev
# Linting stage
ruff-lint:
extends: .python-base
stage: lint
script:
- uv run ruff check .
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
ruff-format:
extends: .python-base
stage: lint
script:
- uv run ruff format --check .
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
pyright:
extends: .python-base
stage: lint
script:
- uv run pyright
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
# Test stage - run tests on multiple Python versions
.test-base:
stage: test
before_script:
- apt-get update && apt-get install -y --no-install-recommends libatomic1 && rm -rf /var/lib/apt/lists/*
- pip install uv
- uv sync --extra dev
script:
- uv run pytest --cov=fastapi_traffic --cov-report=xml --cov-report=term
coverage: '/TOTAL.*\s+(\d+%)/'
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
when: always
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
test-py310:
extends: .test-base
image: python:3.10-slim
test-py311:
extends: .test-base
image: python:3.11-slim
test-py312:
extends: .test-base
image: python:3.12-slim
# Build stage
build-package:
extends: .python-base
stage: build
script:
- uv build
artifacts:
paths:
- dist/
expire_in: 1 week
rules:
- if: $CI_COMMIT_TAG
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
# Publish to GitLab Package Registry
publish-gitlab:
extends: .python-base
stage: publish
script:
- uv build
- pip install twine
- TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token twine upload --repository-url ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi dist/*
rules:
- if: $CI_COMMIT_TAG =~ /^v\d+\.\d+\.\d+$/
needs:
- build-package

View File

@@ -5,7 +5,7 @@ All notable changes to fastapi-traffic will be documented here.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased] ## [0.2.0] - 2026-02-04
### Added ### Added
- **Configuration Loader** - Load rate limiting configuration from external files: - **Configuration Loader** - Load rate limiting configuration from external files:

View File

@@ -13,7 +13,7 @@ Want to contribute or just poke around? Here's how to get set up.
### Using uv (the fast way) ### Using uv (the fast way)
```bash ```bash
git clone https://gitlab.com/bereckobrian/fastapi-traffic.git git clone https://gitlab.com/zanewalker/fastapi-traffic.git
cd fastapi-traffic cd fastapi-traffic
# This creates a venv and installs everything # This creates a venv and installs everything
@@ -25,7 +25,7 @@ That's it. uv figures out the rest.
### Using pip ### Using pip
```bash ```bash
git clone https://gitlab.com/bereckobrian/fastapi-traffic.git git clone https://gitlab.com/zanewalker/fastapi-traffic.git
cd fastapi-traffic cd fastapi-traffic
python -m venv .venv python -m venv .venv
@@ -74,7 +74,6 @@ uv run pyright
# or just: pyright # or just: pyright
``` ```
## linting ## linting
Ruff handles both linting and formatting: Ruff handles both linting and formatting:
@@ -106,7 +105,7 @@ Then open `http://localhost:8000` in your browser.
## Project layout ## Project layout
``` ```bash
fastapi_traffic/ fastapi_traffic/
├── __init__.py # Public exports ├── __init__.py # Public exports
├── exceptions.py # Custom exceptions ├── exceptions.py # Custom exceptions

View File

@@ -18,26 +18,26 @@ Most rate limiting solutions are either too simple (fixed window only) or too co
```bash ```bash
# Basic installation (memory backend only) # Basic installation (memory backend only)
pip install fastapi-traffic pip install git+https://gitlab.com/zanewalker/fastapi-traffic.git
# With Redis support # With Redis support
pip install fastapi-traffic[redis] pip install "fastapi-traffic[redis] @ git+https://gitlab.com/zanewalker/fastapi-traffic.git"
# With all extras # With all extras
pip install fastapi-traffic[all] pip install "fastapi-traffic[all] @ git+https://gitlab.com/zanewalker/fastapi-traffic.git"
``` ```
### Using uv ### Using uv
```bash ```bash
# Basic installation # Basic installation
uv add fastapi-traffic uv add git+https://gitlab.com/zanewalker/fastapi-traffic.git
# With Redis support # With Redis support
uv add fastapi-traffic[redis] uv add "fastapi-traffic[redis] @ git+https://gitlab.com/zanewalker/fastapi-traffic.git"
# With all extras # With all extras
uv add fastapi-traffic[all] uv add "fastapi-traffic[all] @ git+https://gitlab.com/zanewalker/fastapi-traffic.git"
``` ```
## Quick Start ## Quick Start

View File

@@ -9,8 +9,8 @@ from fastapi.responses import JSONResponse
from fastapi_traffic import ( from fastapi_traffic import (
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
rate_limit, rate_limit,
) )
from fastapi_traffic.core.limiter import set_limiter from fastapi_traffic.core.limiter import set_limiter
@@ -21,7 +21,7 @@ limiter = RateLimiter(backend)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(_: FastAPI):
"""Lifespan context manager for startup/shutdown.""" """Lifespan context manager for startup/shutdown."""
await limiter.initialize() await limiter.initialize()
set_limiter(limiter) set_limiter(limiter)
@@ -34,7 +34,7 @@ app = FastAPI(title="Quickstart Example", lifespan=lifespan)
# Step 2: Add exception handler for rate limit errors # Step 2: Add exception handler for rate limit errors
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONResponse: async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
return JSONResponse( return JSONResponse(
status_code=429, status_code=429,
content={"error": "Too many requests", "retry_after": exc.retry_after}, content={"error": "Too many requests", "retry_after": exc.retry_after},
@@ -44,17 +44,17 @@ async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONRe
# Step 3: Apply rate limiting to endpoints # Step 3: Apply rate limiting to endpoints
@app.get("/") @app.get("/")
@rate_limit(10, 60) # 10 requests per minute @rate_limit(10, 60) # 10 requests per minute
async def hello(request: Request) -> dict[str, str]: async def hello(_: Request) -> dict[str, str]:
return {"message": "Hello, World!"} return {"message": "Hello, World!"}
@app.get("/api/data") @app.get("/api/data")
@rate_limit(100, 60) # 100 requests per minute @rate_limit(100, 60) # 100 requests per minute
async def get_data(request: Request) -> dict[str, str]: async def get_data(_: Request) -> dict[str, str]:
return {"data": "Some important data"} return {"data": "Some important data"}
if __name__ == "__main__": if __name__ == "__main__":
import uvicorn import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000) uvicorn.run(app, host="127.0.0.1", port=8002)

View File

@@ -10,8 +10,8 @@ from fastapi.responses import JSONResponse
from fastapi_traffic import ( from fastapi_traffic import (
Algorithm, Algorithm,
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
rate_limit, rate_limit,
) )
from fastapi_traffic.core.limiter import set_limiter from fastapi_traffic.core.limiter import set_limiter
@@ -21,7 +21,7 @@ limiter = RateLimiter(backend)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(_: FastAPI):
await limiter.initialize() await limiter.initialize()
set_limiter(limiter) set_limiter(limiter)
yield yield
@@ -32,7 +32,7 @@ app = FastAPI(title="Rate Limiting Algorithms", lifespan=lifespan)
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONResponse: async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
return JSONResponse( return JSONResponse(
status_code=429, status_code=429,
content={ content={
@@ -53,9 +53,12 @@ async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONRe
window_size=60, window_size=60,
algorithm=Algorithm.FIXED_WINDOW, algorithm=Algorithm.FIXED_WINDOW,
) )
async def fixed_window(request: Request) -> dict[str, str]: async def fixed_window(_: Request) -> dict[str, str]:
"""Fixed window resets counter at fixed time intervals.""" """Fixed window resets counter at fixed time intervals."""
return {"algorithm": "fixed_window", "description": "Counter resets every 60 seconds"} return {
"algorithm": "fixed_window",
"description": "Counter resets every 60 seconds",
}
# 2. Sliding Window Log - Most precise # 2. Sliding Window Log - Most precise
@@ -67,9 +70,12 @@ async def fixed_window(request: Request) -> dict[str, str]:
window_size=60, window_size=60,
algorithm=Algorithm.SLIDING_WINDOW, algorithm=Algorithm.SLIDING_WINDOW,
) )
async def sliding_window(request: Request) -> dict[str, str]: async def sliding_window(_: Request) -> dict[str, str]:
"""Sliding window tracks exact timestamps for precise limiting.""" """Sliding window tracks exact timestamps for precise limiting."""
return {"algorithm": "sliding_window", "description": "Precise tracking with timestamp log"} return {
"algorithm": "sliding_window",
"description": "Precise tracking with timestamp log",
}
# 3. Sliding Window Counter - Balance of precision and efficiency # 3. Sliding Window Counter - Balance of precision and efficiency
@@ -81,9 +87,12 @@ async def sliding_window(request: Request) -> dict[str, str]:
window_size=60, window_size=60,
algorithm=Algorithm.SLIDING_WINDOW_COUNTER, algorithm=Algorithm.SLIDING_WINDOW_COUNTER,
) )
async def sliding_window_counter(request: Request) -> dict[str, str]: async def sliding_window_counter(_: Request) -> dict[str, str]:
"""Sliding window counter uses weighted counts from current and previous windows.""" """Sliding window counter uses weighted counts from current and previous windows."""
return {"algorithm": "sliding_window_counter", "description": "Efficient approximation"} return {
"algorithm": "sliding_window_counter",
"description": "Efficient approximation",
}
# 4. Token Bucket - Allows controlled bursts # 4. Token Bucket - Allows controlled bursts
@@ -96,7 +105,7 @@ async def sliding_window_counter(request: Request) -> dict[str, str]:
algorithm=Algorithm.TOKEN_BUCKET, algorithm=Algorithm.TOKEN_BUCKET,
burst_size=5, # Allow bursts of up to 5 requests burst_size=5, # Allow bursts of up to 5 requests
) )
async def token_bucket(request: Request) -> dict[str, str]: async def token_bucket(_: Request) -> dict[str, str]:
"""Token bucket allows bursts up to burst_size, then refills gradually.""" """Token bucket allows bursts up to burst_size, then refills gradually."""
return {"algorithm": "token_bucket", "description": "Allows controlled bursts"} return {"algorithm": "token_bucket", "description": "Allows controlled bursts"}
@@ -111,7 +120,7 @@ async def token_bucket(request: Request) -> dict[str, str]:
algorithm=Algorithm.LEAKY_BUCKET, algorithm=Algorithm.LEAKY_BUCKET,
burst_size=5, # Queue capacity burst_size=5, # Queue capacity
) )
async def leaky_bucket(request: Request) -> dict[str, str]: async def leaky_bucket(_: Request) -> dict[str, str]:
"""Leaky bucket smooths traffic to a constant rate.""" """Leaky bucket smooths traffic to a constant rate."""
return {"algorithm": "leaky_bucket", "description": "Constant output rate"} return {"algorithm": "leaky_bucket", "description": "Constant output rate"}

View File

@@ -11,8 +11,8 @@ from fastapi.responses import JSONResponse
from fastapi_traffic import ( from fastapi_traffic import (
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
SQLiteBackend, SQLiteBackend,
rate_limit, rate_limit,
) )
@@ -32,9 +32,10 @@ def get_backend():
# Redis - Required for distributed/multi-instance deployments # Redis - Required for distributed/multi-instance deployments
# Requires: pip install redis # Requires: pip install redis
try: try:
from fastapi_traffic import RedisBackend
import asyncio import asyncio
from fastapi_traffic import RedisBackend
async def create_redis(): async def create_redis():
return await RedisBackend.from_url( return await RedisBackend.from_url(
os.getenv("REDIS_URL", "redis://localhost:6379/0"), os.getenv("REDIS_URL", "redis://localhost:6379/0"),
@@ -56,7 +57,7 @@ limiter = RateLimiter(backend)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(_: FastAPI):
await limiter.initialize() await limiter.initialize()
set_limiter(limiter) set_limiter(limiter)
yield yield
@@ -67,7 +68,7 @@ app = FastAPI(title="Storage Backends Example", lifespan=lifespan)
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONResponse: async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
return JSONResponse( return JSONResponse(
status_code=429, status_code=429,
content={"error": "rate_limit_exceeded", "retry_after": exc.retry_after}, content={"error": "rate_limit_exceeded", "retry_after": exc.retry_after},
@@ -76,7 +77,7 @@ async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONRe
@app.get("/api/resource") @app.get("/api/resource")
@rate_limit(100, 60) @rate_limit(100, 60)
async def get_resource(request: Request) -> dict[str, str]: async def get_resource(_: Request) -> dict[str, str]:
return {"message": "Resource data", "backend": type(backend).__name__} return {"message": "Resource data", "backend": type(backend).__name__}

View File

@@ -9,8 +9,8 @@ from fastapi.responses import JSONResponse
from fastapi_traffic import ( from fastapi_traffic import (
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
rate_limit, rate_limit,
) )
from fastapi_traffic.core.limiter import set_limiter from fastapi_traffic.core.limiter import set_limiter
@@ -20,7 +20,7 @@ limiter = RateLimiter(backend)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(_: FastAPI):
await limiter.initialize() await limiter.initialize()
set_limiter(limiter) set_limiter(limiter)
yield yield
@@ -31,7 +31,7 @@ app = FastAPI(title="Custom Key Extractors", lifespan=lifespan)
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONResponse: async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
return JSONResponse( return JSONResponse(
status_code=429, status_code=429,
content={"error": "rate_limit_exceeded", "retry_after": exc.retry_after}, content={"error": "rate_limit_exceeded", "retry_after": exc.retry_after},
@@ -43,7 +43,10 @@ async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONRe
@rate_limit(10, 60) # Uses default IP-based key extractor @rate_limit(10, 60) # Uses default IP-based key extractor
async def by_ip(request: Request) -> dict[str, str]: async def by_ip(request: Request) -> dict[str, str]:
"""Rate limited by client IP address (default behavior).""" """Rate limited by client IP address (default behavior)."""
return {"limited_by": "ip", "client_ip": request.client.host if request.client else "unknown"} return {
"limited_by": "ip",
"client_ip": request.client.host if request.client else "unknown",
}
# 2. Rate limit by API key # 2. Rate limit by API key
@@ -99,7 +102,7 @@ def endpoint_ip_extractor(request: Request) -> str:
window_size=60, window_size=60,
key_extractor=endpoint_ip_extractor, key_extractor=endpoint_ip_extractor,
) )
async def endpoint_specific(request: Request) -> dict[str, str]: async def endpoint_specific(_: Request) -> dict[str, str]:
"""Each endpoint has its own rate limit counter.""" """Each endpoint has its own rate limit counter."""
return {"limited_by": "endpoint+ip"} return {"limited_by": "endpoint+ip"}

View File

@@ -10,8 +10,8 @@ from fastapi.responses import JSONResponse
from fastapi_traffic import ( from fastapi_traffic import (
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
) )
from fastapi_traffic.core.decorator import RateLimitDependency from fastapi_traffic.core.decorator import RateLimitDependency
from fastapi_traffic.core.limiter import set_limiter from fastapi_traffic.core.limiter import set_limiter
@@ -21,7 +21,7 @@ limiter = RateLimiter(backend)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(_: FastAPI):
"""Lifespan context manager for startup/shutdown.""" """Lifespan context manager for startup/shutdown."""
await limiter.initialize() await limiter.initialize()
set_limiter(limiter) set_limiter(limiter)
@@ -33,7 +33,7 @@ app = FastAPI(title="Dependency Injection Example", lifespan=lifespan)
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONResponse: async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
return JSONResponse( return JSONResponse(
status_code=429, status_code=429,
content={"error": "rate_limit_exceeded", "retry_after": exc.retry_after}, content={"error": "rate_limit_exceeded", "retry_after": exc.retry_after},
@@ -46,7 +46,7 @@ basic_rate_limit = RateLimitDependency(limit=10, window_size=60)
@app.get("/basic") @app.get("/basic")
async def basic_endpoint( async def basic_endpoint(
request: Request, _: Request,
rate_info: Any = Depends(basic_rate_limit), rate_info: Any = Depends(basic_rate_limit),
) -> dict[str, Any]: ) -> dict[str, Any]:
"""Access rate limit info in your endpoint logic.""" """Access rate limit info in your endpoint logic."""
@@ -131,7 +131,7 @@ api_rate_limit = RateLimitDependency(
@app.get("/api/resource") @app.get("/api/resource")
async def api_resource( async def api_resource(
request: Request, _: Request,
rate_info: Any = Depends(api_rate_limit), rate_info: Any = Depends(api_rate_limit),
) -> dict[str, Any]: ) -> dict[str, Any]:
"""API endpoint with per-API-key rate limiting.""" """API endpoint with per-API-key rate limiting."""
@@ -156,7 +156,7 @@ per_hour_limit = RateLimitDependency(
async def combined_rate_limit( async def combined_rate_limit(
request: Request, _: Request,
minute_info: Any = Depends(per_minute_limit), minute_info: Any = Depends(per_minute_limit),
hour_info: Any = Depends(per_hour_limit), hour_info: Any = Depends(per_hour_limit),
) -> dict[str, Any]: ) -> dict[str, Any]:
@@ -175,7 +175,7 @@ async def combined_rate_limit(
@app.get("/combined") @app.get("/combined")
async def combined_endpoint( async def combined_endpoint(
request: Request, _: Request,
rate_info: dict[str, Any] = Depends(combined_rate_limit), rate_info: dict[str, Any] = Depends(combined_rate_limit),
) -> dict[str, Any]: ) -> dict[str, Any]:
"""Endpoint with multiple rate limit tiers.""" """Endpoint with multiple rate limit tiers."""
@@ -209,9 +209,13 @@ async def internal_exempt_endpoint(
return { return {
"message": "Success", "message": "Success",
"is_internal": is_internal, "is_internal": is_internal,
"rate_limit": None if is_internal else { "rate_limit": (
"remaining": rate_info.remaining, None
}, if is_internal
else {
"remaining": rate_info.remaining,
}
),
} }

View File

@@ -14,20 +14,20 @@ from __future__ import annotations
import os import os
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from typing import Annotated
from fastapi import Depends, FastAPI, Request from fastapi import Depends, FastAPI, Request
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from typing import Annotated
from fastapi_traffic import ( from fastapi_traffic import (
Algorithm, Algorithm,
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
rate_limit, rate_limit,
) )
from fastapi_traffic.core.limiter import set_limiter
from fastapi_traffic.backends.redis import RedisBackend from fastapi_traffic.backends.redis import RedisBackend
from fastapi_traffic.core.limiter import set_limiter
async def create_redis_backend(): async def create_redis_backend():
@@ -94,7 +94,7 @@ LimiterDep = Annotated[RateLimiter, Depends(get_limiter)]
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONResponse: async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
return JSONResponse( return JSONResponse(
status_code=429, status_code=429,
content={ content={
@@ -113,7 +113,7 @@ async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONRe
window_size=60, window_size=60,
key_prefix="shared", key_prefix="shared",
) )
async def shared_limit(request: Request) -> dict[str, str]: async def shared_limit(_: Request) -> dict[str, str]:
"""This rate limit is shared across all application instances.""" """This rate limit is shared across all application instances."""
return { return {
"message": "Success", "message": "Success",
@@ -152,7 +152,7 @@ async def user_limit(request: Request) -> dict[str, str]:
burst_size=20, burst_size=20,
key_prefix="burst", key_prefix="burst",
) )
async def burst_allowed(request: Request) -> dict[str, str]: async def burst_allowed(_: Request) -> dict[str, str]:
"""Token bucket with Redis allows controlled bursts across instances.""" """Token bucket with Redis allows controlled bursts across instances."""
return {"message": "Burst request successful"} return {"message": "Burst request successful"}

View File

@@ -13,8 +13,8 @@ from fastapi.responses import JSONResponse
from fastapi_traffic import ( from fastapi_traffic import (
Algorithm, Algorithm,
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
) )
from fastapi_traffic.core.decorator import RateLimitDependency from fastapi_traffic.core.decorator import RateLimitDependency
from fastapi_traffic.core.limiter import set_limiter from fastapi_traffic.core.limiter import set_limiter
@@ -24,7 +24,7 @@ limiter = RateLimiter(backend)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(_: FastAPI):
await limiter.initialize() await limiter.initialize()
set_limiter(limiter) set_limiter(limiter)
yield yield
@@ -82,7 +82,14 @@ TIER_CONFIGS: dict[Tier, TierConfig] = {
requests_per_hour=50000, requests_per_hour=50000,
requests_per_day=500000, requests_per_day=500000,
burst_size=200, burst_size=200,
features=["basic_api", "webhooks", "analytics", "priority_support", "sla", "custom_integrations"], features=[
"basic_api",
"webhooks",
"analytics",
"priority_support",
"sla",
"custom_integrations",
],
), ),
} }
@@ -109,7 +116,9 @@ async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONRe
"message": exc.message, "message": exc.message,
"retry_after": exc.retry_after, "retry_after": exc.retry_after,
"tier": tier.value, "tier": tier.value,
"upgrade_url": "https://example.com/pricing" if tier != Tier.ENTERPRISE else None, "upgrade_url": (
"https://example.com/pricing" if tier != Tier.ENTERPRISE else None
),
}, },
headers=exc.limit_info.to_headers() if exc.limit_info else {}, headers=exc.limit_info.to_headers() if exc.limit_info else {},
) )
@@ -171,7 +180,7 @@ async def apply_tier_rate_limit(
@app.get("/api/v1/data") @app.get("/api/v1/data")
async def get_data( async def get_data(
request: Request, _: Request,
limit_info: dict[str, Any] = Depends(apply_tier_rate_limit), limit_info: dict[str, Any] = Depends(apply_tier_rate_limit),
) -> dict[str, Any]: ) -> dict[str, Any]:
"""Get data with tier-based rate limiting.""" """Get data with tier-based rate limiting."""
@@ -188,7 +197,7 @@ async def get_data(
@app.get("/api/v1/analytics") @app.get("/api/v1/analytics")
async def get_analytics( async def get_analytics(
request: Request, _: Request,
limit_info: dict[str, Any] = Depends(apply_tier_rate_limit), limit_info: dict[str, Any] = Depends(apply_tier_rate_limit),
) -> dict[str, Any]: ) -> dict[str, Any]:
"""Analytics endpoint - requires Pro tier or higher.""" """Analytics endpoint - requires Pro tier or higher."""
@@ -228,7 +237,11 @@ async def get_tier_info(
"burst_size": config.burst_size, "burst_size": config.burst_size,
}, },
"features": config.features, "features": config.features,
"upgrade_options": [t.value for t in Tier if TIER_CONFIGS[t].requests_per_minute > config.requests_per_minute], "upgrade_options": [
t.value
for t in Tier
if TIER_CONFIGS[t].requests_per_minute > config.requests_per_minute
],
} }

View File

@@ -12,8 +12,8 @@ from fastapi.responses import HTMLResponse, JSONResponse, PlainTextResponse
from fastapi_traffic import ( from fastapi_traffic import (
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
rate_limit, rate_limit,
) )
from fastapi_traffic.core.limiter import set_limiter from fastapi_traffic.core.limiter import set_limiter
@@ -26,7 +26,7 @@ limiter = RateLimiter(backend)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(_: FastAPI):
await limiter.initialize() await limiter.initialize()
set_limiter(limiter) set_limiter(limiter)
yield yield
@@ -38,7 +38,9 @@ app = FastAPI(title="Custom Responses Example", lifespan=lifespan)
# 1. Standard JSON error response # 1. Standard JSON error response
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def json_rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONResponse: async def json_rate_limit_handler(
request: Request, exc: RateLimitExceeded
) -> JSONResponse:
"""Standard JSON response for API clients.""" """Standard JSON response for API clients."""
headers = exc.limit_info.to_headers() if exc.limit_info else {} headers = exc.limit_info.to_headers() if exc.limit_info else {}
@@ -85,7 +87,7 @@ async def log_blocked_request(request: Request, info: Any) -> None:
window_size=60, window_size=60,
on_blocked=log_blocked_request, on_blocked=log_blocked_request,
) )
async def monitored_endpoint(request: Request) -> dict[str, str]: async def monitored_endpoint(_: Request) -> dict[str, str]:
"""Endpoint with blocked request logging.""" """Endpoint with blocked request logging."""
return {"message": "Success"} return {"message": "Success"}
@@ -97,7 +99,7 @@ async def monitored_endpoint(request: Request) -> dict[str, str]:
window_size=60, window_size=60,
error_message="Search rate limit exceeded. Please wait before searching again.", error_message="Search rate limit exceeded. Please wait before searching again.",
) )
async def search_endpoint(request: Request, q: str = "") -> dict[str, Any]: async def search_endpoint(_: Request, q: str = "") -> dict[str, Any]:
"""Search with custom error message.""" """Search with custom error message."""
return {"query": q, "results": []} return {"query": q, "results": []}
@@ -108,7 +110,7 @@ async def search_endpoint(request: Request, q: str = "") -> dict[str, Any]:
window_size=300, # 5 uploads per 5 minutes window_size=300, # 5 uploads per 5 minutes
error_message="Upload limit reached. You can upload 5 files every 5 minutes.", error_message="Upload limit reached. You can upload 5 files every 5 minutes.",
) )
async def upload_endpoint(request: Request) -> dict[str, str]: async def upload_endpoint(_: Request) -> dict[str, str]:
"""Upload with custom error message.""" """Upload with custom error message."""
return {"message": "Upload successful"} return {"message": "Upload successful"}
@@ -116,7 +118,7 @@ async def upload_endpoint(request: Request) -> dict[str, str]:
# 4. Different response formats based on Accept header # 4. Different response formats based on Accept header
@app.get("/api/flexible") @app.get("/api/flexible")
@rate_limit(limit=10, window_size=60) @rate_limit(limit=10, window_size=60)
async def flexible_endpoint(request: Request) -> dict[str, str]: async def flexible_endpoint(_: Request) -> dict[str, str]:
"""Endpoint that returns different formats.""" """Endpoint that returns different formats."""
return {"message": "Success", "data": "Some data"} return {"message": "Success", "data": "Some data"}
@@ -168,7 +170,7 @@ async def flexible_rate_limit_handler(request: Request, exc: RateLimitExceeded):
window_size=60, window_size=60,
include_headers=True, # Includes X-RateLimit-* headers include_headers=True, # Includes X-RateLimit-* headers
) )
async def verbose_headers_endpoint(request: Request) -> dict[str, Any]: async def verbose_headers_endpoint(_: Request) -> dict[str, Any]:
"""Response includes detailed rate limit headers.""" """Response includes detailed rate limit headers."""
return { return {
"message": "Check response headers for rate limit info", "message": "Check response headers for rate limit info",
@@ -181,10 +183,13 @@ async def verbose_headers_endpoint(request: Request) -> dict[str, Any]:
# 6. Graceful degradation - return cached/stale data instead of error # 6. Graceful degradation - return cached/stale data instead of error
cached_data = {"data": "Cached response", "cached_at": datetime.now(timezone.utc).isoformat()} cached_data = {
"data": "Cached response",
"cached_at": datetime.now(timezone.utc).isoformat(),
}
async def return_cached_on_limit(request: Request, info: Any) -> None: async def return_cached_on_limit(_: Request, __: Any) -> None:
"""Log when rate limited (callback doesn't prevent exception).""" """Log when rate limited (callback doesn't prevent exception)."""
logger.info("Returning cached data due to rate limit") logger.info("Returning cached data due to rate limit")
# This callback is called when blocked, but doesn't prevent the exception # This callback is called when blocked, but doesn't prevent the exception
@@ -197,9 +202,12 @@ async def return_cached_on_limit(request: Request, info: Any) -> None:
window_size=60, window_size=60,
on_blocked=return_cached_on_limit, on_blocked=return_cached_on_limit,
) )
async def graceful_endpoint(request: Request) -> dict[str, str]: async def graceful_endpoint(_: Request) -> dict[str, str]:
"""Endpoint with graceful degradation.""" """Endpoint with graceful degradation."""
return {"message": "Fresh data", "timestamp": datetime.now(timezone.utc).isoformat()} return {
"message": "Fresh data",
"timestamp": datetime.now(timezone.utc).isoformat(),
}
if __name__ == "__main__": if __name__ == "__main__":

View File

@@ -13,8 +13,8 @@ from fastapi.responses import JSONResponse
from fastapi_traffic import ( from fastapi_traffic import (
Algorithm, Algorithm,
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
rate_limit, rate_limit,
) )
from fastapi_traffic.core.decorator import RateLimitDependency from fastapi_traffic.core.decorator import RateLimitDependency
@@ -25,7 +25,7 @@ limiter = RateLimiter(backend)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(_: FastAPI):
await limiter.initialize() await limiter.initialize()
set_limiter(limiter) set_limiter(limiter)
yield yield
@@ -36,7 +36,7 @@ app = FastAPI(title="Advanced Patterns", lifespan=lifespan)
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONResponse: async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
return JSONResponse( return JSONResponse(
status_code=429, status_code=429,
content={"error": "rate_limit_exceeded", "retry_after": exc.retry_after}, content={"error": "rate_limit_exceeded", "retry_after": exc.retry_after},
@@ -49,30 +49,31 @@ async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONRe
# Different operations consume different amounts of quota # Different operations consume different amounts of quota
# ============================================================================= # =============================================================================
@app.get("/api/list") @app.get("/api/list")
@rate_limit(limit=100, window_size=60, cost=1) @rate_limit(limit=100, window_size=60, cost=1)
async def list_items(request: Request) -> dict[str, Any]: async def list_items(_: Request) -> dict[str, Any]:
"""Cheap operation - costs 1 token.""" """Cheap operation - costs 1 token."""
return {"items": ["a", "b", "c"], "cost": 1} return {"items": ["a", "b", "c"], "cost": 1}
@app.get("/api/details/{item_id}") @app.get("/api/details/{item_id}")
@rate_limit(limit=100, window_size=60, cost=5) @rate_limit(limit=100, window_size=60, cost=5)
async def get_details(request: Request, item_id: str) -> dict[str, Any]: async def get_details(_: Request, item_id: str) -> dict[str, Any]:
"""Medium operation - costs 5 tokens.""" """Medium operation - costs 5 tokens."""
return {"item_id": item_id, "details": "...", "cost": 5} return {"item_id": item_id, "details": "...", "cost": 5}
@app.post("/api/generate") @app.post("/api/generate")
@rate_limit(limit=100, window_size=60, cost=20) @rate_limit(limit=100, window_size=60, cost=20)
async def generate_content(request: Request) -> dict[str, Any]: async def generate_content(_: Request) -> dict[str, Any]:
"""Expensive operation - costs 20 tokens.""" """Expensive operation - costs 20 tokens."""
return {"generated": "AI-generated content...", "cost": 20} return {"generated": "AI-generated content...", "cost": 20}
@app.post("/api/bulk-export") @app.post("/api/bulk-export")
@rate_limit(limit=100, window_size=60, cost=50) @rate_limit(limit=100, window_size=60, cost=50)
async def bulk_export(request: Request) -> dict[str, Any]: async def bulk_export(_: Request) -> dict[str, Any]:
"""Very expensive operation - costs 50 tokens.""" """Very expensive operation - costs 50 tokens."""
return {"export_url": "https://...", "cost": 50} return {"export_url": "https://...", "cost": 50}
@@ -82,6 +83,7 @@ async def bulk_export(request: Request) -> dict[str, Any]:
# Gradually reduce limits instead of hard blocking # Gradually reduce limits instead of hard blocking
# ============================================================================= # =============================================================================
def get_request_priority(request: Request) -> int: def get_request_priority(request: Request) -> int:
"""Determine request priority (higher = more important).""" """Determine request priority (higher = more important)."""
# Premium users get higher priority # Premium users get higher priority
@@ -122,6 +124,7 @@ async def priority_endpoint(request: Request) -> dict[str, Any]:
# Prevent abuse of specific resources # Prevent abuse of specific resources
# ============================================================================= # =============================================================================
def resource_key_extractor(request: Request) -> str: def resource_key_extractor(request: Request) -> str:
"""Rate limit by resource ID + user.""" """Rate limit by resource ID + user."""
resource_id = request.path_params.get("resource_id", "unknown") resource_id = request.path_params.get("resource_id", "unknown")
@@ -135,7 +138,7 @@ def resource_key_extractor(request: Request) -> str:
window_size=60, window_size=60,
key_extractor=resource_key_extractor, key_extractor=resource_key_extractor,
) )
async def get_resource(request: Request, resource_id: str) -> dict[str, str]: async def get_resource(_: Request, resource_id: str) -> dict[str, str]:
"""Each user can access each resource 10 times per minute.""" """Each user can access each resource 10 times per minute."""
return {"resource_id": resource_id, "data": "..."} return {"resource_id": resource_id, "data": "..."}
@@ -145,6 +148,7 @@ async def get_resource(request: Request, resource_id: str) -> dict[str, str]:
# Prevent brute force attacks # Prevent brute force attacks
# ============================================================================= # =============================================================================
def login_key_extractor(request: Request) -> str: def login_key_extractor(request: Request) -> str:
"""Rate limit by IP + username to prevent brute force.""" """Rate limit by IP + username to prevent brute force."""
ip = request.client.host if request.client else "unknown" ip = request.client.host if request.client else "unknown"
@@ -161,7 +165,7 @@ def login_key_extractor(request: Request) -> str:
key_extractor=login_key_extractor, key_extractor=login_key_extractor,
error_message="Too many login attempts. Please try again in 5 minutes.", error_message="Too many login attempts. Please try again in 5 minutes.",
) )
async def login(request: Request) -> dict[str, str]: async def login(_: Request) -> dict[str, str]:
"""Login endpoint with brute force protection.""" """Login endpoint with brute force protection."""
return {"message": "Login successful", "token": "..."} return {"message": "Login successful", "token": "..."}
@@ -179,7 +183,7 @@ def password_reset_key(request: Request) -> str:
key_extractor=password_reset_key, key_extractor=password_reset_key,
error_message="Too many password reset requests. Please try again later.", error_message="Too many password reset requests. Please try again later.",
) )
async def password_reset(request: Request) -> dict[str, str]: async def password_reset(_: Request) -> dict[str, str]:
"""Password reset with strict rate limiting.""" """Password reset with strict rate limiting."""
return {"message": "Password reset email sent"} return {"message": "Password reset email sent"}
@@ -197,23 +201,24 @@ webhook_rate_limit = RateLimitDependency(
async def check_webhook_limit( async def check_webhook_limit(
request: Request, _: Request,
webhook_url: str, webhook_url: str,
) -> None: ) -> None:
"""Check rate limit before sending webhook.""" """Check rate limit before sending webhook."""
# Create key based on destination domain # Create key based on destination domain
from urllib.parse import urlparse from urllib.parse import urlparse
domain = urlparse(webhook_url).netloc domain = urlparse(webhook_url).netloc
_key = f"webhook:{domain}" # Would be used with limiter in production _key = f"webhook:{domain}" # Would be used with limiter in production
# Manually check limit (simplified example) # Manually check limit (simplified example)
# In production, you'd use the limiter directly # In production, you'd use the limiter directly
_ = _key # Suppress unused variable warning __ = _key # Suppress unused variable warning
@app.post("/api/send-webhook") @app.post("/api/send-webhook")
async def send_webhook( async def send_webhook(
request: Request, _: Request,
webhook_url: str = "https://example.com/webhook", webhook_url: str = "https://example.com/webhook",
rate_info: Any = Depends(webhook_rate_limit), rate_info: Any = Depends(webhook_rate_limit),
) -> dict[str, Any]: ) -> dict[str, Any]:
@@ -231,6 +236,7 @@ async def send_webhook(
# Detect and limit similar requests (e.g., spam prevention) # Detect and limit similar requests (e.g., spam prevention)
# ============================================================================= # =============================================================================
def request_fingerprint(request: Request) -> str: def request_fingerprint(request: Request) -> str:
"""Create fingerprint based on request characteristics.""" """Create fingerprint based on request characteristics."""
ip = request.client.host if request.client else "unknown" ip = request.client.host if request.client else "unknown"
@@ -251,7 +257,7 @@ def request_fingerprint(request: Request) -> str:
key_extractor=request_fingerprint, key_extractor=request_fingerprint,
error_message="Too many submissions from this device.", error_message="Too many submissions from this device.",
) )
async def submit_form(request: Request) -> dict[str, str]: async def submit_form(_: Request) -> dict[str, str]:
"""Form submission with fingerprint-based rate limiting.""" """Form submission with fingerprint-based rate limiting."""
return {"message": "Form submitted successfully"} return {"message": "Form submitted successfully"}
@@ -261,13 +267,14 @@ async def submit_form(request: Request) -> dict[str, str]:
# Different limits during peak vs off-peak hours # Different limits during peak vs off-peak hours
# ============================================================================= # =============================================================================
def is_peak_hours() -> bool: def is_peak_hours() -> bool:
"""Check if current time is during peak hours (9 AM - 6 PM UTC).""" """Check if current time is during peak hours (9 AM - 6 PM UTC)."""
current_hour = time.gmtime().tm_hour current_hour = time.gmtime().tm_hour
return 9 <= current_hour < 18 return 9 <= current_hour < 18
def peak_aware_exempt(request: Request) -> bool: def peak_aware_exempt(_: Request) -> bool:
"""Exempt requests during off-peak hours.""" """Exempt requests during off-peak hours."""
return not is_peak_hours() return not is_peak_hours()
@@ -278,7 +285,7 @@ def peak_aware_exempt(request: Request) -> bool:
window_size=60, window_size=60,
exempt_when=peak_aware_exempt, # No limit during off-peak exempt_when=peak_aware_exempt, # No limit during off-peak
) )
async def peak_aware_endpoint(request: Request) -> dict[str, Any]: async def peak_aware_endpoint(_: Request) -> dict[str, Any]:
"""Stricter limits during peak hours.""" """Stricter limits during peak hours."""
return { return {
"message": "Success", "message": "Success",
@@ -297,7 +304,7 @@ per_hour = RateLimitDependency(limit=1000, window_size=3600, key_prefix="hour")
async def cascading_limits( async def cascading_limits(
request: Request, _: Request,
sec_info: Any = Depends(per_second), sec_info: Any = Depends(per_second),
min_info: Any = Depends(per_minute), min_info: Any = Depends(per_minute),
hour_info: Any = Depends(per_hour), hour_info: Any = Depends(per_hour),
@@ -312,7 +319,7 @@ async def cascading_limits(
@app.get("/api/cascading") @app.get("/api/cascading")
async def cascading_endpoint( async def cascading_endpoint(
request: Request, _: Request,
limits: dict[str, Any] = Depends(cascading_limits), limits: dict[str, Any] = Depends(cascading_limits),
) -> dict[str, Any]: ) -> dict[str, Any]:
"""Endpoint with per-second, per-minute, and per-hour limits.""" """Endpoint with per-second, per-minute, and per-hour limits."""

View File

@@ -346,7 +346,7 @@ def create_app_with_config() -> FastAPI:
) )
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse: async def _rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
return JSONResponse( return JSONResponse(
status_code=429, status_code=429,
content={ content={
@@ -358,17 +358,17 @@ def create_app_with_config() -> FastAPI:
@app.get("/") @app.get("/")
@rate_limit(limit=10, window_size=60) @rate_limit(limit=10, window_size=60)
async def root(_: Request) -> dict[str, str]: async def _root(_: Request) -> dict[str, str]:
return {"message": "Hello from config-loaded app!"} return {"message": "Hello from config-loaded app!"}
@app.get("/health") @app.get("/health")
async def health() -> dict[str, str]: async def _health() -> dict[str, str]:
"""Health check - exempt from rate limiting.""" """Health check - exempt from rate limiting."""
return {"status": "healthy"} return {"status": "healthy"}
@app.get("/api/data") @app.get("/api/data")
@rate_limit(limit=50, window_size=60) @rate_limit(limit=50, window_size=60)
async def get_data(_: Request) -> dict[str, str]: async def _get_data(_: Request) -> dict[str, str]:
return {"data": "Some API data"} return {"data": "Some API data"}
return app return app

View File

@@ -3,21 +3,24 @@
from __future__ import annotations from __future__ import annotations
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from typing import AsyncIterator from typing import TYPE_CHECKING
from fastapi import Depends, FastAPI, Request from fastapi import Depends, FastAPI, Request
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from fastapi_traffic import ( from fastapi_traffic import (
Algorithm, Algorithm,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
SQLiteBackend, SQLiteBackend,
rate_limit, rate_limit,
) )
from fastapi_traffic.core.decorator import RateLimitDependency from fastapi_traffic.core.decorator import RateLimitDependency
from fastapi_traffic.core.limiter import set_limiter from fastapi_traffic.core.limiter import set_limiter
if TYPE_CHECKING:
from collections.abc import AsyncIterator
# Configure global rate limiter with SQLite backend for persistence # Configure global rate limiter with SQLite backend for persistence
backend = SQLiteBackend("rate_limits.db") backend = SQLiteBackend("rate_limits.db")
limiter = RateLimiter(backend) limiter = RateLimiter(backend)
@@ -25,7 +28,7 @@ set_limiter(limiter)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncIterator[None]: async def lifespan(_: FastAPI) -> AsyncIterator[None]:
"""Manage application lifespan - startup and shutdown.""" """Manage application lifespan - startup and shutdown."""
# Startup: Initialize the rate limiter # Startup: Initialize the rate limiter
await limiter.initialize() await limiter.initialize()
@@ -39,7 +42,7 @@ app = FastAPI(title="FastAPI Traffic Example", lifespan=lifespan)
# Exception handler for rate limit exceeded # Exception handler for rate limit exceeded
@app.exception_handler(RateLimitExceeded) @app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONResponse: async def rate_limit_handler(_: Request, exc: RateLimitExceeded) -> JSONResponse:
"""Handle rate limit exceeded exceptions.""" """Handle rate limit exceeded exceptions."""
headers = exc.limit_info.to_headers() if exc.limit_info else {} headers = exc.limit_info.to_headers() if exc.limit_info else {}
return JSONResponse( return JSONResponse(
@@ -56,7 +59,7 @@ async def rate_limit_handler(request: Request, exc: RateLimitExceeded) -> JSONRe
# Example 1: Basic decorator usage # Example 1: Basic decorator usage
@app.get("/api/basic") @app.get("/api/basic")
@rate_limit(100, 60) # 100 requests per minute @rate_limit(100, 60) # 100 requests per minute
async def basic_endpoint(request: Request) -> dict[str, str]: async def basic_endpoint(_: Request) -> dict[str, str]:
"""Basic rate-limited endpoint.""" """Basic rate-limited endpoint."""
return {"message": "Hello, World!"} return {"message": "Hello, World!"}
@@ -69,7 +72,7 @@ async def basic_endpoint(request: Request) -> dict[str, str]:
algorithm=Algorithm.TOKEN_BUCKET, algorithm=Algorithm.TOKEN_BUCKET,
burst_size=10, # Allow bursts of up to 10 requests burst_size=10, # Allow bursts of up to 10 requests
) )
async def token_bucket_endpoint(request: Request) -> dict[str, str]: async def token_bucket_endpoint(_: Request) -> dict[str, str]:
"""Endpoint using token bucket algorithm.""" """Endpoint using token bucket algorithm."""
return {"message": "Token bucket rate limiting"} return {"message": "Token bucket rate limiting"}
@@ -81,7 +84,7 @@ async def token_bucket_endpoint(request: Request) -> dict[str, str]:
window_size=60, window_size=60,
algorithm=Algorithm.SLIDING_WINDOW, algorithm=Algorithm.SLIDING_WINDOW,
) )
async def sliding_window_endpoint(request: Request) -> dict[str, str]: async def sliding_window_endpoint(_: Request) -> dict[str, str]:
"""Endpoint using sliding window algorithm.""" """Endpoint using sliding window algorithm."""
return {"message": "Sliding window rate limiting"} return {"message": "Sliding window rate limiting"}
@@ -99,7 +102,7 @@ def api_key_extractor(request: Request) -> str:
window_size=3600, # 1000 requests per hour window_size=3600, # 1000 requests per hour
key_extractor=api_key_extractor, key_extractor=api_key_extractor,
) )
async def api_key_endpoint(request: Request) -> dict[str, str]: async def api_key_endpoint(_: Request) -> dict[str, str]:
"""Endpoint rate limited by API key.""" """Endpoint rate limited by API key."""
return {"message": "Rate limited by API key"} return {"message": "Rate limited by API key"}
@@ -110,7 +113,7 @@ rate_limit_dep = RateLimitDependency(limit=20, window_size=60)
@app.get("/api/dependency") @app.get("/api/dependency")
async def dependency_endpoint( async def dependency_endpoint(
request: Request, _: Request,
rate_info: dict[str, object] = Depends(rate_limit_dep), rate_info: dict[str, object] = Depends(rate_limit_dep),
) -> dict[str, object]: ) -> dict[str, object]:
"""Endpoint using rate limit as dependency.""" """Endpoint using rate limit as dependency."""
@@ -132,7 +135,7 @@ def is_admin(request: Request) -> bool:
window_size=60, window_size=60,
exempt_when=is_admin, exempt_when=is_admin,
) )
async def admin_exempt_endpoint(request: Request) -> dict[str, str]: async def admin_exempt_endpoint(_: Request) -> dict[str, str]:
"""Endpoint with admin exemption.""" """Endpoint with admin exemption."""
return {"message": "Admins are exempt from rate limiting"} return {"message": "Admins are exempt from rate limiting"}
@@ -144,7 +147,7 @@ async def admin_exempt_endpoint(request: Request) -> dict[str, str]:
window_size=60, window_size=60,
cost=10, # This endpoint costs 10 tokens per request cost=10, # This endpoint costs 10 tokens per request
) )
async def expensive_endpoint(request: Request) -> dict[str, str]: async def expensive_endpoint(_: Request) -> dict[str, str]:
"""Expensive operation that costs more tokens.""" """Expensive operation that costs more tokens."""
return {"message": "Expensive operation completed"} return {"message": "Expensive operation completed"}

View File

@@ -71,6 +71,7 @@ class Backend(ABC):
"""Clear all rate limit data.""" """Clear all rate limit data."""
... ...
@abstractmethod
async def close(self) -> None: async def close(self) -> None:
"""Close the backend connection.""" """Close the backend connection."""
pass pass

View File

@@ -3,6 +3,7 @@
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
import contextlib
import time import time
from collections import OrderedDict from collections import OrderedDict
from typing import Any from typing import Any
@@ -13,7 +14,7 @@ from fastapi_traffic.backends.base import Backend
class MemoryBackend(Backend): class MemoryBackend(Backend):
"""Thread-safe in-memory backend with LRU eviction and TTL support.""" """Thread-safe in-memory backend with LRU eviction and TTL support."""
__slots__ = ("_data", "_lock", "_max_size", "_cleanup_interval", "_cleanup_task") __slots__ = ("_cleanup_interval", "_cleanup_task", "_data", "_lock", "_max_size")
def __init__( def __init__(
self, self,
@@ -127,10 +128,8 @@ class MemoryBackend(Backend):
"""Stop cleanup task and clear data.""" """Stop cleanup task and clear data."""
if self._cleanup_task is not None: if self._cleanup_task is not None:
self._cleanup_task.cancel() self._cleanup_task.cancel()
try: with contextlib.suppress(asyncio.CancelledError):
await self._cleanup_task await self._cleanup_task
except asyncio.CancelledError:
pass
self._cleanup_task = None self._cleanup_task = None
await self.clear() await self.clear()

View File

@@ -3,27 +3,30 @@
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
import contextlib
import json import json
import sqlite3 import sqlite3
import time import time
from pathlib import Path from typing import TYPE_CHECKING, Any
from typing import Any
from fastapi_traffic.backends.base import Backend from fastapi_traffic.backends.base import Backend
from fastapi_traffic.exceptions import BackendError from fastapi_traffic.exceptions import BackendError
if TYPE_CHECKING:
from pathlib import Path
class SQLiteBackend(Backend): class SQLiteBackend(Backend):
"""SQLite-based backend with connection pooling and async support.""" """SQLite-based backend with connection pooling and async support."""
__slots__ = ( __slots__ = (
"_db_path",
"_connection",
"_lock",
"_cleanup_interval", "_cleanup_interval",
"_cleanup_task", "_cleanup_task",
"_pool_size", "_connection",
"_connections", "_connections",
"_db_path",
"_lock",
"_pool_size",
) )
def __init__( def __init__(
@@ -59,9 +62,7 @@ class SQLiteBackend(Backend):
"""Ensure a database connection exists.""" """Ensure a database connection exists."""
if self._connection is None: if self._connection is None:
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()
self._connection = await loop.run_in_executor( self._connection = await loop.run_in_executor(None, self._create_connection)
None, self._create_connection
)
assert self._connection is not None assert self._connection is not None
return self._connection return self._connection
@@ -87,16 +88,20 @@ class SQLiteBackend(Backend):
def _create_tables_sync(self, conn: sqlite3.Connection) -> None: def _create_tables_sync(self, conn: sqlite3.Connection) -> None:
"""Synchronously create tables.""" """Synchronously create tables."""
conn.execute(""" conn.execute(
"""
CREATE TABLE IF NOT EXISTS rate_limits ( CREATE TABLE IF NOT EXISTS rate_limits (
key TEXT PRIMARY KEY, key TEXT PRIMARY KEY,
data TEXT NOT NULL, data TEXT NOT NULL,
expires_at REAL NOT NULL expires_at REAL NOT NULL
) )
""") """
conn.execute(""" )
conn.execute(
"""
CREATE INDEX IF NOT EXISTS idx_expires_at ON rate_limits(expires_at) CREATE INDEX IF NOT EXISTS idx_expires_at ON rate_limits(expires_at)
""") """
)
async def _cleanup_loop(self) -> None: async def _cleanup_loop(self) -> None:
"""Background task to clean up expired entries.""" """Background task to clean up expired entries."""
@@ -247,10 +252,8 @@ class SQLiteBackend(Backend):
"""Close the database connection.""" """Close the database connection."""
if self._cleanup_task is not None: if self._cleanup_task is not None:
self._cleanup_task.cancel() self._cleanup_task.cancel()
try: with contextlib.suppress(asyncio.CancelledError):
await self._cleanup_task await self._cleanup_task
except asyncio.CancelledError:
pass
self._cleanup_task = None self._cleanup_task = None
if self._connection is not None: if self._connection is not None:

View File

@@ -26,7 +26,7 @@ class Algorithm(str, Enum):
class BaseAlgorithm(ABC): class BaseAlgorithm(ABC):
"""Base class for rate limiting algorithms.""" """Base class for rate limiting algorithms."""
__slots__ = ("limit", "window_size", "backend", "burst_size") __slots__ = ("backend", "burst_size", "limit", "window_size")
def __init__( def __init__(
self, self,
@@ -450,17 +450,24 @@ def get_algorithm(
burst_size: int | None = None, burst_size: int | None = None,
) -> BaseAlgorithm: ) -> BaseAlgorithm:
"""Factory function to create algorithm instances.""" """Factory function to create algorithm instances."""
algorithm_map: dict[Algorithm, type[BaseAlgorithm]] = { match algorithm:
Algorithm.TOKEN_BUCKET: TokenBucketAlgorithm, case Algorithm.TOKEN_BUCKET:
Algorithm.SLIDING_WINDOW: SlidingWindowAlgorithm, return TokenBucketAlgorithm(
Algorithm.FIXED_WINDOW: FixedWindowAlgorithm, limit, window_size, backend, burst_size=burst_size
Algorithm.LEAKY_BUCKET: LeakyBucketAlgorithm, )
Algorithm.SLIDING_WINDOW_COUNTER: SlidingWindowCounterAlgorithm, case Algorithm.SLIDING_WINDOW:
} return SlidingWindowAlgorithm(
limit, window_size, backend, burst_size=burst_size
algorithm_class = algorithm_map.get(algorithm) )
if algorithm_class is None: case Algorithm.FIXED_WINDOW:
msg = f"Unknown algorithm: {algorithm}" return FixedWindowAlgorithm(
raise ValueError(msg) limit, window_size, backend, burst_size=burst_size
)
return algorithm_class(limit, window_size, backend, burst_size=burst_size) case Algorithm.LEAKY_BUCKET:
return LeakyBucketAlgorithm(
limit, window_size, backend, burst_size=burst_size
)
case Algorithm.SLIDING_WINDOW_COUNTER:
return SlidingWindowCounterAlgorithm(
limit, window_size, backend, burst_size=burst_size
)

View File

@@ -2,8 +2,9 @@
from __future__ import annotations from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass, field from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Callable from typing import TYPE_CHECKING, Any
from fastapi_traffic.core.algorithms import Algorithm from fastapi_traffic.core.algorithms import Algorithm

View File

@@ -49,12 +49,14 @@ _GLOBAL_FIELD_TYPES: dict[str, type[Any]] = {
} }
# Fields that cannot be loaded from config files (callables, complex objects) # Fields that cannot be loaded from config files (callables, complex objects)
_NON_LOADABLE_FIELDS: frozenset[str] = frozenset({ _NON_LOADABLE_FIELDS: frozenset[str] = frozenset(
"key_extractor", {
"exempt_when", "key_extractor",
"on_blocked", "exempt_when",
"backend", "on_blocked",
}) "backend",
}
)
class ConfigLoader: class ConfigLoader:
@@ -200,7 +202,11 @@ class ConfigLoader:
value = value.strip() value = value.strip()
# Remove surrounding quotes if present # Remove surrounding quotes if present
if len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"): if (
len(value) >= 2
and value[0] == value[-1]
and value[0] in ('"', "'")
):
value = value[1:-1] value = value[1:-1]
env_vars[key] = value env_vars[key] = value
@@ -211,14 +217,14 @@ class ConfigLoader:
return env_vars return env_vars
def _load_json_file(self, file_path: Path) -> dict[str, Any]: def _load_json_file(self, file_path: Path) -> Any:
"""Load configuration from a JSON file. """Load configuration from a JSON file.
Args: Args:
file_path: Path to the JSON file. file_path: Path to the JSON file.
Returns: Returns:
Configuration dictionary. Parsed JSON data (could be any JSON type).
Raises: Raises:
ConfigurationError: If the file cannot be read or parsed. ConfigurationError: If the file cannot be read or parsed.
@@ -229,7 +235,7 @@ class ConfigLoader:
try: try:
with file_path.open(encoding="utf-8") as f: with file_path.open(encoding="utf-8") as f:
data: dict[str, Any] = json.load(f) data: Any = json.load(f)
except json.JSONDecodeError as e: except json.JSONDecodeError as e:
msg = f"Invalid JSON in {file_path}: {e}" msg = f"Invalid JSON in {file_path}: {e}"
raise ConfigurationError(msg) from e raise ConfigurationError(msg) from e
@@ -261,7 +267,7 @@ class ConfigLoader:
for key, value in source.items(): for key, value in source.items():
if key.startswith(full_prefix): if key.startswith(full_prefix):
field_name = key[len(full_prefix):].lower() field_name = key[len(full_prefix) :].lower()
if field_name in field_types: if field_name in field_types:
result[field_name] = value result[field_name] = value
@@ -344,6 +350,9 @@ class ConfigLoader:
""" """
path = Path(file_path) path = Path(file_path)
raw_config = self._load_json_file(path) raw_config = self._load_json_file(path)
if not isinstance(raw_config, dict):
msg = "JSON root must be an object"
raise ConfigurationError(msg)
config_dict = self._validate_and_convert(raw_config, _RATE_LIMIT_FIELD_TYPES) config_dict = self._validate_and_convert(raw_config, _RATE_LIMIT_FIELD_TYPES)
# Apply overrides # Apply overrides

View File

@@ -3,19 +3,27 @@
from __future__ import annotations from __future__ import annotations
import functools import functools
from typing import TYPE_CHECKING, Any, Callable, TypeVar, overload from collections.abc import Callable
from typing import TYPE_CHECKING, Any, TypeVar, overload
from fastapi_traffic.core.algorithms import Algorithm from fastapi_traffic.core.algorithms import Algorithm
from fastapi_traffic.core.config import KeyExtractor, RateLimitConfig, default_key_extractor from fastapi_traffic.core.config import (
KeyExtractor,
RateLimitConfig,
default_key_extractor,
)
from fastapi_traffic.core.limiter import get_limiter from fastapi_traffic.core.limiter import get_limiter
from fastapi_traffic.exceptions import RateLimitExceeded
if TYPE_CHECKING: if TYPE_CHECKING:
from starlette.requests import Request from starlette.requests import Request
from starlette.responses import Response from starlette.responses import Response
from fastapi_traffic.exceptions import RateLimitExceeded
F = TypeVar("F", bound=Callable[..., Any]) F = TypeVar("F", bound=Callable[..., Any])
# Note: Config loader from secrets .env
@overload @overload
def rate_limit( def rate_limit(

View File

@@ -3,18 +3,24 @@
from __future__ import annotations from __future__ import annotations
import logging import logging
from typing import TYPE_CHECKING, Awaitable, Callable from typing import TYPE_CHECKING
from starlette.middleware.base import BaseHTTPMiddleware from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse from starlette.responses import JSONResponse
from fastapi_traffic.backends.memory import MemoryBackend from fastapi_traffic.backends.memory import MemoryBackend
from fastapi_traffic.core.algorithms import Algorithm from fastapi_traffic.core.algorithms import Algorithm
from fastapi_traffic.core.config import GlobalConfig, RateLimitConfig, default_key_extractor from fastapi_traffic.core.config import (
GlobalConfig,
RateLimitConfig,
default_key_extractor,
)
from fastapi_traffic.core.limiter import RateLimiter from fastapi_traffic.core.limiter import RateLimiter
from fastapi_traffic.exceptions import RateLimitExceeded from fastapi_traffic.exceptions import RateLimitExceeded
if TYPE_CHECKING: if TYPE_CHECKING:
from collections.abc import Awaitable, Callable
from starlette.requests import Request from starlette.requests import Request
from starlette.responses import Response from starlette.responses import Response
from starlette.types import ASGIApp from starlette.types import ASGIApp

View File

@@ -1,6 +1,6 @@
[project] [project]
name = "fastapi-traffic" name = "fastapi-traffic"
version = "0.1.0" version = "0.2.0"
description = "Production-grade rate limiting for FastAPI with multiple algorithms and backends" description = "Production-grade rate limiting for FastAPI with multiple algorithms and backends"
readme = "README.md" readme = "README.md"
requires-python = ">=3.10" requires-python = ">=3.10"
@@ -42,10 +42,9 @@ dev = [
] ]
[project.urls] [project.urls]
Homepage = "https://github.com/fastapi-traffic/fastapi-traffic" Documentation = "https://gitlab.com/zanewalker/fastapi-traffic#readme"
Documentation = "https://github.com/fastapi-traffic/fastapi-traffic#readme" Repository = "https://github.com/zanewalker/fastapi-traffic"
Repository = "https://github.com/fastapi-traffic/fastapi-traffic" Issues = "https://gitlab.com/zanewalker/fastapi-traffic/issues"
Issues = "https://github.com/fastapi-traffic/fastapi-traffic/issues"
[build-system] [build-system]
requires = ["hatchling"] requires = ["hatchling"]
@@ -54,6 +53,17 @@ build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel] [tool.hatch.build.targets.wheel]
packages = ["fastapi_traffic"] packages = ["fastapi_traffic"]
[tool.hatch.build.targets.sdist]
exclude = [
".venv",
".cache",
".pytest_cache",
".ruff_cache",
".qodo",
".vscode",
"*.db",
]
[tool.ruff] [tool.ruff]
target-version = "py310" target-version = "py310"
line-length = 88 line-length = 88
@@ -82,6 +92,12 @@ ignore = [
[tool.ruff.lint.isort] [tool.ruff.lint.isort]
known-first-party = ["fastapi_traffic"] known-first-party = ["fastapi_traffic"]
[tool.ruff.lint.per-file-ignores]
"tests/*" = ["ARG001", "ARG002"]
"examples/*" = ["ARG001"]
"fastapi_traffic/__init__.py" = ["F401"]
"fastapi_traffic/backends/__init__.py" = ["F401"]
[tool.pyright] [tool.pyright]
pythonVersion = "3.10" pythonVersion = "3.10"
typeCheckingMode = "strict" typeCheckingMode = "strict"
@@ -91,6 +107,9 @@ reportUnknownArgumentType = false
reportUnknownVariableType = false reportUnknownVariableType = false
reportUnknownParameterType = false reportUnknownParameterType = false
reportMissingImports = false reportMissingImports = false
reportUnusedFunction = false
reportInvalidTypeArguments = false
reportGeneralTypeIssues = false
[tool.pytest.ini_options] [tool.pytest.ini_options]
asyncio_mode = "auto" asyncio_mode = "auto"
@@ -100,6 +119,7 @@ addopts = "-v --tb=short"
[dependency-groups] [dependency-groups]
dev = [ dev = [
"black>=25.12.0",
"fastapi>=0.128.0", "fastapi>=0.128.0",
"httpx>=0.28.1", "httpx>=0.28.1",
"pytest>=9.0.2", "pytest>=9.0.2",

View File

@@ -3,7 +3,7 @@
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
from typing import TYPE_CHECKING, AsyncGenerator, Generator from typing import TYPE_CHECKING
import pytest import pytest
from fastapi import FastAPI, Request from fastapi import FastAPI, Request
@@ -13,8 +13,8 @@ from httpx import ASGITransport, AsyncClient
from fastapi_traffic import ( from fastapi_traffic import (
Algorithm, Algorithm,
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
SQLiteBackend, SQLiteBackend,
rate_limit, rate_limit,
) )
@@ -23,6 +23,8 @@ from fastapi_traffic.core.limiter import set_limiter
from fastapi_traffic.middleware import RateLimitMiddleware from fastapi_traffic.middleware import RateLimitMiddleware
if TYPE_CHECKING: if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Generator
pass pass

View File

@@ -12,8 +12,7 @@ Comprehensive tests covering:
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
import time from typing import TYPE_CHECKING
from typing import AsyncGenerator
import pytest import pytest
@@ -28,6 +27,9 @@ from fastapi_traffic.core.algorithms import (
get_algorithm, get_algorithm,
) )
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
@pytest.fixture @pytest.fixture
async def backend() -> AsyncGenerator[MemoryBackend, None]: async def backend() -> AsyncGenerator[MemoryBackend, None]:
@@ -41,9 +43,7 @@ async def backend() -> AsyncGenerator[MemoryBackend, None]:
class TestTokenBucketAlgorithm: class TestTokenBucketAlgorithm:
"""Tests for TokenBucketAlgorithm.""" """Tests for TokenBucketAlgorithm."""
async def test_allows_requests_within_limit( async def test_allows_requests_within_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests within limit are allowed.""" """Test that requests within limit are allowed."""
algo = TokenBucketAlgorithm(10, 60.0, backend) algo = TokenBucketAlgorithm(10, 60.0, backend)
@@ -51,9 +51,7 @@ class TestTokenBucketAlgorithm:
allowed, _ = await algo.check(f"key_{i % 2}") allowed, _ = await algo.check(f"key_{i % 2}")
assert allowed, f"Request {i} should be allowed" assert allowed, f"Request {i} should be allowed"
async def test_blocks_requests_over_limit( async def test_blocks_requests_over_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests over limit are blocked.""" """Test that requests over limit are blocked."""
algo = TokenBucketAlgorithm(3, 60.0, backend) algo = TokenBucketAlgorithm(3, 60.0, backend)
@@ -86,9 +84,7 @@ class TestTokenBucketAlgorithm:
class TestSlidingWindowAlgorithm: class TestSlidingWindowAlgorithm:
"""Tests for SlidingWindowAlgorithm.""" """Tests for SlidingWindowAlgorithm."""
async def test_allows_requests_within_limit( async def test_allows_requests_within_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests within limit are allowed.""" """Test that requests within limit are allowed."""
algo = SlidingWindowAlgorithm(5, 60.0, backend) algo = SlidingWindowAlgorithm(5, 60.0, backend)
@@ -96,9 +92,7 @@ class TestSlidingWindowAlgorithm:
allowed, _ = await algo.check("test_key") allowed, _ = await algo.check("test_key")
assert allowed assert allowed
async def test_blocks_requests_over_limit( async def test_blocks_requests_over_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests over limit are blocked.""" """Test that requests over limit are blocked."""
algo = SlidingWindowAlgorithm(3, 60.0, backend) algo = SlidingWindowAlgorithm(3, 60.0, backend)
@@ -115,9 +109,7 @@ class TestSlidingWindowAlgorithm:
class TestFixedWindowAlgorithm: class TestFixedWindowAlgorithm:
"""Tests for FixedWindowAlgorithm.""" """Tests for FixedWindowAlgorithm."""
async def test_allows_requests_within_limit( async def test_allows_requests_within_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests within limit are allowed.""" """Test that requests within limit are allowed."""
algo = FixedWindowAlgorithm(5, 60.0, backend) algo = FixedWindowAlgorithm(5, 60.0, backend)
@@ -125,9 +117,7 @@ class TestFixedWindowAlgorithm:
allowed, _ = await algo.check("test_key") allowed, _ = await algo.check("test_key")
assert allowed assert allowed
async def test_blocks_requests_over_limit( async def test_blocks_requests_over_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests over limit are blocked.""" """Test that requests over limit are blocked."""
algo = FixedWindowAlgorithm(3, 60.0, backend) algo = FixedWindowAlgorithm(3, 60.0, backend)
@@ -144,9 +134,7 @@ class TestFixedWindowAlgorithm:
class TestLeakyBucketAlgorithm: class TestLeakyBucketAlgorithm:
"""Tests for LeakyBucketAlgorithm.""" """Tests for LeakyBucketAlgorithm."""
async def test_allows_requests_within_limit( async def test_allows_requests_within_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests within limit are allowed.""" """Test that requests within limit are allowed."""
algo = LeakyBucketAlgorithm(5, 60.0, backend) algo = LeakyBucketAlgorithm(5, 60.0, backend)
@@ -154,9 +142,7 @@ class TestLeakyBucketAlgorithm:
allowed, _ = await algo.check("test_key") allowed, _ = await algo.check("test_key")
assert allowed assert allowed
async def test_blocks_requests_over_limit( async def test_blocks_requests_over_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests over limit are blocked.""" """Test that requests over limit are blocked."""
algo = LeakyBucketAlgorithm(3, 60.0, backend) algo = LeakyBucketAlgorithm(3, 60.0, backend)
@@ -176,9 +162,7 @@ class TestLeakyBucketAlgorithm:
class TestSlidingWindowCounterAlgorithm: class TestSlidingWindowCounterAlgorithm:
"""Tests for SlidingWindowCounterAlgorithm.""" """Tests for SlidingWindowCounterAlgorithm."""
async def test_allows_requests_within_limit( async def test_allows_requests_within_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests within limit are allowed.""" """Test that requests within limit are allowed."""
algo = SlidingWindowCounterAlgorithm(5, 60.0, backend) algo = SlidingWindowCounterAlgorithm(5, 60.0, backend)
@@ -186,9 +170,7 @@ class TestSlidingWindowCounterAlgorithm:
allowed, _ = await algo.check("test_key") allowed, _ = await algo.check("test_key")
assert allowed assert allowed
async def test_blocks_requests_over_limit( async def test_blocks_requests_over_limit(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that requests over limit are blocked.""" """Test that requests over limit are blocked."""
algo = SlidingWindowCounterAlgorithm(3, 60.0, backend) algo = SlidingWindowCounterAlgorithm(3, 60.0, backend)
@@ -224,9 +206,7 @@ class TestGetAlgorithm:
algo = get_algorithm(Algorithm.LEAKY_BUCKET, 10, 60.0, backend) algo = get_algorithm(Algorithm.LEAKY_BUCKET, 10, 60.0, backend)
assert isinstance(algo, LeakyBucketAlgorithm) assert isinstance(algo, LeakyBucketAlgorithm)
async def test_get_sliding_window_counter( async def test_get_sliding_window_counter(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test getting sliding window counter algorithm.""" """Test getting sliding window counter algorithm."""
algo = get_algorithm(Algorithm.SLIDING_WINDOW_COUNTER, 10, 60.0, backend) algo = get_algorithm(Algorithm.SLIDING_WINDOW_COUNTER, 10, 60.0, backend)
assert isinstance(algo, SlidingWindowCounterAlgorithm) assert isinstance(algo, SlidingWindowCounterAlgorithm)
@@ -446,7 +426,8 @@ class TestSlidingWindowCounterAdvanced:
allowed, _ = await algo.check("precision_key") allowed, _ = await algo.check("precision_key")
assert not allowed assert not allowed
await asyncio.sleep(0.5) # Wait for the full window to pass to ensure tokens are fully replenished
await asyncio.sleep(1.1)
allowed, _ = await algo.check("precision_key") allowed, _ = await algo.check("precision_key")
assert allowed assert allowed
@@ -477,9 +458,7 @@ class TestAlgorithmStateManagement:
state = await algo.get_state("nonexistent_key") state = await algo.get_state("nonexistent_key")
assert state is None assert state is None
async def test_reset_restores_full_capacity( async def test_reset_restores_full_capacity(self, backend: MemoryBackend) -> None:
self, backend: MemoryBackend
) -> None:
"""Test that reset restores full capacity.""" """Test that reset restores full capacity."""
algo = TokenBucketAlgorithm(5, 60.0, backend) algo = TokenBucketAlgorithm(5, 60.0, backend)

View File

@@ -13,13 +13,16 @@ Comprehensive tests covering:
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
from typing import AsyncGenerator from typing import TYPE_CHECKING
import pytest import pytest
from fastapi_traffic.backends.memory import MemoryBackend from fastapi_traffic.backends.memory import MemoryBackend
from fastapi_traffic.backends.sqlite import SQLiteBackend from fastapi_traffic.backends.sqlite import SQLiteBackend
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
@pytest.mark.asyncio @pytest.mark.asyncio
class TestMemoryBackend: class TestMemoryBackend:
@@ -163,6 +166,7 @@ class TestMemoryBackendAdvanced:
"""Test concurrent write operations don't corrupt data.""" """Test concurrent write operations don't corrupt data."""
backend = MemoryBackend(max_size=1000) backend = MemoryBackend(max_size=1000)
try: try:
async def write_key(i: int) -> None: async def write_key(i: int) -> None:
await backend.set(f"key_{i}", {"value": i}, ttl=60.0) await backend.set(f"key_{i}", {"value": i}, ttl=60.0)
@@ -302,6 +306,7 @@ class TestSQLiteBackendAdvanced:
backend = SQLiteBackend(":memory:") backend = SQLiteBackend(":memory:")
await backend.initialize() await backend.initialize()
try: try:
async def write_key(i: int) -> None: async def write_key(i: int) -> None:
await backend.set(f"key_{i}", {"value": i}, ttl=60.0) await backend.set(f"key_{i}", {"value": i}, ttl=60.0)
@@ -377,7 +382,9 @@ class TestBackendInterface:
"""Tests to verify backend interface consistency.""" """Tests to verify backend interface consistency."""
@pytest.fixture @pytest.fixture
async def backends(self) -> AsyncGenerator[list[MemoryBackend | SQLiteBackend], None]: async def backends(
self,
) -> AsyncGenerator[list[MemoryBackend | SQLiteBackend], None]:
"""Create all backend types for testing.""" """Create all backend types for testing."""
memory = MemoryBackend() memory = MemoryBackend()
sqlite = SQLiteBackend(":memory:") sqlite = SQLiteBackend(":memory:")

View File

@@ -143,9 +143,7 @@ class TestConfigLoaderEnv:
assert config.exempt_ips == {"127.0.0.1", "192.168.1.1", "10.0.0.1"} assert config.exempt_ips == {"127.0.0.1", "192.168.1.1", "10.0.0.1"}
assert config.exempt_paths == {"/health", "/metrics"} assert config.exempt_paths == {"/health", "/metrics"}
def test_load_global_config_from_env_empty_sets( def test_load_global_config_from_env_empty_sets(self, loader: ConfigLoader) -> None:
self, loader: ConfigLoader
) -> None:
"""Test loading GlobalConfig with empty set fields.""" """Test loading GlobalConfig with empty set fields."""
env_vars = { env_vars = {
"FASTAPI_TRAFFIC_GLOBAL_EXEMPT_IPS": "", "FASTAPI_TRAFFIC_GLOBAL_EXEMPT_IPS": "",

View File

@@ -14,7 +14,7 @@ Comprehensive tests covering:
from __future__ import annotations from __future__ import annotations
from typing import AsyncGenerator from typing import TYPE_CHECKING
import pytest import pytest
from fastapi import FastAPI, Request from fastapi import FastAPI, Request
@@ -23,12 +23,15 @@ from httpx import ASGITransport, AsyncClient
from fastapi_traffic import ( from fastapi_traffic import (
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
rate_limit, rate_limit,
) )
from fastapi_traffic.core.limiter import set_limiter from fastapi_traffic.core.limiter import set_limiter
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
class TestRateLimitDecorator: class TestRateLimitDecorator:
"""Tests for the @rate_limit decorator.""" """Tests for the @rate_limit decorator."""
@@ -175,9 +178,7 @@ class TestCustomKeyExtractor:
) -> None: ) -> None:
"""Test that different API keys have separate rate limits.""" """Test that different API keys have separate rate limits."""
for _ in range(2): for _ in range(2):
response = await client.get( response = await client.get("/by-api-key", headers={"X-API-Key": "key-a"})
"/by-api-key", headers={"X-API-Key": "key-a"}
)
assert response.status_code == 200 assert response.status_code == 200
response = await client.get("/by-api-key", headers={"X-API-Key": "key-a"}) response = await client.get("/by-api-key", headers={"X-API-Key": "key-a"})
@@ -186,9 +187,7 @@ class TestCustomKeyExtractor:
response = await client.get("/by-api-key", headers={"X-API-Key": "key-b"}) response = await client.get("/by-api-key", headers={"X-API-Key": "key-b"})
assert response.status_code == 200 assert response.status_code == 200
async def test_anonymous_key_for_missing_header( async def test_anonymous_key_for_missing_header(self, client: AsyncClient) -> None:
self, client: AsyncClient
) -> None:
"""Test that missing API key uses anonymous.""" """Test that missing API key uses anonymous."""
for _ in range(2): for _ in range(2):
response = await client.get("/by-api-key") response = await client.get("/by-api-key")
@@ -255,8 +254,6 @@ class TestExemptionCallback:
assert response.status_code == 429 assert response.status_code == 429
class TestCostParameter: class TestCostParameter:
"""Tests for the cost parameter.""" """Tests for the cost parameter."""

View File

@@ -8,7 +8,7 @@ from __future__ import annotations
import asyncio import asyncio
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from typing import AsyncGenerator from typing import TYPE_CHECKING
import pytest import pytest
from fastapi import FastAPI, Request from fastapi import FastAPI, Request
@@ -18,14 +18,17 @@ from httpx import ASGITransport, AsyncClient
from fastapi_traffic import ( from fastapi_traffic import (
Algorithm, Algorithm,
MemoryBackend, MemoryBackend,
RateLimitExceeded,
RateLimiter, RateLimiter,
RateLimitExceeded,
rate_limit, rate_limit,
) )
from fastapi_traffic.core.config import RateLimitConfig from fastapi_traffic.core.config import RateLimitConfig
from fastapi_traffic.core.limiter import set_limiter from fastapi_traffic.core.limiter import set_limiter
from fastapi_traffic.middleware import RateLimitMiddleware from fastapi_traffic.middleware import RateLimitMiddleware
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
class TestFullApplicationFlow: class TestFullApplicationFlow:
"""Integration tests for a complete application setup.""" """Integration tests for a complete application setup."""
@@ -128,9 +131,7 @@ class TestFullApplicationFlow:
) )
assert response.status_code == 200 assert response.status_code == 200
async def test_basic_rate_limiting_works( async def test_basic_rate_limiting_works(self, client: AsyncClient) -> None:
self, client: AsyncClient
) -> None:
"""Test that basic rate limiting is functional.""" """Test that basic rate limiting is functional."""
# Make a request and verify it works # Make a request and verify it works
response = await client.get("/api/v1/users/1") response = await client.get("/api/v1/users/1")

View File

@@ -13,7 +13,7 @@ Comprehensive tests covering:
from __future__ import annotations from __future__ import annotations
from typing import AsyncGenerator from typing import TYPE_CHECKING
import pytest import pytest
from fastapi import FastAPI from fastapi import FastAPI
@@ -26,6 +26,9 @@ from fastapi_traffic.middleware import (
TokenBucketMiddleware, TokenBucketMiddleware,
) )
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
class TestRateLimitMiddleware: class TestRateLimitMiddleware:
"""Tests for RateLimitMiddleware.""" """Tests for RateLimitMiddleware."""
@@ -81,7 +84,9 @@ class TestRateLimitMiddleware:
assert "X-RateLimit-Remaining" in response.headers assert "X-RateLimit-Remaining" in response.headers
assert "X-RateLimit-Reset" in response.headers assert "X-RateLimit-Reset" in response.headers
async def test_different_endpoints_counted_separately(self, client: AsyncClient) -> None: async def test_different_endpoints_counted_separately(
self, client: AsyncClient
) -> None:
"""Test that different endpoints are counted separately by path.""" """Test that different endpoints are counted separately by path."""
# Middleware includes path in the key by default # Middleware includes path in the key by default
for _ in range(3): for _ in range(3):
@@ -224,14 +229,10 @@ class TestMiddlewareCustomKeyExtractor:
) )
assert response.status_code == 200 assert response.status_code == 200
response = await client.get( response = await client.get("/api/resource", headers={"X-User-ID": "user-1"})
"/api/resource", headers={"X-User-ID": "user-1"}
)
assert response.status_code == 429 assert response.status_code == 429
response = await client.get( response = await client.get("/api/resource", headers={"X-User-ID": "user-2"})
"/api/resource", headers={"X-User-ID": "user-2"}
)
assert response.status_code == 200 assert response.status_code == 200
@@ -313,7 +314,9 @@ class TestMiddlewareErrorHandling:
return app return app
@pytest.fixture @pytest.fixture
async def client(self, app_skip_on_error: FastAPI) -> AsyncGenerator[AsyncClient, None]: async def client(
self, app_skip_on_error: FastAPI
) -> AsyncGenerator[AsyncClient, None]:
"""Create test client.""" """Create test client."""
transport = ASGITransport(app=app_skip_on_error) transport = ASGITransport(app=app_skip_on_error)
async with AsyncClient(transport=transport, base_url="http://test") as client: async with AsyncClient(transport=transport, base_url="http://test") as client:

82
uv.lock generated
View File

@@ -52,6 +52,50 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" }, { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" },
] ]
[[package]]
name = "black"
version = "25.12.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
{ name = "mypy-extensions" },
{ name = "packaging" },
{ name = "pathspec" },
{ name = "platformdirs" },
{ name = "pytokens" },
{ name = "tomli", marker = "python_full_version < '3.11'" },
{ name = "typing-extensions", marker = "python_full_version < '3.11'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c4/d9/07b458a3f1c525ac392b5edc6b191ff140b596f9d77092429417a54e249d/black-25.12.0.tar.gz", hash = "sha256:8d3dd9cea14bff7ddc0eb243c811cdb1a011ebb4800a5f0335a01a68654796a7", size = 659264, upload-time = "2025-12-08T01:40:52.501Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/37/d5/8d3145999d380e5d09bb00b0f7024bf0a8ccb5c07b5648e9295f02ec1d98/black-25.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f85ba1ad15d446756b4ab5f3044731bf68b777f8f9ac9cdabd2425b97cd9c4e8", size = 1895720, upload-time = "2025-12-08T01:46:58.197Z" },
{ url = "https://files.pythonhosted.org/packages/06/97/7acc85c4add41098f4f076b21e3e4e383ad6ed0a3da26b2c89627241fc11/black-25.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:546eecfe9a3a6b46f9d69d8a642585a6eaf348bcbbc4d87a19635570e02d9f4a", size = 1727193, upload-time = "2025-12-08T01:52:26.674Z" },
{ url = "https://files.pythonhosted.org/packages/24/f0/fdf0eb8ba907ddeb62255227d29d349e8256ef03558fbcadfbc26ecfe3b2/black-25.12.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:17dcc893da8d73d8f74a596f64b7c98ef5239c2cd2b053c0f25912c4494bf9ea", size = 1774506, upload-time = "2025-12-08T01:46:25.721Z" },
{ url = "https://files.pythonhosted.org/packages/e4/f5/9203a78efe00d13336786b133c6180a9303d46908a9aa72d1104ca214222/black-25.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:09524b0e6af8ba7a3ffabdfc7a9922fb9adef60fed008c7cd2fc01f3048e6e6f", size = 1416085, upload-time = "2025-12-08T01:46:06.073Z" },
{ url = "https://files.pythonhosted.org/packages/ba/cc/7a6090e6b081c3316282c05c546e76affdce7bf7a3b7d2c3a2a69438bd01/black-25.12.0-cp310-cp310-win_arm64.whl", hash = "sha256:b162653ed89eb942758efeb29d5e333ca5bb90e5130216f8369857db5955a7da", size = 1226038, upload-time = "2025-12-08T01:45:29.388Z" },
{ url = "https://files.pythonhosted.org/packages/60/ad/7ac0d0e1e0612788dbc48e62aef8a8e8feffac7eb3d787db4e43b8462fa8/black-25.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d0cfa263e85caea2cff57d8f917f9f51adae8e20b610e2b23de35b5b11ce691a", size = 1877003, upload-time = "2025-12-08T01:43:29.967Z" },
{ url = "https://files.pythonhosted.org/packages/e8/dd/a237e9f565f3617a88b49284b59cbca2a4f56ebe68676c1aad0ce36a54a7/black-25.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1a2f578ae20c19c50a382286ba78bfbeafdf788579b053d8e4980afb079ab9be", size = 1712639, upload-time = "2025-12-08T01:52:46.756Z" },
{ url = "https://files.pythonhosted.org/packages/12/80/e187079df1ea4c12a0c63282ddd8b81d5107db6d642f7d7b75a6bcd6fc21/black-25.12.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e1b65634b0e471d07ff86ec338819e2ef860689859ef4501ab7ac290431f9b", size = 1758143, upload-time = "2025-12-08T01:45:29.137Z" },
{ url = "https://files.pythonhosted.org/packages/93/b5/3096ccee4f29dc2c3aac57274326c4d2d929a77e629f695f544e159bfae4/black-25.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a3fa71e3b8dd9f7c6ac4d818345237dfb4175ed3bf37cd5a581dbc4c034f1ec5", size = 1420698, upload-time = "2025-12-08T01:45:53.379Z" },
{ url = "https://files.pythonhosted.org/packages/7e/39/f81c0ffbc25ffbe61c7d0385bf277e62ffc3e52f5ee668d7369d9854fadf/black-25.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:51e267458f7e650afed8445dc7edb3187143003d52a1b710c7321aef22aa9655", size = 1229317, upload-time = "2025-12-08T01:46:35.606Z" },
{ url = "https://files.pythonhosted.org/packages/d1/bd/26083f805115db17fda9877b3c7321d08c647df39d0df4c4ca8f8450593e/black-25.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:31f96b7c98c1ddaeb07dc0f56c652e25bdedaac76d5b68a059d998b57c55594a", size = 1924178, upload-time = "2025-12-08T01:49:51.048Z" },
{ url = "https://files.pythonhosted.org/packages/89/6b/ea00d6651561e2bdd9231c4177f4f2ae19cc13a0b0574f47602a7519b6ca/black-25.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05dd459a19e218078a1f98178c13f861fe6a9a5f88fc969ca4d9b49eb1809783", size = 1742643, upload-time = "2025-12-08T01:49:59.09Z" },
{ url = "https://files.pythonhosted.org/packages/6d/f3/360fa4182e36e9875fabcf3a9717db9d27a8d11870f21cff97725c54f35b/black-25.12.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1f68c5eff61f226934be6b5b80296cf6939e5d2f0c2f7d543ea08b204bfaf59", size = 1800158, upload-time = "2025-12-08T01:44:27.301Z" },
{ url = "https://files.pythonhosted.org/packages/f8/08/2c64830cb6616278067e040acca21d4f79727b23077633953081c9445d61/black-25.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:274f940c147ddab4442d316b27f9e332ca586d39c85ecf59ebdea82cc9ee8892", size = 1426197, upload-time = "2025-12-08T01:45:51.198Z" },
{ url = "https://files.pythonhosted.org/packages/d4/60/a93f55fd9b9816b7432cf6842f0e3000fdd5b7869492a04b9011a133ee37/black-25.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:169506ba91ef21e2e0591563deda7f00030cb466e747c4b09cb0a9dae5db2f43", size = 1237266, upload-time = "2025-12-08T01:45:10.556Z" },
{ url = "https://files.pythonhosted.org/packages/c8/52/c551e36bc95495d2aa1a37d50566267aa47608c81a53f91daa809e03293f/black-25.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a05ddeb656534c3e27a05a29196c962877c83fa5503db89e68857d1161ad08a5", size = 1923809, upload-time = "2025-12-08T01:46:55.126Z" },
{ url = "https://files.pythonhosted.org/packages/a0/f7/aac9b014140ee56d247e707af8db0aae2e9efc28d4a8aba92d0abd7ae9d1/black-25.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9ec77439ef3e34896995503865a85732c94396edcc739f302c5673a2315e1e7f", size = 1742384, upload-time = "2025-12-08T01:49:37.022Z" },
{ url = "https://files.pythonhosted.org/packages/74/98/38aaa018b2ab06a863974c12b14a6266badc192b20603a81b738c47e902e/black-25.12.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e509c858adf63aa61d908061b52e580c40eae0dfa72415fa47ac01b12e29baf", size = 1798761, upload-time = "2025-12-08T01:46:05.386Z" },
{ url = "https://files.pythonhosted.org/packages/16/3a/a8ac542125f61574a3f015b521ca83b47321ed19bb63fe6d7560f348bfe1/black-25.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:252678f07f5bac4ff0d0e9b261fbb029fa530cfa206d0a636a34ab445ef8ca9d", size = 1429180, upload-time = "2025-12-08T01:45:34.903Z" },
{ url = "https://files.pythonhosted.org/packages/e6/2d/bdc466a3db9145e946762d52cd55b1385509d9f9004fec1c97bdc8debbfb/black-25.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:bc5b1c09fe3c931ddd20ee548511c64ebf964ada7e6f0763d443947fd1c603ce", size = 1239350, upload-time = "2025-12-08T01:46:09.458Z" },
{ url = "https://files.pythonhosted.org/packages/35/46/1d8f2542210c502e2ae1060b2e09e47af6a5e5963cb78e22ec1a11170b28/black-25.12.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:0a0953b134f9335c2434864a643c842c44fba562155c738a2a37a4d61f00cad5", size = 1917015, upload-time = "2025-12-08T01:53:27.987Z" },
{ url = "https://files.pythonhosted.org/packages/41/37/68accadf977672beb8e2c64e080f568c74159c1aaa6414b4cd2aef2d7906/black-25.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2355bbb6c3b76062870942d8cc450d4f8ac71f9c93c40122762c8784df49543f", size = 1741830, upload-time = "2025-12-08T01:54:36.861Z" },
{ url = "https://files.pythonhosted.org/packages/ac/76/03608a9d8f0faad47a3af3a3c8c53af3367f6c0dd2d23a84710456c7ac56/black-25.12.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9678bd991cc793e81d19aeeae57966ee02909877cb65838ccffef24c3ebac08f", size = 1791450, upload-time = "2025-12-08T01:44:52.581Z" },
{ url = "https://files.pythonhosted.org/packages/06/99/b2a4bd7dfaea7964974f947e1c76d6886d65fe5d24f687df2d85406b2609/black-25.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:97596189949a8aad13ad12fcbb4ae89330039b96ad6742e6f6b45e75ad5cfd83", size = 1452042, upload-time = "2025-12-08T01:46:13.188Z" },
{ url = "https://files.pythonhosted.org/packages/b2/7c/d9825de75ae5dd7795d007681b752275ea85a1c5d83269b4b9c754c2aaab/black-25.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:778285d9ea197f34704e3791ea9404cd6d07595745907dd2ce3da7a13627b29b", size = 1267446, upload-time = "2025-12-08T01:46:14.497Z" },
{ url = "https://files.pythonhosted.org/packages/68/11/21331aed19145a952ad28fca2756a1433ee9308079bd03bd898e903a2e53/black-25.12.0-py3-none-any.whl", hash = "sha256:48ceb36c16dbc84062740049eef990bb2ce07598272e673c17d1a7720c71c828", size = 206191, upload-time = "2025-12-08T01:40:50.963Z" },
]
[[package]] [[package]]
name = "certifi" name = "certifi"
version = "2026.1.4" version = "2026.1.4"
@@ -246,6 +290,7 @@ redis = [
[package.dev-dependencies] [package.dev-dependencies]
dev = [ dev = [
{ name = "black" },
{ name = "fastapi" }, { name = "fastapi" },
{ name = "httpx" }, { name = "httpx" },
{ name = "pytest" }, { name = "pytest" },
@@ -274,6 +319,7 @@ provides-extras = ["redis", "fastapi", "all", "dev"]
[package.metadata.requires-dev] [package.metadata.requires-dev]
dev = [ dev = [
{ name = "black", specifier = ">=25.12.0" },
{ name = "fastapi", specifier = ">=0.128.0" }, { name = "fastapi", specifier = ">=0.128.0" },
{ name = "httpx", specifier = ">=0.28.1" }, { name = "httpx", specifier = ">=0.28.1" },
{ name = "pytest", specifier = ">=9.0.2" }, { name = "pytest", specifier = ">=9.0.2" },
@@ -336,6 +382,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
] ]
[[package]]
name = "mypy-extensions"
version = "1.1.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
]
[[package]] [[package]]
name = "nodeenv" name = "nodeenv"
version = "1.10.0" version = "1.10.0"
@@ -354,6 +409,24 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
] ]
[[package]]
name = "pathspec"
version = "1.0.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/4c/b2/bb8e495d5262bfec41ab5cb18f522f1012933347fb5d9e62452d446baca2/pathspec-1.0.3.tar.gz", hash = "sha256:bac5cf97ae2c2876e2d25ebb15078eb04d76e4b98921ee31c6f85ade8b59444d", size = 130841, upload-time = "2026-01-09T15:46:46.009Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/32/2b/121e912bd60eebd623f873fd090de0e84f322972ab25a7f9044c056804ed/pathspec-1.0.3-py3-none-any.whl", hash = "sha256:e80767021c1cc524aa3fb14bedda9c34406591343cc42797b386ce7b9354fb6c", size = 55021, upload-time = "2026-01-09T15:46:44.652Z" },
]
[[package]]
name = "platformdirs"
version = "4.5.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" },
]
[[package]] [[package]]
name = "pluggy" name = "pluggy"
version = "1.6.0" version = "1.6.0"
@@ -564,6 +637,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" },
] ]
[[package]]
name = "pytokens"
version = "0.3.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/4e/8d/a762be14dae1c3bf280202ba3172020b2b0b4c537f94427435f19c413b72/pytokens-0.3.0.tar.gz", hash = "sha256:2f932b14ed08de5fcf0b391ace2642f858f1394c0857202959000b68ed7a458a", size = 17644, upload-time = "2025-11-05T13:36:35.34Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/84/25/d9db8be44e205a124f6c98bc0324b2bb149b7431c53877fc6d1038dddaf5/pytokens-0.3.0-py3-none-any.whl", hash = "sha256:95b2b5eaf832e469d141a378872480ede3f251a5a5041b8ec6e581d3ac71bbf3", size = 12195, upload-time = "2025-11-05T13:36:33.183Z" },
]
[[package]] [[package]]
name = "redis" name = "redis"
version = "7.1.0" version = "7.1.0"