Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .env.dist
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ NEW_ROUTE_PATH=

# Rate limiting
RETRY_AFTER_HEADER=Retry-After
BLIZZARD_RATE_LIMIT_KEY=blizzard-rate-limit
BLIZZARD_RATE_LIMIT_RETRY_AFTER=5
RATE_LIMIT_PER_SECOND_PER_IP=30
RATE_LIMIT_PER_IP_BURST=5
Expand All @@ -30,6 +31,13 @@ VALKEY_MEMORY_LIMIT=1gb
# Persistent Storage (SQLite)
# Use ":memory:" for in-memory database (testing/ephemeral deployments)
STORAGE_PATH=/code/data/overfast.db

# SQLite memory-mapped I/O size in bytes (performance tuning)
# Improves read performance by mapping database pages directly into memory
# Default: 0 (disabled) - Recommended for production: 268435456 (256MB) to 1073741824 (1GB)
# Set according to your database size and available RAM
SQLITE_MMAP_SIZE=0

UNKNOWN_PLAYER_BASE_RETRY_AFTER=600
UNKNOWN_PLAYER_RETRY_MULTIPLIER=3
UNKNOWN_PLAYER_MAX_RETRY_AFTER=21600
Expand Down
15 changes: 12 additions & 3 deletions app/adapters/blizzard/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,10 @@ async def get(
) -> httpx.Response:
"""Make an HTTP GET request with custom headers and retrieve the result"""

# First, check if we're being rate limited
# Check if we're being rate limited
# Note: Nginx also checks this on cache miss, but this check remains for:
# - Race conditions (multiple requests in flight when rate limit is set)
# - Defense in depth
await self._check_rate_limit()

# Prepare kwargs
Expand Down Expand Up @@ -122,8 +125,14 @@ async def aclose(self) -> None:
await self.close()

async def _check_rate_limit(self) -> None:
"""Make sure we're not being rate limited by Blizzard before making
any API call. Else, return an HTTP 429 with Retry-After header.
"""Check if we're being rate limited by Blizzard before making any API call.

Raises an HTTP 429 response with a Retry-After header if rate limited.

Note: Nginx also performs this check on API cache miss for better performance,
but this method remains necessary for:
- Race conditions (concurrent requests when rate limit is first set)
- Defense in depth (if nginx check fails or is bypassed)
"""
if await self.cache_manager.is_being_rate_limited():
raise self._too_many_requests_response(
Expand Down
6 changes: 6 additions & 0 deletions app/adapters/storage/sqlite_storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,12 @@ async def _get_connection(self) -> AsyncIterator[aiosqlite.Connection]:
# Acceptable for cache data that can be re-fetched from Blizzard
await db.execute("PRAGMA synchronous=NORMAL")

# Configure memory-mapped I/O if enabled
# This can significantly improve read performance by mapping database
# pages directly into memory
if settings.sqlite_mmap_size > 0:
await db.execute(f"PRAGMA mmap_size={settings.sqlite_mmap_size}")
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

security (python.sqlalchemy.security.sqlalchemy-execute-raw-query): Avoiding SQL string concatenation: untrusted input concatenated with raw SQL query can result in SQL Injection. In order to execute raw query safely, prepared statement should be used. SQLAlchemy provides TextualSQL to easily used prepared statement with named parameters. For complex SQL composition, use SQL Expression Language or Schema Definition Language. In most cases, SQLAlchemy ORM will be a better option.

Source: opengrep


yield db
except Exception as e:
# Track connection errors
Expand Down
3 changes: 3 additions & 0 deletions app/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,9 @@ class Settings(BaseSettings):
# Use ":memory:" for in-memory database (testing/ephemeral deployments)
storage_path: str = "data/overfast.db"

# SQLite memory-mapped I/O size in bytes (optional performance tuning)
sqlite_mmap_size: int = 0

# Unknown player exponential backoff configuration
unknown_player_initial_retry: int = 600 # 10 minutes (first check)
unknown_player_retry_multiplier: int = 3 # retry_after *= 3 each check
Expand Down
7 changes: 6 additions & 1 deletion build/nginx/entrypoint.sh
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,11 @@ set -o pipefail 2>/dev/null || true
: "${NGINX_WORKER_CONNECTIONS:=1024}"
: "${NGINX_MULTI_ACCEPT:=true}"

# Set defaults for rate limiting variables if not provided
: "${BLIZZARD_RATE_LIMIT_KEY:=blizzard-rate-limit}"
: "${BLIZZARD_RATE_LIMIT_RETRY_AFTER:=5}"
: "${RETRY_AFTER_HEADER:=Retry-After}"

# Convert NGINX_WORKER_PROCESSES: 0 → "auto" (nginx auto-detect syntax)
if [ "$NGINX_WORKER_PROCESSES" = "0" ]; then
NGINX_WORKER_PROCESSES_VALUE="auto"
Expand Down Expand Up @@ -50,7 +55,7 @@ envsubst '${NGINX_WORKER_PROCESSES_VALUE} ${NGINX_WORKER_CONNECTIONS} ${NGINX_MU

# Replace placeholders and generate config and lua script from templates
envsubst '${RATE_LIMIT_PER_SECOND_PER_IP} ${RATE_LIMIT_PER_IP_BURST} ${MAX_CONNECTIONS_PER_IP} ${RETRY_AFTER_HEADER} ${PROMETHEUS_LUA_SHARED_DICT} ${PROMETHEUS_INIT_WORKER} ${PROMETHEUS_LOG_BY_LUA} ${PROMETHEUS_METRICS_SERVER}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
envsubst '${VALKEY_HOST} ${VALKEY_PORT} ${CACHE_TTL_HEADER} ${PROMETHEUS_ENABLED}' < /usr/local/openresty/lualib/valkey_handler.lua.template > /usr/local/openresty/lualib/valkey_handler.lua
envsubst '${VALKEY_HOST} ${VALKEY_PORT} ${CACHE_TTL_HEADER} ${BLIZZARD_RATE_LIMIT_KEY} ${BLIZZARD_RATE_LIMIT_RETRY_AFTER} ${RETRY_AFTER_HEADER}' < /usr/local/openresty/lualib/valkey_handler.lua.template > /usr/local/openresty/lualib/valkey_handler.lua

# Check OpenResty config before starting
openresty -t
Expand Down
30 changes: 29 additions & 1 deletion build/nginx/valkey_handler.lua.template
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,37 @@ local function handle_valkey_request()
local compressed_value = results[1]
local ttl = results[2]

-- Cache miss
-- Cache miss - check global rate limit before forwarding to app
if not compressed_value or compressed_value == ngx.null then
ngx.log(ngx.INFO, "Cache miss for key: ", key)

-- Check if Blizzard is currently rate limiting us
local rate_limit_key = "${BLIZZARD_RATE_LIMIT_KEY}"
local rate_limit_exists, rl_err = valk:exists(rate_limit_key)
if rl_err then
ngx.log(ngx.ERR, "Failed to check rate limit key: ", rl_err)
cleanup()
ngx.exec("@fallback")
return
end

-- If rate limited, get TTL and return 429 immediately
if rate_limit_exists == 1 then
local rate_limit_ttl, ttl_err = valk:ttl(rate_limit_key)
if ttl_err or not rate_limit_ttl or rate_limit_ttl < 0 then
-- If TTL check fails or returns invalid value, use default
ngx.log(ngx.WARN, "Failed to get rate limit TTL or invalid value: ", tostring(ttl_err or rate_limit_ttl))
rate_limit_ttl = ${BLIZZARD_RATE_LIMIT_RETRY_AFTER}
end

ngx.log(ngx.WARN, "Blizzard rate limit active, returning 429 from nginx")
ngx.header["${RETRY_AFTER_HEADER}"] = rate_limit_ttl
ngx.status = ngx.HTTP_TOO_MANY_REQUESTS
ngx.say('{"error":"API has been rate limited by Blizzard, please wait for ' .. rate_limit_ttl .. ' seconds before retrying"}')
cleanup()
return
end

cleanup()
ngx.exec("@fallback")
return
Expand Down
2 changes: 1 addition & 1 deletion uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.