
Commit 2405baf

disnet and claude authored
Add per-user rate limiting for API requests (#14)
* Implement per-user rate limiting for authenticated API requests

  Add D1-based rate limiting with three tiers: expensive endpoints (30/min), standard (100/min), and light (300/min). Includes atomic increment via SQL, automatic window cleanup via cron, and proper 429 response headers.

* Add D1 migrations to deploy workflow

  Run migrations automatically before deploying the worker for both staging and production environments.

* Make migrations idempotent and add continue-on-error

  - Add IF NOT EXISTS to CREATE TABLE/INDEX statements in all migrations
  - Add continue-on-error to migration steps in deploy workflow
  - This handles the case where migrations were previously applied manually

* Add bootstrap migration to mark legacy migrations as applied

  The bootstrap runs via d1 execute (not d1 migrations apply) to populate the d1_migrations table with records for all previously manually-applied migrations. This allows d1 migrations apply to skip them and only run the new migrations (0015, 0016).

* Add batch feed fetching to reduce API calls on login

  The frontend now fetches all ready feeds in a single batch request instead of making N individual requests. This prevents rate limiting issues when users have many subscriptions. Pending feeds are still fetched gradually.

Co-authored-by: Claude Haiku 4.5 <noreply@anthropic.com>
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
1 parent b545a5a commit 2405baf
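The batch feed fetch described in the last bullet is a frontend change whose diff is not shown below. A minimal sketch of the pattern it describes — the /api/feeds/batch path, request body, response shape, and fetchReadyFeeds name are all assumptions for illustration, not the actual code:

// Sketch only: endpoint path and payload shape are assumptions.
interface BatchFeedResponse {
  [feedUrl: string]: { guid: string; title: string; url: string }[];
}

async function fetchReadyFeeds(
  apiBase: string,
  feedUrls: string[],
  sessionToken: string,
): Promise<BatchFeedResponse> {
  // One POST for every cached ("ready") feed, replacing N per-feed
  // requests that could trip the per-user rate limiter on login.
  const res = await fetch(`${apiBase}/api/feeds/batch`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${sessionToken}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ feeds: feedUrls }),
  });
  if (!res.ok) throw new Error(`Batch feed fetch failed: ${res.status}`);
  return res.json();
}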

File tree

15 files changed: +478 −38 lines

.github/workflows/deploy.yml

Lines changed: 36 additions & 0 deletions
@@ -70,6 +70,24 @@ jobs:
         working-directory: backend
         run: npm ci
 
+      - name: Bootstrap D1 migrations table (staging)
+        if: needs.changes.outputs.environment == 'staging'
+        uses: cloudflare/wrangler-action@v3
+        with:
+          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+          workingDirectory: backend
+          command: d1 execute skyreader-staging --env=staging --remote --file=migrations/0000_bootstrap.sql
+
+      - name: Run D1 migrations (staging)
+        if: needs.changes.outputs.environment == 'staging'
+        uses: cloudflare/wrangler-action@v3
+        with:
+          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+          workingDirectory: backend
+          command: d1 migrations apply skyreader-staging --env=staging --remote
+
       - name: Deploy Worker (staging)
         if: needs.changes.outputs.environment == 'staging'
         uses: cloudflare/wrangler-action@v3
@@ -79,6 +97,24 @@ jobs:
           workingDirectory: backend
           command: deploy --env=staging
 
+      - name: Bootstrap D1 migrations table (production)
+        if: needs.changes.outputs.environment != 'staging'
+        uses: cloudflare/wrangler-action@v3
+        with:
+          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+          workingDirectory: backend
+          command: d1 execute skyreader --remote --file=migrations/0000_bootstrap.sql
+
+      - name: Run D1 migrations (production)
+        if: needs.changes.outputs.environment != 'staging'
+        uses: cloudflare/wrangler-action@v3
+        with:
+          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+          workingDirectory: backend
+          command: d1 migrations apply skyreader --remote
+
       - name: Deploy Worker (production)
         if: needs.changes.outputs.environment != 'staging'
         uses: cloudflare/wrangler-action@v3
backend/migrations/0000_bootstrap.sql

Lines changed: 26 additions & 0 deletions

@@ -0,0 +1,26 @@
+-- Bootstrap: Mark all legacy migrations as applied
+-- This handles the case where migrations were applied manually via d1 execute
+-- before switching to d1 migrations apply
+
+-- Create the migrations tracking table if it doesn't exist
+CREATE TABLE IF NOT EXISTS d1_migrations (
+  id INTEGER PRIMARY KEY AUTOINCREMENT,
+  name TEXT UNIQUE NOT NULL,
+  applied_at TEXT NOT NULL DEFAULT (datetime('now'))
+);
+
+-- Insert records for all legacy migrations (ignore if already exists)
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0001_initial.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0002_scheduled_feeds.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0003_read_positions_cache.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0004_follows_sync.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0005_share_guid_published.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0006_share_feed_url.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0007_share_content.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0008_sessions.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0009_oauth_state.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0010_feed_cache.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0011_feed_items.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0012_inapp_follows.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0013_share_read_positions_cache.sql');
+INSERT OR IGNORE INTO d1_migrations (name) VALUES ('0014_feed_metadata_image_url.sql');

backend/migrations/0001_initial.sql

Lines changed: 10 additions & 10 deletions
@@ -1,5 +1,5 @@
 -- Users table - caches AT Protocol user information
-CREATE TABLE users (
+CREATE TABLE IF NOT EXISTS users (
   did TEXT PRIMARY KEY,
   handle TEXT NOT NULL,
   display_name TEXT,
@@ -10,21 +10,21 @@ CREATE TABLE users (
   updated_at INTEGER NOT NULL DEFAULT (unixepoch())
 );
 
-CREATE INDEX idx_users_handle ON users(handle);
+CREATE INDEX IF NOT EXISTS idx_users_handle ON users(handle);
 
 -- Follows cache - which users follow whom (from Bluesky)
-CREATE TABLE follows_cache (
+CREATE TABLE IF NOT EXISTS follows_cache (
   follower_did TEXT NOT NULL,
   following_did TEXT NOT NULL,
   created_at INTEGER NOT NULL DEFAULT (unixepoch()),
   PRIMARY KEY (follower_did, following_did),
   FOREIGN KEY (follower_did) REFERENCES users(did) ON DELETE CASCADE
 );
 
-CREATE INDEX idx_follows_following ON follows_cache(following_did);
+CREATE INDEX IF NOT EXISTS idx_follows_following ON follows_cache(following_did);
 
 -- Aggregated shares from all users
-CREATE TABLE shares (
+CREATE TABLE IF NOT EXISTS shares (
   id INTEGER PRIMARY KEY AUTOINCREMENT,
   author_did TEXT NOT NULL,
   record_uri TEXT UNIQUE NOT NULL,
@@ -41,12 +41,12 @@ CREATE TABLE shares (
   FOREIGN KEY (author_did) REFERENCES users(did) ON DELETE CASCADE
 );
 
-CREATE INDEX idx_shares_author ON shares(author_did);
-CREATE INDEX idx_shares_created ON shares(created_at DESC);
-CREATE INDEX idx_shares_item_url ON shares(item_url);
+CREATE INDEX IF NOT EXISTS idx_shares_author ON shares(author_did);
+CREATE INDEX IF NOT EXISTS idx_shares_created ON shares(created_at DESC);
+CREATE INDEX IF NOT EXISTS idx_shares_item_url ON shares(item_url);
 
 -- Feed metadata cache (for proxy optimization)
-CREATE TABLE feed_metadata (
+CREATE TABLE IF NOT EXISTS feed_metadata (
   feed_url TEXT PRIMARY KEY,
   title TEXT,
   site_url TEXT,
@@ -60,7 +60,7 @@ CREATE TABLE feed_metadata (
 );
 
 -- Sync state tracking (e.g., Jetstream cursor)
-CREATE TABLE sync_state (
+CREATE TABLE IF NOT EXISTS sync_state (
   key TEXT PRIMARY KEY,
   value TEXT NOT NULL,
   updated_at INTEGER NOT NULL DEFAULT (unixepoch())

backend/migrations/0002_scheduled_feeds.sql

Lines changed: 3 additions & 3 deletions
@@ -7,7 +7,7 @@ CREATE INDEX IF NOT EXISTS idx_users_last_active ON users(last_active_at);
 
 -- Cache of user subscriptions (synced from AT Protocol)
 -- Used by scheduled feed fetcher to know which feeds to refresh
-CREATE TABLE subscriptions_cache (
+CREATE TABLE IF NOT EXISTS subscriptions_cache (
   id INTEGER PRIMARY KEY AUTOINCREMENT,
   user_did TEXT NOT NULL,
   record_uri TEXT UNIQUE NOT NULL,
@@ -17,8 +17,8 @@ CREATE TABLE subscriptions_cache (
   FOREIGN KEY (user_did) REFERENCES users(did) ON DELETE CASCADE
 );
 
-CREATE INDEX idx_subscriptions_cache_user ON subscriptions_cache(user_did);
-CREATE INDEX idx_subscriptions_cache_feed_url ON subscriptions_cache(feed_url);
+CREATE INDEX IF NOT EXISTS idx_subscriptions_cache_user ON subscriptions_cache(user_did);
+CREATE INDEX IF NOT EXISTS idx_subscriptions_cache_feed_url ON subscriptions_cache(feed_url);
 
 -- Add scheduled fetch tracking to feed_metadata
 ALTER TABLE feed_metadata ADD COLUMN last_scheduled_fetch_at INTEGER;

backend/migrations/0003_read_positions_cache.sql

Lines changed: 4 additions & 4 deletions
@@ -1,6 +1,6 @@
 -- Cache of user read positions (synced from AT Protocol)
 -- Used to dedupe before syncing to PDS
-CREATE TABLE read_positions_cache (
+CREATE TABLE IF NOT EXISTS read_positions_cache (
   id INTEGER PRIMARY KEY AUTOINCREMENT,
   user_did TEXT NOT NULL,
   rkey TEXT NOT NULL,
@@ -16,6 +16,6 @@ CREATE TABLE read_positions_cache (
   UNIQUE(user_did, rkey)
 );
 
-CREATE INDEX idx_read_positions_cache_user ON read_positions_cache(user_did);
-CREATE INDEX idx_read_positions_cache_guid ON read_positions_cache(user_did, item_guid);
-CREATE INDEX idx_read_positions_cache_unsynced ON read_positions_cache(synced_at) WHERE synced_at IS NULL;
+CREATE INDEX IF NOT EXISTS idx_read_positions_cache_user ON read_positions_cache(user_did);
+CREATE INDEX IF NOT EXISTS idx_read_positions_cache_guid ON read_positions_cache(user_did, item_guid);
+CREATE INDEX IF NOT EXISTS idx_read_positions_cache_unsynced ON read_positions_cache(synced_at) WHERE synced_at IS NULL;

backend/migrations/0005_share_guid_published.sql

Lines changed: 1 addition & 1 deletion
@@ -4,4 +4,4 @@ ALTER TABLE shares ADD COLUMN item_published_at INTEGER;
 ALTER TABLE shares ADD COLUMN feed_url TEXT;
 
 -- Index for looking up shares by guid
-CREATE INDEX idx_shares_item_guid ON shares(item_guid);
+CREATE INDEX IF NOT EXISTS idx_shares_item_guid ON shares(item_guid);

backend/migrations/0011_feed_items.sql

Lines changed: 5 additions & 5 deletions
@@ -1,5 +1,5 @@
 -- Store individual feed items for efficient querying
-CREATE TABLE feed_items (
+CREATE TABLE IF NOT EXISTS feed_items (
   id INTEGER PRIMARY KEY AUTOINCREMENT,
   feed_url TEXT NOT NULL,
   guid TEXT NOT NULL,
@@ -16,13 +16,13 @@ CREATE TABLE feed_items (
 );
 
 -- Index for fetching items by feed
-CREATE INDEX idx_feed_items_feed_url ON feed_items(feed_url);
+CREATE INDEX IF NOT EXISTS idx_feed_items_feed_url ON feed_items(feed_url);
 
 -- Index for cross-feed queries sorted by date
-CREATE INDEX idx_feed_items_published_at ON feed_items(published_at DESC);
+CREATE INDEX IF NOT EXISTS idx_feed_items_published_at ON feed_items(published_at DESC);
 
 -- Composite index for feed-specific date queries
-CREATE INDEX idx_feed_items_feed_published ON feed_items(feed_url, published_at DESC);
+CREATE INDEX IF NOT EXISTS idx_feed_items_feed_published ON feed_items(feed_url, published_at DESC);
 
 -- Index for looking up items by URL (for social features)
-CREATE INDEX idx_feed_items_url ON feed_items(url);
+CREATE INDEX IF NOT EXISTS idx_feed_items_url ON feed_items(url);
backend/migrations/0012_inapp_follows.sql

Lines changed: 4 additions & 4 deletions

@@ -1,5 +1,5 @@
 -- In-app follows table (separate from Bluesky follows in follows_cache)
-CREATE TABLE inapp_follows (
+CREATE TABLE IF NOT EXISTS inapp_follows (
   id INTEGER PRIMARY KEY AUTOINCREMENT,
   follower_did TEXT NOT NULL,
   following_did TEXT NOT NULL,
@@ -9,6 +9,6 @@ CREATE TABLE inapp_follows (
   UNIQUE(follower_did, following_did)
 );
 
-CREATE INDEX idx_inapp_follows_follower ON inapp_follows(follower_did);
-CREATE INDEX idx_inapp_follows_following ON inapp_follows(following_did);
-CREATE INDEX idx_inapp_follows_rkey ON inapp_follows(follower_did, rkey);
+CREATE INDEX IF NOT EXISTS idx_inapp_follows_follower ON inapp_follows(follower_did);
+CREATE INDEX IF NOT EXISTS idx_inapp_follows_following ON inapp_follows(following_did);
+CREATE INDEX IF NOT EXISTS idx_inapp_follows_rkey ON inapp_follows(follower_did, rkey);

backend/migrations/0013_share_read_positions_cache.sql

Lines changed: 3 additions & 3 deletions
@@ -1,6 +1,6 @@
 -- Cache of share read positions (synced from AT Protocol)
 -- Used to dedupe before syncing to PDS
-CREATE TABLE share_read_positions_cache (
+CREATE TABLE IF NOT EXISTS share_read_positions_cache (
   id INTEGER PRIMARY KEY AUTOINCREMENT,
   user_did TEXT NOT NULL,
   rkey TEXT NOT NULL,
@@ -17,5 +17,5 @@ CREATE TABLE share_read_positions_cache (
   UNIQUE(user_did, share_uri)
 );
 
-CREATE INDEX idx_share_read_positions_cache_user ON share_read_positions_cache(user_did);
-CREATE INDEX idx_share_read_positions_cache_share ON share_read_positions_cache(user_did, share_uri);
+CREATE INDEX IF NOT EXISTS idx_share_read_positions_cache_user ON share_read_positions_cache(user_did);
+CREATE INDEX IF NOT EXISTS idx_share_read_positions_cache_share ON share_read_positions_cache(user_did, share_uri);
Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+-- Rate limiting table for per-user API request tracking
+CREATE TABLE IF NOT EXISTS rate_limits (
+  did TEXT NOT NULL,
+  endpoint TEXT NOT NULL,
+  window_start INTEGER NOT NULL,
+  count INTEGER DEFAULT 1,
+  PRIMARY KEY (did, endpoint, window_start)
+);
+
+-- Index for efficient cleanup of old rate limit records
+CREATE INDEX IF NOT EXISTS idx_rate_limits_cleanup ON rate_limits(window_start);
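To make the commit message's mechanics concrete, here is a minimal Worker-side sketch of how this table supports the atomic increment, 429 responses, and cron cleanup described above. Only the schema and the 30/100/300 per-minute tiers come from the commit itself; the function names, tier mapping, and header choices are assumptions.

// Sketch only: checkRateLimit, LIMITS, and header names are assumptions.
export interface Env {
  DB: D1Database;
}

// Tier limits from the commit message: expensive 30/min, standard 100/min, light 300/min.
const LIMITS = { expensive: 30, standard: 100, light: 300 } as const;
const WINDOW_SECONDS = 60;

export async function checkRateLimit(
  env: Env,
  did: string,
  endpoint: string,
  tier: keyof typeof LIMITS,
): Promise<Response | null> {
  const now = Math.floor(Date.now() / 1000);
  // Align to minute boundaries so every request in the same minute
  // lands on the same (did, endpoint, window_start) row.
  const windowStart = now - (now % WINDOW_SECONDS);

  // Atomic increment via SQL: the UPSERT either creates the window row
  // with count = 1 or bumps the existing count, and RETURNING reads the
  // post-increment value in the same statement.
  const row = await env.DB.prepare(
    `INSERT INTO rate_limits (did, endpoint, window_start, count)
     VALUES (?1, ?2, ?3, 1)
     ON CONFLICT (did, endpoint, window_start)
     DO UPDATE SET count = count + 1
     RETURNING count`,
  )
    .bind(did, endpoint, windowStart)
    .first<{ count: number }>();

  const limit = LIMITS[tier];
  const count = row?.count ?? 1;
  if (count > limit) {
    return new Response("Too Many Requests", {
      status: 429,
      headers: {
        "Retry-After": String(windowStart + WINDOW_SECONDS - now),
        "X-RateLimit-Limit": String(limit),
        "X-RateLimit-Remaining": "0",
      },
    });
  }
  return null; // under the limit; caller proceeds with the request
}

// Window cleanup, run from the Worker's scheduled (cron) handler: drop
// windows older than two intervals, walking idx_rate_limits_cleanup
// instead of scanning the whole table.
export async function cleanupRateLimits(env: Env): Promise<void> {
  const cutoff = Math.floor(Date.now() / 1000) - 2 * WINDOW_SECONDS;
  await env.DB.prepare("DELETE FROM rate_limits WHERE window_start < ?1")
    .bind(cutoff)
    .run();
}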
