
Commit 789468b

feat: add llm support w/ themes
1 parent 4024a6f commit 789468b

File tree: 3 files changed (+103, −10 lines)


README.md

Lines changed: 41 additions & 1 deletion
@@ -50,6 +50,30 @@ Use it in apps, bots, landing pages, Slack integrations, rejection letters, or w

 ---

+## 🧠 Now with LLM-powered excuses! (Optional)
+
+If you enable LLM support and provide an [OpenRouter](https://openrouter.ai/) API key, the excuses will be generated by an AI model for maximum hilarity.
+
+### 🎯 Thematic Excuses
+
+When using LLM excuses, you can pass an optional theme to get a context-aware excuse:
+
+```http
+GET /no?theme=starwars
+```
+
+Example response:
+
+```json
+{
+  "reason": "I can't join your lightsaber duel club today; I left my lightsaber charging in the Death Star's cathode ray tube overnight and it's still stuck in hyperspace",
+  "theme": "starwars",
+  "source": "llm"
+}
+```
+
+---
+
 ## 🛠️ Self-Hosting

 Want to run it yourself? It’s lightweight and simple.
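
As a quick check of the themed endpoint above, here is a minimal client sketch (assuming Node 18+ for the built-in fetch; the URL and response shape follow the README example, with the default port from the config):

```js
// quick-test.mjs: hypothetical file name, not part of this commit.
// Requires Node 18+ (built-in fetch, top-level await in .mjs).
const res = await fetch("http://localhost:3000/no?theme=starwars");
const { reason, theme, source } = await res.json();
console.log(`[${source}] ${theme ?? "unthemed"}: ${reason}`);
```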
@@ -65,7 +89,14 @@ cd no-as-a-service
 npm install
 ```

-### 3. Start the server
+### 3. Configure environment variables (optional)
+Create a `.env` file with your OpenRouter key:
+
+```env
+OPENROUTER_API_KEY=your_openrouter_api_key
+```
+
+### 4. Start the server
 ```bash
 npm start
 ```
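
For context, the server picks this key up at startup via dotenv; the pattern below simply mirrors the index.js diff further down, nothing here is new API:

```js
// Load .env into process.env, then read the key (as index.js does below).
require("dotenv").config();
const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY;
```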
@@ -80,6 +111,12 @@ You can also change the port using an environment variable:
 PORT=5000 npm start
 ```

+Finally, you can adjust behavior in `index.js`:
+```js
+const shouldUseLLM = true; // Toggle LLM use
+const model = "meta-llama/llama-3.3-8b-instruct:free"; // Select preferred model
+```
+
 ---

 ## 📁 Project Structure
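
One behavior worth knowing: if shouldUseLLM is true but OPENROUTER_API_KEY is unset, the handler silently serves offline excuses (the index.js diff below gates on both). A hypothetical startup check, not part of this commit, placed after the config constants in index.js would make that explicit:

```js
// Hypothetical guard, not in this commit: surface a misconfiguration early.
if (shouldUseLLM && !process.env.OPENROUTER_API_KEY) {
  console.warn("shouldUseLLM is true but OPENROUTER_API_KEY is unset; all excuses will be offline.");
}
```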
@@ -88,6 +125,7 @@ PORT=5000 npm start
 no-as-service/
 ├── index.js            # Express API
 ├── reasons.json        # 1000+ universal rejection reasons
+├── .env                # (optional) environment file with OpenRouter key
 ├── package.json
 ├── .devcontainer.json  # VS Code / Github devcontainer setup
 └── README.md
@@ -111,6 +149,8 @@ For reference, here’s the package config:
   "author": "hotheadhacker",
   "license": "MIT",
   "dependencies": {
+    "axios": "^1.10.0",
+    "dotenv": "^16.5.0",
     "express": "^4.18.2",
     "express-rate-limit": "^7.0.0"
   }

index.js

Lines changed: 60 additions & 9 deletions
@@ -1,30 +1,81 @@
-const express = require('express');
-const rateLimit = require('express-rate-limit');
-const fs = require('fs');
+require("dotenv").config();
+const express = require("express");
+const rateLimit = require("express-rate-limit");
+const fs = require("fs");
+const axios = require("axios");

 const app = express();
-app.set('trust proxy', true);
+app.set("trust proxy", true);
 const PORT = process.env.PORT || 3000;

+const shouldUseLLM = true;
+const model = "meta-llama/llama-3.3-8b-instruct:free";
+const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY;
+
 // Load reasons from JSON
-const reasons = JSON.parse(fs.readFileSync('./reasons.json', 'utf-8'));
+const reasons = JSON.parse(fs.readFileSync("./reasons.json", "utf-8"));

 // Rate limiter: 120 requests per minute per IP
 const limiter = rateLimit({
   windowMs: 60 * 1000, // 1 minute
   max: 120,
   keyGenerator: (req, res) => {
-    return req.headers['cf-connecting-ip'] || req.ip; // Fallback if header missing (or for non-CF)
+    return req.headers["cf-connecting-ip"] || req.ip; // Fallback if header missing (or for non-CF)
+  },
+  message: {
+    error: "Too many requests, please try again later. (120 reqs/min/IP)",
   },
-  message: { error: "Too many requests, please try again later. (120 reqs/min/IP)" }
 });

 app.use(limiter);

 // Random rejection reason endpoint
-app.get('/no', (req, res) => {
+app.get("/no", async (req, res) => {
+  if (shouldUseLLM && OPENROUTER_API_KEY) {
+    const theme = req.query.theme?.toLowerCase();
+    const userMessage = theme
+      ? `Give me an excuse to say no to something, related to the theme: ${theme}.`
+      : `Give me an excuse to say no to something.`;
+
+    try {
+      const llmResponse = await axios.post(
+        "https://openrouter.ai/api/v1/chat/completions",
+        {
+          model,
+          messages: [
+            {
+              role: "system",
+              content:
+                "You are an API that generates a single, creative, humorous, and random excuse to say 'no'. Your response must contain only the excuse — no greetings, explanations, or additional text.",
+            },
+            {
+              role: "user",
+              content: userMessage,
+            },
+          ],
+        },
+        {
+          headers: {
+            Authorization: `Bearer ${OPENROUTER_API_KEY}`,
+            "Content-Type": "application/json",
+          },
+        }
+      );
+
+      const reason = llmResponse.data.choices?.[0]?.message?.content?.trim();
+
+      if (reason) {
+        return res.json({ reason, theme, source: "llm" });
+      }
+
+      console.warn("LLM response was empty, falling back to local reasons.");
+    } catch (err) {
+      console.error("LLM error, falling back to local reasons:", err.message);
+    }
+  }
+
   const reason = reasons[Math.floor(Math.random() * reasons.length)];
-  res.json({ reason });
+  res.json({ reason, source: "offline" });
 });

 // Start server
// Start server

package.json

Lines changed: 2 additions & 0 deletions
@@ -9,6 +9,8 @@
   "author": "hotheadhacker",
   "license": "MIT",
   "dependencies": {
+    "axios": "^1.10.0",
+    "dotenv": "^16.5.0",
     "express": "^4.18.2",
     "express-rate-limit": "^7.0.0"
   }
