Frameworks
Integrate SpendLil with popular web frameworks: Next.js, Express, FastAPI, Django, Rails, and more.
SpendLil uses provider-specific gateway subdomains and sends your keys in the X-SpendLil-Key and X-Provider-Key headers instead of Authorization. That means you call the gateway directly with fetch or requests rather than through a provider SDK. Here are copy-paste patterns for the most popular frameworks.
Provider SDKs send the standard Authorization header, which AWS API Gateway intercepts. If you prefer to keep using an SDK, run the local header-injection proxy (see the Cursor & AI IDEs guide) to translate the headers for you, as the LangChain and Vercel AI SDK examples below do.
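Every example below follows the same shape: POST the provider's normal JSON payload to the gateway subdomain with both headers set. Here is a rough TypeScript sketch of that shared pattern; callGateway is an illustrative helper (not part of SpendLil), and the `<provider>.gateway.spendlil.ai` scheme and the Bearer prefix simply generalize the OpenAI examples below:

// Illustrative helper, not part of SpendLil: generalizes the pattern used in
// the framework examples below. The subdomain scheme and the `Bearer` prefix
// mirror the OpenAI examples; other providers may format keys differently.
async function callGateway(provider: string, path: string, providerKey: string, payload: unknown) {
  const res = await fetch(`https://${provider}.gateway.spendlil.ai${path}`, {
    method: 'POST',
    headers: {
      'X-SpendLil-Key': process.env.SPENDLIL_KEY!,
      'X-Provider-Key': `Bearer ${providerKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(payload),
  });
  return res.json();
}

// Example: the same request the framework snippets below make.
const data = await callGateway('openai', '/v1/chat/completions', process.env.OPENAI_API_KEY!, {
  model: 'gpt-4o-mini',
  messages: [{ role: 'user', content: 'Hello' }],
});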
Next.js
import { NextResponse } from 'next/server';

export async function POST(req: Request) {
  const { message } = await req.json();

  const response = await fetch(
    'https://openai.gateway.spendlil.ai/v1/chat/completions',
    {
      method: 'POST',
      headers: {
        'X-SpendLil-Key': process.env.SPENDLIL_KEY!,
        'X-Provider-Key': `Bearer ${process.env.OPENAI_API_KEY}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: 'gpt-4o-mini',
        messages: [{ role: 'user', content: message }],
      }),
    }
  );

  const data = await response.json();
  return NextResponse.json({ reply: data.choices[0].message.content });
}
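Calling the route from the browser is a plain fetch to the route path. A minimal usage sketch, assuming the handler above is saved as app/api/chat/route.ts and therefore served at /api/chat:

// Client-side usage sketch -- assumes the handler above lives at
// app/api/chat/route.ts, so Next.js serves it at /api/chat.
const res = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ message: 'Hello from the browser' }),
});
const { reply } = await res.json();
console.log(reply);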
Express.js

import { Router } from 'express';

const router = Router();

router.post('/chat', async (req, res) => {
  const response = await fetch(
    'https://openai.gateway.spendlil.ai/v1/chat/completions',
    {
      method: 'POST',
      headers: {
        'X-SpendLil-Key': process.env.SPENDLIL_KEY,
        'X-Provider-Key': `Bearer ${process.env.OPENAI_API_KEY}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        model: 'gpt-4o-mini',
        messages: [{ role: 'user', content: req.body.message }],
      }),
    }
  );

  const data = await response.json();
  res.json({ reply: data.choices[0].message.content });
});

export default router;
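The handler reads req.body.message, so the JSON body parser must be mounted before the router. A minimal wiring sketch (the './routes/chat' import path is illustrative; point it at wherever the router above is exported from):

// App wiring sketch -- express.json() is required so req.body.message is
// populated; './routes/chat' is an illustrative path for the router above.
import express from 'express';
import chatRouter from './routes/chat';

const app = express();
app.use(express.json());
app.use('/api', chatRouter);
app.listen(3000);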
FastAPI (Python)

import os

import requests as http
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class ChatRequest(BaseModel):
    message: str

@app.post("/chat")
def chat(req: ChatRequest):
    # Sync endpoint: FastAPI runs it in a threadpool, so the blocking
    # requests call does not stall the event loop.
    response = http.post(
        "https://openai.gateway.spendlil.ai/v1/chat/completions",
        headers={
            "X-SpendLil-Key": os.environ["SPENDLIL_KEY"],
            "X-Provider-Key": f"Bearer {os.environ['OPENAI_API_KEY']}",
            "Content-Type": "application/json",
        },
        json={
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": req.message}],
        },
    )
    data = response.json()
    return {"reply": data["choices"][0]["message"]["content"]}

Django
import os
import json

import requests as http
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt

@csrf_exempt
def chat(request):
    body = json.loads(request.body)
    response = http.post(
        "https://openai.gateway.spendlil.ai/v1/chat/completions",
        headers={
            "X-SpendLil-Key": os.environ["SPENDLIL_KEY"],
            "X-Provider-Key": f"Bearer {os.environ['OPENAI_API_KEY']}",
            "Content-Type": "application/json",
        },
        json={
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": body["message"]}],
        },
    )
    data = response.json()
    return JsonResponse({"reply": data["choices"][0]["message"]["content"]})

Flask
import os

import requests as http
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.post("/chat")
def chat():
    data = request.get_json()
    response = http.post(
        "https://openai.gateway.spendlil.ai/v1/chat/completions",
        headers={
            "X-SpendLil-Key": os.environ["SPENDLIL_KEY"],
            "X-Provider-Key": f"Bearer {os.environ['OPENAI_API_KEY']}",
            "Content-Type": "application/json",
        },
        json={
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": data["message"]}],
        },
    )
    result = response.json()
    return jsonify(reply=result["choices"][0]["message"]["content"])

Ruby on Rails
require 'net/http'
require 'json'

class AiClient
  def self.chat(message, model: 'gpt-4o-mini')
    uri = URI('https://openai.gateway.spendlil.ai/v1/chat/completions')
    http = Net::HTTP.new(uri.host, uri.port)
    http.use_ssl = true

    request = Net::HTTP::Post.new(uri)
    request['X-SpendLil-Key'] = ENV['SPENDLIL_KEY']
    request['X-Provider-Key'] = "Bearer #{ENV['OPENAI_API_KEY']}"
    request['Content-Type'] = 'application/json'
    request.body = {
      model: model,
      messages: [{ role: 'user', content: message }]
    }.to_json

    response = http.request(request)
    JSON.parse(response.body)
  end
end

Go
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
    "os"
)

func main() {
    body, _ := json.Marshal(map[string]interface{}{
        "model": "gpt-4o-mini",
        "messages": []map[string]string{
            {"role": "user", "content": "Hello"},
        },
    })

    req, _ := http.NewRequest("POST",
        "https://openai.gateway.spendlil.ai/v1/chat/completions",
        bytes.NewBuffer(body))
    req.Header.Set("X-SpendLil-Key", os.Getenv("SPENDLIL_KEY"))
    req.Header.Set("X-Provider-Key", "Bearer "+os.Getenv("OPENAI_API_KEY"))
    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var result map[string]interface{}
    json.NewDecoder(resp.Body).Decode(&result)
    fmt.Println(result)
}

LangChain (via local proxy)
LangChain uses the OpenAI SDK internally, which sends the Authorization header. Run the local header-injection proxy (see the Cursor & AI IDEs guide) and point LangChain at it.
# First: PROVIDER=openai SPENDLIL_KEY=sl_abc123 node spendlil-proxy.mjs
import os

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4o-mini",
    api_key=os.environ["OPENAI_API_KEY"],
    base_url="http://localhost:8787/v1",
)

response = llm.invoke("Hello from LangChain")
print(response.content)

Vercel AI SDK (via local proxy)
// First: PROVIDER=openai SPENDLIL_KEY=sl_abc123 node spendlil-proxy.mjs
import { createOpenAI } from '@ai-sdk/openai';
import { generateText } from 'ai';

const openai = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: 'http://localhost:8787/v1',
});

const { text } = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Hello from Vercel AI SDK',
});
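Streaming should work the same way as long as the local proxy passes streamed (SSE) response bodies through unchanged; that is an assumption to confirm against the Cursor & AI IDEs guide. A minimal sketch using streamText from the same ai package:

// Streaming sketch -- assumes the local proxy forwards streamed (SSE) bodies
// unchanged. Uses streamText from the 'ai' package with the same proxy setup.
import { createOpenAI } from '@ai-sdk/openai';
import { streamText } from 'ai';

const openai = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: 'http://localhost:8787/v1',
});

const result = await streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Stream hello from Vercel AI SDK',
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}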