my-api/
├── main.py
├── requirements.txt
└── render.yaml # optional, for infrastructure-as-code
my-api/
├── main.py
├── requirements.txt
└── render.yaml # optional, for infrastructure-as-code
my-api/
├── main.py
├── requirements.txt
└── render.yaml # optional, for infrastructure-as-code
# main.py — minimal FastAPI app for Render's free tier.
from fastapi import FastAPI
from pydantic import BaseModel
import os

app = FastAPI(title="My Free API on Render")


class MessageRequest(BaseModel):
    # Request body for the /echo endpoint.
    text: str


@app.get("/")
def health_check():
    """Report service status and which environment the app runs in."""
    # NOTE: the scraped original had an HTML artifact fused into the
    # "status" key; restored here.
    return {"status": "ok", "environment": os.environ.get("RENDER_ENV", "local")}


@app.post("/echo")
def echo(request: MessageRequest):
    """Echo the posted text back along with its length."""
    return {"received": request.text, "length": len(request.text)}


@app.get("/items/{item_id}")
def get_item(item_id: int, q: str = None):
    """Return the path parameter and, when provided, the optional query."""
    result = {"item_id": item_id}
    if q:
        result["query"] = q
    return result
# main.py — minimal FastAPI app for Render's free tier.
from fastapi import FastAPI
from pydantic import BaseModel
import os

app = FastAPI(title="My Free API on Render")


class MessageRequest(BaseModel):
    # Request body for the /echo endpoint.
    text: str


@app.get("/")
def health_check():
    """Report service status and which environment the app runs in."""
    return {"status": "ok", "environment": os.environ.get("RENDER_ENV", "local")}


@app.post("/echo")
def echo(request: MessageRequest):
    """Echo the posted text back along with its length."""
    return {"received": request.text, "length": len(request.text)}


@app.get("/items/{item_id}")
def get_item(item_id: int, q: str = None):
    """Return the path parameter and, when provided, the optional query."""
    result = {"item_id": item_id}
    if q:
        result["query"] = q
    return result
# main.py — minimal FastAPI app for Render's free tier.
from fastapi import FastAPI
from pydantic import BaseModel
import os

app = FastAPI(title="My Free API on Render")


class MessageRequest(BaseModel):
    # Request body for the /echo endpoint.
    text: str


@app.get("/")
def health_check():
    """Report service status and which environment the app runs in."""
    return {"status": "ok", "environment": os.environ.get("RENDER_ENV", "local")}


@app.post("/echo")
def echo(request: MessageRequest):
    """Echo the posted text back along with its length."""
    return {"received": request.text, "length": len(request.text)}


@app.get("/items/{item_id}")
def get_item(item_id: int, q: str = None):
    """Return the path parameter and, when provided, the optional query."""
    result = {"item_id": item_id}
    if q:
        result["query"] = q
    return result
fastapi==0.115.0
uvicorn[standard]==0.30.0
fastapi==0.115.0
uvicorn[standard]==0.30.0
fastapi==0.115.0
uvicorn[standard]==0.30.0
# render.yaml — declarative deployment of the FastAPI web service.
services:
  - type: web
    name: my-fastapi-app
    runtime: python
    buildCommand: pip install -r requirements.txt
    startCommand: uvicorn main:app --host 0.0.0.0 --port $PORT
    plan: free
    envVars:
      - key: PYTHON_VERSION
        value: 3.11.0
# render.yaml — declarative deployment of the FastAPI web service.
services:
  - type: web
    name: my-fastapi-app
    runtime: python
    buildCommand: pip install -r requirements.txt
    startCommand: uvicorn main:app --host 0.0.0.0 --port $PORT
    plan: free
    envVars:
      - key: PYTHON_VERSION
        value: 3.11.0
# render.yaml — declarative deployment of the FastAPI web service.
services:
  - type: web
    name: my-fastapi-app
    runtime: python
    buildCommand: pip install -r requirements.txt
    startCommand: uvicorn main:app --host 0.0.0.0 --port $PORT
    plan: free
    envVars:
      - key: PYTHON_VERSION
        value: 3.11.0
// server.js — minimal Express app for Render's free tier.
const express = require('express');

const app = express();
app.use(express.json());

// Health/status root endpoint (the scraped original had an HTML
// artifact fused into the "status" key; restored here).
app.get('/', (req, res) => {
  res.json({ status: 'ok', platform: 'Render' });
});

// Webhook receiver: log the payload and acknowledge receipt.
app.post('/webhook', (req, res) => {
  console.log('Received webhook:', req.body);
  res.json({ received: true });
});

// Render injects PORT; fall back to 3000 for local development.
const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`Server running on port ${PORT}`);
});
// server.js — minimal Express app for Render's free tier.
const express = require('express');

const app = express();
app.use(express.json());

// Health/status root endpoint.
app.get('/', (req, res) => {
  res.json({ status: 'ok', platform: 'Render' });
});

// Webhook receiver: log the payload and acknowledge receipt.
app.post('/webhook', (req, res) => {
  console.log('Received webhook:', req.body);
  res.json({ received: true });
});

// Render injects PORT; fall back to 3000 for local development.
const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`Server running on port ${PORT}`);
});
// server.js — minimal Express app for Render's free tier.
const express = require('express');

const app = express();
app.use(express.json());

// Health/status root endpoint.
app.get('/', (req, res) => {
  res.json({ status: 'ok', platform: 'Render' });
});

// Webhook receiver: log the payload and acknowledge receipt.
app.post('/webhook', (req, res) => {
  console.log('Received webhook:', req.body);
  res.json({ received: true });
});

// Render injects PORT; fall back to 3000 for local development.
const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`Server running on port ${PORT}`);
});
// package.json (partial) — "start" is what Render runs by default for Node.
{
  "scripts": {
    "start": "node server.js"
  },
  "dependencies": {
    "express": "^4.18.2"
  }
}
// package.json (partial) — "start" is what Render runs by default for Node.
{
  "scripts": {
    "start": "node server.js"
  },
  "dependencies": {
    "express": "^4.18.2"
  }
}
// package.json (partial) — "start" is what Render runs by default for Node.
{
  "scripts": {
    "start": "node server.js"
  },
  "dependencies": {
    "express": "^4.18.2"
  }
}
import os
import psycopg2

# Render injects DATABASE_URL into the environment for linked databases.
DATABASE_URL = os.environ.get("DATABASE_URL")

conn = psycopg2.connect(DATABASE_URL)
cursor = conn.cursor()

# Idempotent schema setup.
cursor.execute("""
    CREATE TABLE IF NOT EXISTS users (
        id SERIAL PRIMARY KEY,
        email VARCHAR(255) UNIQUE NOT NULL,
        created_at TIMESTAMP DEFAULT NOW()
    )
""")
conn.commit()

# Parameterized insert; ON CONFLICT keeps re-runs from failing on the
# unique email constraint.
cursor.execute(
    "INSERT INTO users (email) VALUES (%s) ON CONFLICT DO NOTHING",
    ("[email protected]",),
)
conn.commit()

cursor.execute("SELECT * FROM users")
rows = cursor.fetchall()
print(rows)

cursor.close()
conn.close()
import os
import psycopg2

# Render injects DATABASE_URL into the environment for linked databases.
DATABASE_URL = os.environ.get("DATABASE_URL")

conn = psycopg2.connect(DATABASE_URL)
cursor = conn.cursor()

cursor.execute("""
    CREATE TABLE IF NOT EXISTS users (
        id SERIAL PRIMARY KEY,
        email VARCHAR(255) UNIQUE NOT NULL,
        created_at TIMESTAMP DEFAULT NOW()
    )
""")
conn.commit()

# Parameterized, idempotent insert.
cursor.execute(
    "INSERT INTO users (email) VALUES (%s) ON CONFLICT DO NOTHING",
    ("[email protected]",),
)
conn.commit()

cursor.execute("SELECT * FROM users")
rows = cursor.fetchall()
print(rows)

cursor.close()
conn.close()
import os
import psycopg2

# Render injects DATABASE_URL into the environment for linked databases.
DATABASE_URL = os.environ.get("DATABASE_URL")

conn = psycopg2.connect(DATABASE_URL)
cursor = conn.cursor()

cursor.execute("""
    CREATE TABLE IF NOT EXISTS users (
        id SERIAL PRIMARY KEY,
        email VARCHAR(255) UNIQUE NOT NULL,
        created_at TIMESTAMP DEFAULT NOW()
    )
""")
conn.commit()

# Parameterized, idempotent insert.
cursor.execute(
    "INSERT INTO users (email) VALUES (%s) ON CONFLICT DO NOTHING",
    ("[email protected]",),
)
conn.commit()

cursor.execute("SELECT * FROM users")
rows = cursor.fetchall()
print(rows)

cursor.close()
conn.close()
from sqlalchemy import create_engine, Column, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
from datetime import datetime

DATABASE_URL = os.environ["DATABASE_URL"]

# Render PostgreSQL URLs start with postgres://, SQLAlchemy needs postgresql://
if DATABASE_URL.startswith("postgres://"):
    DATABASE_URL = DATABASE_URL.replace("postgres://", "postgresql://", 1)

engine = create_engine(DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()


class User(Base):
    """ORM model for the users table."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    created_at = Column(DateTime, default=datetime.utcnow)


# Create any missing tables at import time.
Base.metadata.create_all(bind=engine)
from sqlalchemy import create_engine, Column, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
from datetime import datetime

DATABASE_URL = os.environ["DATABASE_URL"]

# Render PostgreSQL URLs start with postgres://, SQLAlchemy needs postgresql://
if DATABASE_URL.startswith("postgres://"):
    DATABASE_URL = DATABASE_URL.replace("postgres://", "postgresql://", 1)

engine = create_engine(DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()


class User(Base):
    """ORM model for the users table."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    created_at = Column(DateTime, default=datetime.utcnow)


# Create any missing tables at import time.
Base.metadata.create_all(bind=engine)
from sqlalchemy import create_engine, Column, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
from datetime import datetime

DATABASE_URL = os.environ["DATABASE_URL"]

# Render PostgreSQL URLs start with postgres://, SQLAlchemy needs postgresql://
if DATABASE_URL.startswith("postgres://"):
    DATABASE_URL = DATABASE_URL.replace("postgres://", "postgresql://", 1)

engine = create_engine(DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()


class User(Base):
    """ORM model for the users table."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    created_at = Column(DateTime, default=datetime.utcnow)


# Create any missing tables at import time.
Base.metadata.create_all(bind=engine)
// next.config.js
/** @type {import('next').NextConfig} */
const nextConfig = {
  output: 'export', // generates static files in /out
};

module.exports = nextConfig;
// next.config.js
/** @type {import('next').NextConfig} */
const nextConfig = {
  output: 'export', // generates static files in /out
};

module.exports = nextConfig;
// next.config.js
/** @type {import('next').NextConfig} */
const nextConfig = {
  output: 'export', // generates static files in /out
};

module.exports = nextConfig;
# render.yaml for a cron job
services:
  - type: cron
    name: daily-cleanup
    runtime: python
    buildCommand: pip install -r requirements.txt
    schedule: "0 2 * * *"  # Run at 2 AM UTC daily
    startCommand: python cleanup.py
# render.yaml for a cron job
services:
  - type: cron
    name: daily-cleanup
    runtime: python
    buildCommand: pip install -r requirements.txt
    schedule: "0 2 * * *"  # Run at 2 AM UTC daily
    startCommand: python cleanup.py
# render.yaml for a cron job
services:
  - type: cron
    name: daily-cleanup
    runtime: python
    buildCommand: pip install -r requirements.txt
    schedule: "0 2 * * *"  # Run at 2 AM UTC daily
    startCommand: python cleanup.py
# cleanup.py
import os
import psycopg2
from datetime import datetime, timedelta


def cleanup_old_records():
    """Delete events older than 30 days and report how many were removed."""
    conn = psycopg2.connect(os.environ["DATABASE_URL"])
    cursor = conn.cursor()
    cutoff_date = datetime.now() - timedelta(days=30)
    # Parameterized delete; rowcount reflects the rows removed.
    cursor.execute(
        "DELETE FROM events WHERE created_at < %s",
        (cutoff_date,)
    )
    deleted = cursor.rowcount
    conn.commit()
    conn.close()
    print(f"Cleaned up {deleted} old records at {datetime.now()}")


if __name__ == "__main__":
    cleanup_old_records()
# cleanup.py
import os
import psycopg2
from datetime import datetime, timedelta


def cleanup_old_records():
    """Delete events older than 30 days and report how many were removed."""
    conn = psycopg2.connect(os.environ["DATABASE_URL"])
    cursor = conn.cursor()
    cutoff_date = datetime.now() - timedelta(days=30)
    cursor.execute(
        "DELETE FROM events WHERE created_at < %s",
        (cutoff_date,)
    )
    deleted = cursor.rowcount
    conn.commit()
    conn.close()
    print(f"Cleaned up {deleted} old records at {datetime.now()}")


if __name__ == "__main__":
    cleanup_old_records()
# cleanup.py
import os
import psycopg2
from datetime import datetime, timedelta


def cleanup_old_records():
    """Delete events older than 30 days and report how many were removed."""
    conn = psycopg2.connect(os.environ["DATABASE_URL"])
    cursor = conn.cursor()
    cutoff_date = datetime.now() - timedelta(days=30)
    cursor.execute(
        "DELETE FROM events WHERE created_at < %s",
        (cutoff_date,)
    )
    deleted = cursor.rowcount
    conn.commit()
    conn.close()
    print(f"Cleaned up {deleted} old records at {datetime.now()}")


if __name__ == "__main__":
    cleanup_old_records()
# render.yaml for OpenClaw + AI API backend
services:
  - type: web
    name: ai-agent-backend
    runtime: python
    buildCommand: pip install -r requirements.txt
    startCommand: uvicorn agent:app --host 0.0.0.0 --port $PORT
    plan: free
    envVars:
      - key: GROQ_API_KEY
        sync: false  # set manually in dashboard
      - key: DATABASE_URL
        fromDatabase:
          name: agent-db
          property: connectionString

databases:
  - name: agent-db
    plan: free
# render.yaml for OpenClaw + AI API backend
services:
  - type: web
    name: ai-agent-backend
    runtime: python
    buildCommand: pip install -r requirements.txt
    startCommand: uvicorn agent:app --host 0.0.0.0 --port $PORT
    plan: free
    envVars:
      - key: GROQ_API_KEY
        sync: false  # set manually in dashboard
      - key: DATABASE_URL
        fromDatabase:
          name: agent-db
          property: connectionString

databases:
  - name: agent-db
    plan: free
# render.yaml for OpenClaw + AI API backend
services:
  - type: web
    name: ai-agent-backend
    runtime: python
    buildCommand: pip install -r requirements.txt
    startCommand: uvicorn agent:app --host 0.0.0.0 --port $PORT
    plan: free
    envVars:
      - key: GROQ_API_KEY
        sync: false  # set manually in dashboard
      - key: DATABASE_URL
        fromDatabase:
          name: agent-db
          property: connectionString

databases:
  - name: agent-db
    plan: free
# agent.py — AI agent backend using Groq via OpenClaw
import os
from fastapi import FastAPI
from pydantic import BaseModel
from openai import OpenAI

app = FastAPI()

# Use Groq's free API with OpenAI-compatible endpoint
client = OpenAI(
    api_key=os.environ["GROQ_API_KEY"],
    base_url="https://api.groq.com/openai/v1"
)


class AgentRequest(BaseModel):
    message: str
    system_prompt: str = "You are a helpful assistant."


@app.post("/chat")
async def chat(request: AgentRequest):
    """Forward the user message to Groq and return the model's reply."""
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "system", "content": request.system_prompt},
            {"role": "user", "content": request.message}
        ]
    )
    return {
        "response": response.choices[0].message.content,
        "model": "llama-3.3-70b-versatile",
        "provider": "groq"
    }


@app.get("/health")
def health():
    """Liveness probe endpoint."""
    return {"status": "ok"}
# agent.py — AI agent backend using Groq via OpenClaw
import os
from fastapi import FastAPI
from pydantic import BaseModel
from openai import OpenAI

app = FastAPI()

# Use Groq's free API with OpenAI-compatible endpoint
client = OpenAI(
    api_key=os.environ["GROQ_API_KEY"],
    base_url="https://api.groq.com/openai/v1"
)


class AgentRequest(BaseModel):
    message: str
    system_prompt: str = "You are a helpful assistant."


@app.post("/chat")
async def chat(request: AgentRequest):
    """Forward the user message to Groq and return the model's reply."""
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "system", "content": request.system_prompt},
            {"role": "user", "content": request.message}
        ]
    )
    return {
        "response": response.choices[0].message.content,
        "model": "llama-3.3-70b-versatile",
        "provider": "groq"
    }


@app.get("/health")
def health():
    """Liveness probe endpoint."""
    return {"status": "ok"}
# agent.py — AI agent backend using Groq via OpenClaw
import os
from fastapi import FastAPI
from pydantic import BaseModel
from openai import OpenAI

app = FastAPI()

# Use Groq's free API with OpenAI-compatible endpoint
client = OpenAI(
    api_key=os.environ["GROQ_API_KEY"],
    base_url="https://api.groq.com/openai/v1"
)


class AgentRequest(BaseModel):
    message: str
    system_prompt: str = "You are a helpful assistant."


@app.post("/chat")
async def chat(request: AgentRequest):
    """Forward the user message to Groq and return the model's reply."""
    response = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[
            {"role": "system", "content": request.system_prompt},
            {"role": "user", "content": request.message}
        ]
    )
    return {
        "response": response.choices[0].message.content,
        "model": "llama-3.3-70b-versatile",
        "provider": "groq"
    }


@app.get("/health")
def health():
    """Liveness probe endpoint."""
    return {"status": "ok"}
# Access in Python
import os

DATABASE_URL = os.environ["DATABASE_URL"]  # raises KeyError when unset
API_KEY = os.environ["MY_API_KEY"]
DEBUG = os.environ.get("DEBUG", "false").lower() == "true"
# Access in Python
import os

DATABASE_URL = os.environ["DATABASE_URL"]  # raises KeyError when unset
API_KEY = os.environ["MY_API_KEY"]
DEBUG = os.environ.get("DEBUG", "false").lower() == "true"
# Access in Python
import os

DATABASE_URL = os.environ["DATABASE_URL"]  # raises KeyError when unset
API_KEY = os.environ["MY_API_KEY"]
DEBUG = os.environ.get("DEBUG", "false").lower() == "true"
// Access in Node.js
const databaseUrl = process.env.DATABASE_URL;
const apiKey = process.env.MY_API_KEY;
// Access in Node.js
const databaseUrl = process.env.DATABASE_URL;
const apiKey = process.env.MY_API_KEY;
// Access in Node.js
const databaseUrl = process.env.DATABASE_URL;
const apiKey = process.env.MY_API_KEY;
# Simple Python keepalive using cron-job.org (free external cron)
# Add this endpoint to your app, then schedule a ping every 10 minutes
@app.get("/ping")
def ping():
    """Cheap endpoint for an external pinger to keep the free instance warm."""
    return {"pong": True}
# Simple Python keepalive using cron-job.org (free external cron)
# Add this endpoint to your app, then schedule a ping every 10 minutes
@app.get("/ping")
def ping():
    """Cheap endpoint for an external pinger to keep the free instance warm."""
    return {"pong": True}
# Simple Python keepalive using cron-job.org (free external cron)
# Add this endpoint to your app, then schedule a ping every 10 minutes
@app.get("/ping")
def ping():
    """Cheap endpoint for an external pinger to keep the free instance warm."""
    return {"pong": True}
# FastAPI health endpoint
@app.get("/health")
def health_check():
    """Return service health for monitors and load balancers."""
    return {"status": "healthy"}
# FastAPI health endpoint
@app.get("/health")
def health_check():
    """Return service health for monitors and load balancers."""
    return {"status": "healthy"}
# FastAPI health endpoint
@app.get("/health")
def health_check(): return {"status": "healthy"} - Scheduled pinger: Use a free cron service (like cron-job.org) to ping your Render URL every 10 minutes, keeping it warm
- Static site + serverless API: Host your frontend as a Render static site (always on) and use a different free service (Cloudflare Workers, Vercel functions) for dynamic endpoints
- Accept the trade-off: For internal tools, demos, or low-traffic APIs where cold starts are fine, Render free is perfectly usable
- Upgrade to a paid instance ($7/month): Paid instances have no spin-down and are always on - Go to render.com and sign up with GitHub, GitLab, or email
- Connect your GitHub or GitLab account when prompted — this is how Render pulls your code
- No credit card required to start - From the Render dashboard, click New + → Web Service
- Select the Git repository you want to deploy
- Choose a name, region (US East, US West, Frankfurt, Singapore, or Ohio), and branch
- Set the Runtime (Python, Node, Go, Ruby, Rust, Docker, etc.)
- Set the Build Command and Start Command
- Choose the free plan and click Create Web Service - Runtime: Python 3
- Build Command: pip install -r requirements.txt
- Start Command: uvicorn main:app --host 0.0.0.0 --port $PORT - In the Render dashboard, click New + → PostgreSQL
- Name it, choose a region, select Free plan
- Click Create Database
- Copy the Internal Database URL (use this for services within Render) or External Database URL (use this from outside Render) - No spin-down (always instantly accessible)
- Global CDN distribution
- Automatic HTTPS
- Custom domain support
- Auto-deploy on every Git push
- Pull request previews (preview deployments for every PR)
- 100 GB bandwidth/month - Build Command: npm install && npm run build
- Publish Directory: out - Go to your service settings → Custom Domains
- Add your domain (e.g., api.yourdomain.com)
- Add the CNAME record Render shows you to your DNS provider
- Wait for DNS propagation (usually under 5 minutes with Cloudflare)
- Render auto-provisions an SSL certificate via Let's Encrypt - For databases: Use Render's PostgreSQL instead of SQLite
- For file uploads: Store in AWS S3, Cloudflare R2, or Supabase Storage
- For caching: Use Redis (Render offers a free Redis instance) or an external cache
- Render Disks: Paid feature ($0.25/GB/month) adds persistent disk storage - Side project APIs: A FastAPI or Express backend for your portfolio app — cold starts are acceptable, and 750 hours covers real usage
- Internal tools: An admin dashboard or automation API used by a few people — cold starts are tolerable for internal users
- Static websites: Portfolios, documentation sites, marketing pages — always on, globally cached, zero cost
- Demos and prototypes: Showing something to a client or investor — spin up a real deployed URL, not localhost
- Cron jobs: Daily database cleanups, scheduled reports, periodic data syncs
- Webhooks: Receiving webhooks from GitHub, Stripe, or other services — the webhook wakes the service if needed - Cold starts hurt real users: If your service is public-facing and 50-second first loads are unacceptable
- You need persistent disk: For file uploads, SQLite, or any write-to-disk workflow
- Database expiration is a problem: The 90-day free database limit is untenable for production data
- You need more compute: The free tier gives you 0.1 CPU and 512MB RAM — enough for light workloads, not for AI inference or heavy processing
- You need team collaboration: Multiple developers, role-based access, audit logs - Supabase vs Neon: Which Free PostgreSQL Database Should You Use in 2026?
- Vercel vs Netlify vs Cloudflare Pages: Free Frontend Hosting Compared
- Railway App Review 2026: The Best Heroku Alternative for Developers
- Oracle Cloud Always Free: Get a 4-Core 24GB ARM VPS for Free
- 7 Best Free Web Hosting for Developers: Cloudflare Pages, Vercel, Netlify and More