$ sudo apt update && sudo apt upgrade -y
sudo apt update && sudo apt upgrade -y
sudo apt update && sudo apt upgrade -y
sudo systemctl enable ssh
sudo systemctl start ssh
sudo systemctl enable ssh
sudo systemctl start ssh
sudo systemctl enable ssh
sudo systemctl start ssh
ssh username@your-agents-local-ip
ssh username@your-agents-local-ip
ssh username@your-agents-local-ip
curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash -
sudo apt-get install -y nodejs
curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash -
sudo apt-get install -y nodejs
curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash -
sudo apt-get install -y nodejs
node --version
# Should output v20.x.x
npm --version
# Should output 10.x.x or higher
node --version
# Should output v20.x.x
npm --version
# Should output 10.x.x or higher
node --version
# Should output v20.x.x
npm --version
# Should output 10.x.x or higher
sudo npm install pm2 -g
sudo npm install pm2 -g
sudo npm install pm2 -g
npm install -g openclaw
openclaw init
npm install -g openclaw
openclaw init
npm install -g openclaw
openclaw init
pm2 start "openclaw gateway start" --name openclaw
pm2 save
pm2 start "openclaw gateway start" --name openclaw
pm2 save
pm2 start "openclaw gateway start" --name openclaw
pm2 save
pm2 startup
pm2 startup
pm2 startup
pm2 status openclaw
pm2 logs openclaw --lines 50
pm2 monit
pm2 status openclaw
pm2 logs openclaw --lines 50
pm2 monit
pm2 status openclaw
pm2 logs openclaw --lines 50
pm2 monit
curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash
curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash
curl -fsSL https://raw.githubusercontent.com/NousResearch/hermes-agent/main/scripts/install.sh | bash
source ~/.bashrc # or source ~/.zshrc
source ~/.bashrc # or source ~/.zshrc
source ~/.bashrc # or source ~/.zshrc
hermes --version
hermes --version
hermes --version
hermes setup
hermes setup
hermes setup
sudo npm install pm2 -g
sudo npm install pm2 -g
sudo npm install pm2 -g
pm2 start "hermes gateway start" --name hermes
pm2 save
pm2 start "hermes gateway start" --name hermes
pm2 save
pm2 start "hermes gateway start" --name hermes
pm2 save
pm2 startup
pm2 startup
pm2 startup
hermes # Open the interactive TUI
hermes status # Check the current session
hermes tools # View and configure available tools
hermes skills # Browse installed skills
hermes # Open the interactive TUI
hermes status # Check the current session
hermes tools # View and configure available tools
hermes skills # Browse installed skills
hermes # Open the interactive TUI
hermes status # Check the current session
hermes tools # View and configure available tools
hermes skills # Browse installed skills
curl -fsSL https://tailscale.com/install.sh | sh
sudo tailscale up
curl -fsSL https://tailscale.com/install.sh | sh
sudo tailscale up
curl -fsSL https://tailscale.com/install.sh | sh
sudo tailscale up
- Extremely low power consumption (under $2/month on most electricity plans).
- Silent operation — no fans are needed for light-to-moderate workloads.
- Tiny footprint — fits on a shelf, behind a monitor, or in a drawer.
- Large community support — if something goes wrong, someone has already solved it.
- ARM architecture can occasionally cause compatibility issues with certain Docker images or niche Python packages.
- Storage is typically SD card-based, which is slower and less reliable than SSD. (Use an external SSD via USB 3.0 for better performance and longevity.)
- Not ideal if you plan to run local LLMs alongside your agent — the Pi lacks the RAM and compute for that use case.
- The best price is free.
- Laptops have a built-in UPS — the battery keeps the agent running through short power outages.
- Standard x86 architecture means maximum software compatibility.
- Typically has more RAM and faster storage than a Raspberry Pi.
- Can consume significantly more power (20-60 watts idle for a desktop).
- Desktops are noisy and take up physical space.
- Older laptops may have degraded batteries or failing storage.
- Not as visually appealing to leave on a desk 24/7.
- x86 architecture — full compatibility with all software.
- Significantly more powerful than a Raspberry Pi.
- Still very small and relatively quiet.
- SSD storage means fast boot times and reliable data.
- Can comfortably handle multiple agents running simultaneously (e.g., OpenClaw + Hermes).
- Requires an upfront investment, though the payback period versus a VPS is under six months.
- Still draws slightly more power than a Raspberry Pi (6-15 watts idle).
- Best For: Users who want their agent to interact with their local filesystem, run browser automation, manage home servers, or act as a persistent local assistant.
- Strengths: Excellent WebSocket and browser integration. Mature process management with pm2. Strong local tool support. Active community around OpenClaw-specific skills and plugins.
- Weaknesses: Primarily focused on single-machine operation. Multi-agent parallelism is less developed.
- Language: Node.js / TypeScript
- Best For: Users who want an agent that builds skills from experience, maintains persistent memory across sessions, and can delegate work to sub-agents.
- Strengths: Autonomous skill creation and improvement. Full-featured TUI (text user interface). Supports six different terminal backends (local, Docker, SSH, Daytona, Singularity, Modal). Native cross-platform support (Telegram, Discord, Slack, WhatsApp, Signal). Built-in cron scheduler for automations.
- Weaknesses: Relatively newer project, so the plugin ecosystem is still growing. Windows is not natively supported (requires WSL2).
- Language: Python
- For Raspberry Pi: Raspberry Pi OS Lite (64-bit). Flash it using the official Raspberry Pi Imager.
- For Mini PC / Old PC: Ubuntu Server 24.04 LTS or Debian 12. Both are free, extremely stable, and well-supported.
- Selecting your LLM provider and model
- Configuring your messaging gateway (Telegram, Discord, Slack, WhatsApp, Signal)
- Setting up API keys
- Enabling or disabling tools
- SSH directly to it from anywhere.
- Access the OpenClaw web UI or Hermes gateway.
- Transfer files securely.
- Check if PM2 is running: pm2 list. If it's empty, the PM2 startup script isn't configured correctly. Run pm2 startup again and follow the instructions.
- Ensure the agent was saved in PM2's process list before rebooting: pm2 save.
- OpenClaw and Hermes are relatively lightweight, but if you're also running local LLMs (like Ollama), memory can fill up quickly. A Raspberry Pi with 4GB is fine for the agent alone but will struggle with an additional large model.
- Use pm2 monit or htop to monitor resource usage. If a process is using too much memory, consider restarting it: pm2 restart openclaw or pm2 restart hermes.
- Never expose your agent's ports directly to the internet. Always use Tailscale, SSH tunnels, or a reverse proxy with authentication.
- Keep your OS and software updated. Set up unattended upgrades: sudo apt install unattended-upgrades && sudo dpkg-reconfigure --priority=low unattended-upgrades.
- Enable a firewall. Use ufw (Uncomplicated Firewall): sudo ufw enable. Only allow SSH (port 22) and the Tailscale interface.
- Rotate your API keys. Don't leave your OpenAI, Anthropic, or OpenRouter keys in plaintext where other processes on the machine might access them. Use environment variables or a .env file with restricted permissions.
- Business-Critical Operations: If your agent is handling customer-facing tasks, processing payments, or managing critical workflows, the risk of a 10-minute outage due to a home internet hiccup could be costly. Managed hosting providers offer SLA-backed uptime, redundant networking, and enterprise-grade infrastructure.
- Team Collaboration: When multiple team members need to interact with or manage the agent, a cloud-hosted solution is more accessible and easier to permission than a home machine behind your router.
- Zero Maintenance Preference: Some people simply don't want to think about hardware, OS updates, or process restarts. They want the agent to work, full stop. That's exactly what a managed service delivers.
- Scaling Requirements: If you plan to run large models locally (70B+ parameter LLMs) or need GPU-accelerated inference, a home Mini PC won't cut it. Managed providers offer GPU-equipped instances for heavy workloads.