Here's the basic version: a nightly workflow that runs the export and pings DeadManCheck only when it succeeds.

```yaml
name: Nightly export

on:
  schedule:
    - cron: '0 2 * * *'  # 2am UTC every day
  workflow_dispatch:     # allows manual triggering for testing

jobs:
  export:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Run export
        run: python scripts/export.py

      - name: Ping DeadManCheck
        if: success()
        run: curl -fsS https://deadmancheck.io/ping/${{ secrets.DEADMANCHECK_TOKEN }} > /dev/null
```
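The same ping can also live inside the script rather than in a separate workflow step. Here's a minimal sketch, assuming the token is exposed to the script as a `DEADMANCHECK_TOKEN` environment variable; the `ping()` helper and env var name are illustrative, not an official DeadManCheck client:

```python
# Hypothetical in-script ping helper; DeadManCheck's real client, if one
# exists, may look different. Only the URL shape comes from the workflow.
import os
import urllib.request

def ping(suffix: str = "") -> None:
    """Fire-and-forget GET to the ping URL; errors are swallowed so a
    monitoring outage can never fail the job itself."""
    token = os.environ.get("DEADMANCHECK_TOKEN", "")
    if not token:
        return  # monitoring not configured; skip silently
    url = f"https://deadmancheck.io/ping/{token}{suffix}"
    try:
        with urllib.request.urlopen(url, timeout=10):
            pass
    except Exception:
        pass

if __name__ == "__main__":
    ping()  # same success ping the workflow sends with curl
```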
A success-only ping tells you the job finished, but not when it started or why it died. The next iteration adds explicit start and fail pings and reports a row count with the done ping:

```yaml
steps:
  - uses: actions/checkout@v4

  - name: Ping start
    run: curl -fsS https://deadmancheck.io/ping/${{ secrets.DEADMANCHECK_TOKEN }}/start > /dev/null || true

  - name: Run ETL
    id: etl
    run: |
      python scripts/run_etl.py
      echo "rows=$(cat /tmp/etl_row_count.txt)" >> $GITHUB_OUTPUT

  - name: Ping done
    if: success()
    run: |
      curl -fsS \
        "https://deadmancheck.io/ping/${{ secrets.DEADMANCHECK_TOKEN }}?count=${{ steps.etl.outputs.rows }}" \
        > /dev/null || true

  - name: Ping fail
    if: failure()
    run: curl -fsS https://deadmancheck.io/ping/${{ secrets.DEADMANCHECK_TOKEN }}/fail > /dev/null || true
```
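The workflow only assumes that `scripts/run_etl.py` leaves its row count in `/tmp/etl_row_count.txt`. A minimal sketch of that contract follows; the ETL body itself is a placeholder for whatever your job actually does:

```python
# scripts/run_etl.py (sketch): the ETL logic is a stand-in; the point is
# the row-count handoff the workflow step reads into $GITHUB_OUTPUT.
from pathlib import Path

def run_etl() -> int:
    """Run the ETL and return how many rows were processed."""
    processed = 0
    # ... extract, transform, load; count rows as they land ...
    return processed

def main() -> None:
    count = run_etl()
    # The next shell line in the workflow cats this file into
    # $GITHUB_OUTPUT, which is how the count reaches the done ping.
    Path("/tmp/etl_row_count.txt").write_text(str(count))

if __name__ == "__main__":
    main()
```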
Because the workflow declares `workflow_dispatch`, you can trigger a test run by hand instead of waiting for 2am:

```bash
gh workflow run nightly-export.yml
```
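From there, `gh run list --workflow=nightly-export.yml` shows recent runs and `gh run watch` follows one live, which is handy for confirming the pings fire in the right order.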
Putting it all together: a nightly database backup with start/done/fail pings, a row count, and a hard job timeout.

```yaml
name: Nightly database backup

on:
  schedule:
    - cron: '0 2 * * *'
  workflow_dispatch:

jobs:
  backup:
    runs-on: ubuntu-latest
    timeout-minutes: 30  # hard limit; prevents hung jobs accumulating
    steps:
      - uses: actions/checkout@v4

      - name: Ping start
        run: |
          curl -fsS \
            "https://deadmancheck.io/ping/${{ secrets.DEADMANCHECK_TOKEN }}/start" \
            > /dev/null || true  # don't fail if monitoring is down

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1

      - name: Run backup
        id: backup
        run: |
          python scripts/backup.py
          echo "rows=$(cat /tmp/backup_row_count.txt)" >> $GITHUB_OUTPUT

      - name: Upload to S3
        run: aws s3 cp /backups/latest.dump s3://my-backups/

      - name: Ping done
        if: success()
        run: |
          curl -fsS \
            "https://deadmancheck.io/ping/${{ secrets.DEADMANCHECK_TOKEN }}?count=${{ steps.backup.outputs.rows }}" \
            > /dev/null || true

      - name: Ping fail
        if: failure()
        run: |
          curl -fsS \
            "https://deadmancheck.io/ping/${{ secrets.DEADMANCHECK_TOKEN }}/fail" \
            > /dev/null || true
```
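As with the ETL sketch above, `scripts/backup.py` is assumed to write its row count to `/tmp/backup_row_count.txt` and to produce `/backups/latest.dump` before exiting; those two paths are the only contract the workflow relies on.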
A few details worth calling out:

- `timeout-minutes: 30` is a hard ceiling. Without it, a hung job can sit there consuming a runner for up to 6 hours, GitHub's default job timeout.
- The `|| true` on the monitoring pings means a DeadManCheck outage won't cause your backup job to report as failed.
- The row count flows from the backup step through `$GITHUB_OUTPUT` to the ping step.

To verify the setup, check three things:

- The workflow runs end-to-end without errors
- DeadManCheck shows a recent ping on your monitor dashboard
- The count looks correct for what the job processed
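It's also worth exercising the failure path once: temporarily make the backup step exit non-zero (for example, add `exit 1` after `python scripts/backup.py`), run the workflow manually, and confirm the dashboard registers the `/fail` ping before reverting the change.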