// Builder abstracts an image build backend: it turns a BuildJob into a
// pushed container image and returns the tag the orchestrator deploys.
type Builder interface {
	// Build runs the build described by job and returns the pushed image tag.
	Build(ctx context.Context, job BuildJob) (imageTag string, err error)
}
// Builder abstracts an image build backend: it turns a BuildJob into a
// pushed container image and returns the tag the orchestrator deploys.
type Builder interface {
	// Build runs the build described by job and returns the pushed image tag.
	Build(ctx context.Context, job BuildJob) (imageTag string, err error)
}
// Builder abstracts an image build backend: it turns a BuildJob into a
// pushed container image and returns the tag the orchestrator deploys.
type Builder interface {
	// Build runs the build described by job and returns the pushed image tag.
	Build(ctx context.Context, job BuildJob) (imageTag string, err error)
}
// Build Jobs run Buildah without privileged mode: chroot isolation plus
// the minimal capability set below is enough for VFS-backed builds.
bJob := &batchv1.Job{
	ObjectMeta: metav1.ObjectMeta{
		// jobName must be a valid DNS-1123 name, e.g. "build-dep-abc123"
		// (underscores are rejected by the API server).
		Name: jobName,
		// Keep in sync with the polling/log/cleanup helpers, which all
		// look the Job up under buildahNamespace.
		Namespace: buildahNamespace,
		Labels: map[string]string{
			"staxa.dev/deployment-id": job.DeploymentID,
		},
	},
	Spec: batchv1.JobSpec{
		BackoffLimit: &zero, // No retries — fail fast and surface the logs
		Template: corev1.PodTemplateSpec{
			Spec: corev1.PodSpec{
				RestartPolicy: corev1.RestartPolicyNever,
				Containers: []corev1.Container{{
					Name:    "buildah",
					Image:   cfg.BuilderImage,
					Command: []string{"sh", "-c", buildScript()},
					Env:     env,
					SecurityContext: &corev1.SecurityContext{
						RunAsUser: &rootUser,
						// Minimal capabilities for chroot-isolated builds;
						// avoids granting a fully privileged container.
						Capabilities: &corev1.Capabilities{
							Add: []corev1.Capability{
								"CHOWN", "DAC_OVERRIDE", "FOWNER",
								"SETUID", "SETGID", "SETFCAP", "SYS_CHROOT",
							},
						},
					},
				}},
			},
		},
	},
}
// Build Jobs run Buildah without privileged mode: chroot isolation plus
// the minimal capability set below is enough for VFS-backed builds.
bJob := &batchv1.Job{
	ObjectMeta: metav1.ObjectMeta{
		// jobName must be a valid DNS-1123 name, e.g. "build-dep-abc123"
		// (underscores are rejected by the API server).
		Name: jobName,
		// Keep in sync with the polling/log/cleanup helpers, which all
		// look the Job up under buildahNamespace.
		Namespace: buildahNamespace,
		Labels: map[string]string{
			"staxa.dev/deployment-id": job.DeploymentID,
		},
	},
	Spec: batchv1.JobSpec{
		BackoffLimit: &zero, // No retries — fail fast and surface the logs
		Template: corev1.PodTemplateSpec{
			Spec: corev1.PodSpec{
				RestartPolicy: corev1.RestartPolicyNever,
				Containers: []corev1.Container{{
					Name:    "buildah",
					Image:   cfg.BuilderImage,
					Command: []string{"sh", "-c", buildScript()},
					Env:     env,
					SecurityContext: &corev1.SecurityContext{
						RunAsUser: &rootUser,
						// Minimal capabilities for chroot-isolated builds;
						// avoids granting a fully privileged container.
						Capabilities: &corev1.Capabilities{
							Add: []corev1.Capability{
								"CHOWN", "DAC_OVERRIDE", "FOWNER",
								"SETUID", "SETGID", "SETFCAP", "SYS_CHROOT",
							},
						},
					},
				}},
			},
		},
	},
}
// Build Jobs run Buildah without privileged mode: chroot isolation plus
// the minimal capability set below is enough for VFS-backed builds.
bJob := &batchv1.Job{
	ObjectMeta: metav1.ObjectMeta{
		// jobName must be a valid DNS-1123 name, e.g. "build-dep-abc123"
		// (underscores are rejected by the API server).
		Name: jobName,
		// Keep in sync with the polling/log/cleanup helpers, which all
		// look the Job up under buildahNamespace.
		Namespace: buildahNamespace,
		Labels: map[string]string{
			"staxa.dev/deployment-id": job.DeploymentID,
		},
	},
	Spec: batchv1.JobSpec{
		BackoffLimit: &zero, // No retries — fail fast and surface the logs
		Template: corev1.PodTemplateSpec{
			Spec: corev1.PodSpec{
				RestartPolicy: corev1.RestartPolicyNever,
				Containers: []corev1.Container{{
					Name:    "buildah",
					Image:   cfg.BuilderImage,
					Command: []string{"sh", "-c", buildScript()},
					Env:     env,
					SecurityContext: &corev1.SecurityContext{
						RunAsUser: &rootUser,
						// Minimal capabilities for chroot-isolated builds;
						// avoids granting a fully privileged container.
						Capabilities: &corev1.Capabilities{
							Add: []corev1.Capability{
								"CHOWN", "DAC_OVERRIDE", "FOWNER",
								"SETUID", "SETGID", "SETFCAP", "SYS_CHROOT",
							},
						},
					},
				}},
			},
		},
	},
}
# Abort on any command failure AND on expansion of unset variables, so a
# missing REPO_URL/BRANCH/IMAGE_TAG fails loudly instead of producing an
# empty-string flag.
set -eu

# Inject GitHub token for private repos if available.
# NOTE(review): the token becomes part of the clone URL, so it is visible
# in the pod's process list while git runs — acceptable for short-lived
# build pods, but worth confirming.
CLONE_URL="$REPO_URL"
if [ -n "${GITHUB_TOKEN:-}" ]; then
    CLONE_URL="$(echo "$REPO_URL" | \
        sed "s|https://|https://x-access-token:${GITHUB_TOKEN}@|")"
fi

# Shallow clone — only the branch we need, one commit deep.
git clone --depth 1 --branch "$BRANCH" "$CLONE_URL" /workspace
cd /workspace

# Build with VFS storage driver + chroot isolation (no privileged mode
# needed). BUILD_ARGS is deliberately unquoted so it word-splits into
# multiple --build-arg flags; default it to empty so `set -u` tolerates
# builds with no extra args.
buildah --storage-driver vfs build \
    --isolation chroot \
    --layers \
    ${BUILD_ARGS:-} \
    --tag "$IMAGE_TAG" \
    --file "$DOCKERFILE_PATH" \
    "${BUILD_CONTEXT:-.}"

# Push to internal registry (TLS disabled for in-cluster registry).
buildah --storage-driver vfs push \
    --tls-verify=false \
    "$IMAGE_TAG" \
    "docker://$IMAGE_TAG"
# Abort on any command failure AND on expansion of unset variables, so a
# missing REPO_URL/BRANCH/IMAGE_TAG fails loudly instead of producing an
# empty-string flag.
set -eu

# Inject GitHub token for private repos if available.
# NOTE(review): the token becomes part of the clone URL, so it is visible
# in the pod's process list while git runs — acceptable for short-lived
# build pods, but worth confirming.
CLONE_URL="$REPO_URL"
if [ -n "${GITHUB_TOKEN:-}" ]; then
    CLONE_URL="$(echo "$REPO_URL" | \
        sed "s|https://|https://x-access-token:${GITHUB_TOKEN}@|")"
fi

# Shallow clone — only the branch we need, one commit deep.
git clone --depth 1 --branch "$BRANCH" "$CLONE_URL" /workspace
cd /workspace

# Build with VFS storage driver + chroot isolation (no privileged mode
# needed). BUILD_ARGS is deliberately unquoted so it word-splits into
# multiple --build-arg flags; default it to empty so `set -u` tolerates
# builds with no extra args.
buildah --storage-driver vfs build \
    --isolation chroot \
    --layers \
    ${BUILD_ARGS:-} \
    --tag "$IMAGE_TAG" \
    --file "$DOCKERFILE_PATH" \
    "${BUILD_CONTEXT:-.}"

# Push to internal registry (TLS disabled for in-cluster registry).
buildah --storage-driver vfs push \
    --tls-verify=false \
    "$IMAGE_TAG" \
    "docker://$IMAGE_TAG"
# Abort on any command failure AND on expansion of unset variables, so a
# missing REPO_URL/BRANCH/IMAGE_TAG fails loudly instead of producing an
# empty-string flag.
set -eu

# Inject GitHub token for private repos if available.
# NOTE(review): the token becomes part of the clone URL, so it is visible
# in the pod's process list while git runs — acceptable for short-lived
# build pods, but worth confirming.
CLONE_URL="$REPO_URL"
if [ -n "${GITHUB_TOKEN:-}" ]; then
    CLONE_URL="$(echo "$REPO_URL" | \
        sed "s|https://|https://x-access-token:${GITHUB_TOKEN}@|")"
fi

# Shallow clone — only the branch we need, one commit deep.
git clone --depth 1 --branch "$BRANCH" "$CLONE_URL" /workspace
cd /workspace

# Build with VFS storage driver + chroot isolation (no privileged mode
# needed). BUILD_ARGS is deliberately unquoted so it word-splits into
# multiple --build-arg flags; default it to empty so `set -u` tolerates
# builds with no extra args.
buildah --storage-driver vfs build \
    --isolation chroot \
    --layers \
    ${BUILD_ARGS:-} \
    --tag "$IMAGE_TAG" \
    --file "$DOCKERFILE_PATH" \
    "${BUILD_CONTEXT:-.}"

# Push to internal registry (TLS disabled for in-cluster registry).
buildah --storage-driver vfs push \
    --tls-verify=false \
    "$IMAGE_TAG" \
    "docker://$IMAGE_TAG"
{registry}/tenants/{tenant_id}/{service_name}:{version}
{registry}/tenants/{tenant_id}/{service_name}:{version}
{registry}/tenants/{tenant_id}/{service_name}:{version}
// Image path encodes tenant isolation and version history:
//   {registry}/tenants/{tenant_id}/{service_name}:{version}
// Version is an incrementing integer from the database, so tags never collide.
imageTag := fmt.Sprintf("%s/tenants/%s/%s:%d", job.RegistryURL, job.TenantID, name, job.Version)
// Image path encodes tenant isolation and version history:
//   {registry}/tenants/{tenant_id}/{service_name}:{version}
// Version is an incrementing integer from the database, so tags never collide.
imageTag := fmt.Sprintf("%s/tenants/%s/%s:%d", job.RegistryURL, job.TenantID, name, job.Version)
// Image path encodes tenant isolation and version history:
//   {registry}/tenants/{tenant_id}/{service_name}:{version}
// Version is an incrementing integer from the database, so tags never collide.
imageTag := fmt.Sprintf("%s/tenants/%s/%s:%d", job.RegistryURL, job.TenantID, name, job.Version)
// resolveGitHubToken picks the GitHub credentials the build pod will see.
//
// Tier 1: a per-deployment token (from a GitHub App installation) wins and
// is injected directly as a plain env var value.
// Tier 2: otherwise, fall back to the cluster-level "github-credentials"
// secret when it exists, referenced via a SecretKeyRef.
// If neither is available, nil is returned and only public repos work.
//
// NOTE(review): any Get error — not only NotFound — is treated as "no
// secret", so a transient API failure silently downgrades to public-only.
func (b *BuildahBuilder) resolveGitHubToken(ctx context.Context, token string) []corev1.EnvVar {
	// Tier 1: explicit per-deployment token.
	if token != "" {
		return []corev1.EnvVar{{Name: "GITHUB_TOKEN", Value: token}}
	}

	// Tier 2: cluster-level secret, only if it is present.
	const secretName = "github-credentials"
	if _, err := b.client.CoreV1().Secrets(buildahNamespace).
		Get(ctx, secretName, metav1.GetOptions{}); err != nil {
		// No token available — public repo only.
		return nil
	}

	src := &corev1.EnvVarSource{
		SecretKeyRef: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
			Key:                  "token",
		},
	}
	return []corev1.EnvVar{{Name: "GITHUB_TOKEN", ValueFrom: src}}
}
// resolveGitHubToken picks the GitHub credentials the build pod will see.
//
// Tier 1: a per-deployment token (from a GitHub App installation) wins and
// is injected directly as a plain env var value.
// Tier 2: otherwise, fall back to the cluster-level "github-credentials"
// secret when it exists, referenced via a SecretKeyRef.
// If neither is available, nil is returned and only public repos work.
//
// NOTE(review): any Get error — not only NotFound — is treated as "no
// secret", so a transient API failure silently downgrades to public-only.
func (b *BuildahBuilder) resolveGitHubToken(ctx context.Context, token string) []corev1.EnvVar {
	// Tier 1: explicit per-deployment token.
	if token != "" {
		return []corev1.EnvVar{{Name: "GITHUB_TOKEN", Value: token}}
	}

	// Tier 2: cluster-level secret, only if it is present.
	const secretName = "github-credentials"
	if _, err := b.client.CoreV1().Secrets(buildahNamespace).
		Get(ctx, secretName, metav1.GetOptions{}); err != nil {
		// No token available — public repo only.
		return nil
	}

	src := &corev1.EnvVarSource{
		SecretKeyRef: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
			Key:                  "token",
		},
	}
	return []corev1.EnvVar{{Name: "GITHUB_TOKEN", ValueFrom: src}}
}
// resolveGitHubToken picks the GitHub credentials the build pod will see.
//
// Tier 1: a per-deployment token (from a GitHub App installation) wins and
// is injected directly as a plain env var value.
// Tier 2: otherwise, fall back to the cluster-level "github-credentials"
// secret when it exists, referenced via a SecretKeyRef.
// If neither is available, nil is returned and only public repos work.
//
// NOTE(review): any Get error — not only NotFound — is treated as "no
// secret", so a transient API failure silently downgrades to public-only.
func (b *BuildahBuilder) resolveGitHubToken(ctx context.Context, token string) []corev1.EnvVar {
	// Tier 1: explicit per-deployment token.
	if token != "" {
		return []corev1.EnvVar{{Name: "GITHUB_TOKEN", Value: token}}
	}

	// Tier 2: cluster-level secret, only if it is present.
	const secretName = "github-credentials"
	if _, err := b.client.CoreV1().Secrets(buildahNamespace).
		Get(ctx, secretName, metav1.GetOptions{}); err != nil {
		// No token available — public repo only.
		return nil
	}

	src := &corev1.EnvVarSource{
		SecretKeyRef: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
			Key:                  "token",
		},
	}
	return []corev1.EnvVar{{Name: "GITHUB_TOKEN", ValueFrom: src}}
}
func (b *BuildahBuilder) registryCredentials(ctx context.Context) ([]corev1.Volume, []corev1.VolumeMount) { _, err := b.client.CoreV1().Secrets(buildahNamespace). Get(ctx, "registry-credentials", metav1.GetOptions{}) if err != nil { return nil, nil // No credentials, internal registry, no auth needed } volumes := []corev1.Volume{{ Name: "registry-credentials", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: "registry-credentials", }, }, }} mounts := []corev1.VolumeMount{{ Name: "registry-credentials", MountPath: "/root/.docker/config.json", SubPath: "config.json", ReadOnly: true, }} return volumes, mounts
}
func (b *BuildahBuilder) registryCredentials(ctx context.Context) ([]corev1.Volume, []corev1.VolumeMount) { _, err := b.client.CoreV1().Secrets(buildahNamespace). Get(ctx, "registry-credentials", metav1.GetOptions{}) if err != nil { return nil, nil // No credentials, internal registry, no auth needed } volumes := []corev1.Volume{{ Name: "registry-credentials", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: "registry-credentials", }, }, }} mounts := []corev1.VolumeMount{{ Name: "registry-credentials", MountPath: "/root/.docker/config.json", SubPath: "config.json", ReadOnly: true, }} return volumes, mounts
}
func (b *BuildahBuilder) registryCredentials(ctx context.Context) ([]corev1.Volume, []corev1.VolumeMount) { _, err := b.client.CoreV1().Secrets(buildahNamespace). Get(ctx, "registry-credentials", metav1.GetOptions{}) if err != nil { return nil, nil // No credentials, internal registry, no auth needed } volumes := []corev1.Volume{{ Name: "registry-credentials", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: "registry-credentials", }, }, }} mounts := []corev1.VolumeMount{{ Name: "registry-credentials", MountPath: "/root/.docker/config.json", SubPath: "config.json", ReadOnly: true, }} return volumes, mounts
}
// Poll the Job every 5s for up to 10 minutes until it succeeds or fails.
// With BackoffLimit 0, a single failed pod means the build is dead.
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 10*time.Minute, false,
	func(ctx context.Context) (bool, error) {
		j, err := b.client.BatchV1().Jobs(buildahNamespace).
			Get(ctx, jobName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if j.Status.Succeeded >= 1 {
			return true, nil
		}
		if j.Status.Failed >= 1 {
			// Build failed: record the error (plus the pod's last log lines
			// when available) and stop polling by returning a non-nil error.
			buildErr = fmt.Errorf("buildah: build failed for %s", imageTag)
			if logs := b.tailBuildLogs(ctx, jobName); logs != "" {
				buildErr = fmt.Errorf("%w\nbuild logs:\n%s", buildErr, logs)
			}
			return false, buildErr
		}
		// Still running — keep polling.
		return false, nil
	},
)
// Poll the Job every 5s for up to 10 minutes until it succeeds or fails.
// With BackoffLimit 0, a single failed pod means the build is dead.
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 10*time.Minute, false,
	func(ctx context.Context) (bool, error) {
		j, err := b.client.BatchV1().Jobs(buildahNamespace).
			Get(ctx, jobName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if j.Status.Succeeded >= 1 {
			return true, nil
		}
		if j.Status.Failed >= 1 {
			// Build failed: record the error (plus the pod's last log lines
			// when available) and stop polling by returning a non-nil error.
			buildErr = fmt.Errorf("buildah: build failed for %s", imageTag)
			if logs := b.tailBuildLogs(ctx, jobName); logs != "" {
				buildErr = fmt.Errorf("%w\nbuild logs:\n%s", buildErr, logs)
			}
			return false, buildErr
		}
		// Still running — keep polling.
		return false, nil
	},
)
// Poll the Job every 5s for up to 10 minutes until it succeeds or fails.
// With BackoffLimit 0, a single failed pod means the build is dead.
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 10*time.Minute, false,
	func(ctx context.Context) (bool, error) {
		j, err := b.client.BatchV1().Jobs(buildahNamespace).
			Get(ctx, jobName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if j.Status.Succeeded >= 1 {
			return true, nil
		}
		if j.Status.Failed >= 1 {
			// Build failed: record the error (plus the pod's last log lines
			// when available) and stop polling by returning a non-nil error.
			buildErr = fmt.Errorf("buildah: build failed for %s", imageTag)
			if logs := b.tailBuildLogs(ctx, jobName); logs != "" {
				buildErr = fmt.Errorf("%w\nbuild logs:\n%s", buildErr, logs)
			}
			return false, buildErr
		}
		// Still running — keep polling.
		return false, nil
	},
)
func (b *BuildahBuilder) tailBuildLogs(ctx context.Context, jobName string) string { pods, err := b.client.CoreV1().Pods(buildahNamespace).List(ctx, metav1.ListOptions{LabelSelector: "job-name=" + jobName}) if err != nil || len(pods.Items) == 0 { return "" } tailLines := int64(50) req := b.client.CoreV1().Pods(buildahNamespace). GetLogs(pods.Items[0].Name, &corev1.PodLogOptions{TailLines: &tailLines}) rc, err := req.Stream(ctx) if err != nil { return "" } defer rc.Close() var buf bytes.Buffer io.Copy(&buf, rc) return buf.String()
}
func (b *BuildahBuilder) tailBuildLogs(ctx context.Context, jobName string) string { pods, err := b.client.CoreV1().Pods(buildahNamespace).List(ctx, metav1.ListOptions{LabelSelector: "job-name=" + jobName}) if err != nil || len(pods.Items) == 0 { return "" } tailLines := int64(50) req := b.client.CoreV1().Pods(buildahNamespace). GetLogs(pods.Items[0].Name, &corev1.PodLogOptions{TailLines: &tailLines}) rc, err := req.Stream(ctx) if err != nil { return "" } defer rc.Close() var buf bytes.Buffer io.Copy(&buf, rc) return buf.String()
}
func (b *BuildahBuilder) tailBuildLogs(ctx context.Context, jobName string) string { pods, err := b.client.CoreV1().Pods(buildahNamespace).List(ctx, metav1.ListOptions{LabelSelector: "job-name=" + jobName}) if err != nil || len(pods.Items) == 0 { return "" } tailLines := int64(50) req := b.client.CoreV1().Pods(buildahNamespace). GetLogs(pods.Items[0].Name, &corev1.PodLogOptions{TailLines: &tailLines}) rc, err := req.Stream(ctx) if err != nil { return "" } defer rc.Close() var buf bytes.Buffer io.Copy(&buf, rc) return buf.String()
}
func (b *BuildahBuilder) cleanupBuildJob(ctx context.Context, jobName string) { prop := metav1.DeletePropagationBackground b.client.BatchV1().Jobs(buildahNamespace).Delete(ctx, jobName, metav1.DeleteOptions{PropagationPolicy: &prop})
}
func (b *BuildahBuilder) cleanupBuildJob(ctx context.Context, jobName string) { prop := metav1.DeletePropagationBackground b.client.BatchV1().Jobs(buildahNamespace).Delete(ctx, jobName, metav1.DeleteOptions{PropagationPolicy: &prop})
}
func (b *BuildahBuilder) cleanupBuildJob(ctx context.Context, jobName string) { prop := metav1.DeletePropagationBackground b.client.BatchV1().Jobs(buildahNamespace).Delete(ctx, jobName, metav1.DeleteOptions{PropagationPolicy: &prop})
}
// Builder abstracts an image build backend: it turns a BuildJob into a
// pushed container image and returns the tag the orchestrator deploys.
type Builder interface {
	// Build runs the build described by job and returns the pushed image tag.
	Build(ctx context.Context, job BuildJob) (imageTag string, err error)
}
// Builder abstracts an image build backend: it turns a BuildJob into a
// pushed container image and returns the tag the orchestrator deploys.
type Builder interface {
	// Build runs the build described by job and returns the pushed image tag.
	Build(ctx context.Context, job BuildJob) (imageTag string, err error)
}
type Builder interface { Build(ctx context.Context, job BuildJob) (imageTag string, err error)
} - User pushes source code (GitHub repo) or provides a Dockerfile
- The platform needs to turn that source into a container image
- The image gets pushed to an internal registry
- The orchestrator deploys it into the tenant's isolated namespace
All of this happens inside a Kubernetes cluster. There's no external CI service, no GitHub Actions, no cloud build service. The build runs as a Kubernetes Job in the staxa-system namespace.

Constraints:

- Must work on a single-node k3s cluster (ARM64)
- Must handle private GitHub repos (token injection)
- Must support auto-generated Dockerfiles (framework detection feeds into this)
- Must clean up after itself (no build job accumulation)
- Must surface build logs back to the user on failure

The build flow:

- staxad (Go API) receives deploy request
- Creates a Kubernetes Job running Buildah
- Job clones repo → builds image → pushes to registry
- Go API polls Job status until success or failure
- On failure: captures last 50 lines of build logs
- On completion: cleans up the Job

This image-path scheme provides:

- Tenant isolation in the registry: each tenant's images live under their own path
- Multi-service support: a tenant with a frontend and backend gets separate image paths
- Version history: rollback is just redeploying a previous version number
- No tag collisions: version is an incrementing integer from the database