// app.js
const express = require('express');
const app = express();
const PORT = process.env.PORT || 3000;

// Root endpoint: reports app metadata sourced from environment variables.
app.get('/', (req, res) => {
  res.json({
    message: 'DevOps Pipeline Demo',
    version: process.env.APP_VERSION || '1.0.0',
    environment: process.env.NODE_ENV || 'development',
  });
});

// Health endpoint consumed by the Docker HEALTHCHECK and Kubernetes probes.
app.get('/health', (req, res) => {
  res.status(200).json({ status: 'healthy' });
});

// Only bind a port when run directly (`node app.js`); when the test suite
// does `require('./app')`, supertest manages its own ephemeral listener.
if (require.main === module) {
  app.listen(PORT, () => {
    console.log(`App running on port ${PORT}`);
  });
}

module.exports = app;
// app.js
const express = require('express');
const app = express();
const PORT = process.env.PORT || 3000;

// Root endpoint: reports app metadata sourced from environment variables.
app.get('/', (req, res) => {
  res.json({
    message: 'DevOps Pipeline Demo',
    version: process.env.APP_VERSION || '1.0.0',
    environment: process.env.NODE_ENV || 'development',
  });
});

// Health endpoint consumed by the Docker HEALTHCHECK and Kubernetes probes.
app.get('/health', (req, res) => {
  res.status(200).json({ status: 'healthy' });
});

// Only bind a port when run directly (`node app.js`); when the test suite
// does `require('./app')`, supertest manages its own ephemeral listener.
if (require.main === module) {
  app.listen(PORT, () => {
    console.log(`App running on port ${PORT}`);
  });
}

module.exports = app;
// app.js
const express = require('express');
const app = express();
const PORT = process.env.PORT || 3000;

// Root endpoint: reports app metadata sourced from environment variables.
app.get('/', (req, res) => {
  res.json({
    message: 'DevOps Pipeline Demo',
    version: process.env.APP_VERSION || '1.0.0',
    environment: process.env.NODE_ENV || 'development',
  });
});

// Health endpoint consumed by the Docker HEALTHCHECK and Kubernetes probes.
app.get('/health', (req, res) => {
  res.status(200).json({ status: 'healthy' });
});

// Only bind a port when run directly (`node app.js`); when the test suite
// does `require('./app')`, supertest manages its own ephemeral listener.
if (require.main === module) {
  app.listen(PORT, () => {
    console.log(`App running on port ${PORT}`);
  });
}

module.exports = app;
// app.test.js
const request = require('supertest');
const app = require('./app');

describe('GET /', () => {
  it('should return 200 and pipeline demo message', async () => {
    const res = await request(app).get('/');
    expect(res.statusCode).toBe(200);
    expect(res.body.message).toBe('DevOps Pipeline Demo');
  });
});

describe('GET /health', () => {
  it('should return healthy status', async () => {
    const res = await request(app).get('/health');
    expect(res.statusCode).toBe(200);
    expect(res.body.status).toBe('healthy');
  });
});
// app.test.js
const request = require('supertest');
const app = require('./app');

describe('GET /', () => {
  it('should return 200 and pipeline demo message', async () => {
    const res = await request(app).get('/');
    expect(res.statusCode).toBe(200);
    expect(res.body.message).toBe('DevOps Pipeline Demo');
  });
});

describe('GET /health', () => {
  it('should return healthy status', async () => {
    const res = await request(app).get('/health');
    expect(res.statusCode).toBe(200);
    expect(res.body.status).toBe('healthy');
  });
});
// app.test.js
const request = require('supertest');
const app = require('./app');

describe('GET /', () => {
  it('should return 200 and pipeline demo message', async () => {
    const res = await request(app).get('/');
    expect(res.statusCode).toBe(200);
    expect(res.body.message).toBe('DevOps Pipeline Demo');
  });
});

describe('GET /health', () => {
  it('should return healthy status', async () => {
    const res = await request(app).get('/health');
    expect(res.statusCode).toBe(200);
    expect(res.body.status).toBe('healthy');
  });
});
// package.json
{ "name": "devops-pipeline-demo", "version": "1.0.0", "scripts": { "start": "node app.js", "test": "jest --coverage" }, "dependencies": { "express": "^4.18.2" }, "devDependencies": { "jest": "^29.0.0", "supertest": "^6.3.0" }
}
// package.json
{ "name": "devops-pipeline-demo", "version": "1.0.0", "scripts": { "start": "node app.js", "test": "jest --coverage" }, "dependencies": { "express": "^4.18.2" }, "devDependencies": { "jest": "^29.0.0", "supertest": "^6.3.0" }
}
// package.json
{ "name": "devops-pipeline-demo", "version": "1.0.0", "scripts": { "start": "node app.js", "test": "jest --coverage" }, "dependencies": { "express": "^4.18.2" }, "devDependencies": { "jest": "^29.0.0", "supertest": "^6.3.0" }
}
# Stage 1 — Build and Test
FROM node:18-alpine AS builder
WORKDIR /app

# Copy dependency files first (layer caching optimization)
# Changing app.js won't invalidate this layer if package.json is unchanged
COPY package*.json ./

# Install ALL dependencies: the test step below needs devDependencies
# (jest, supertest). Installing with --only=production here would make
# `npm test` fail inside the build.
RUN npm ci

# Copy source code
COPY . .

# Run tests in build stage
# If tests fail, the build fails — nothing proceeds
RUN npm test

# Strip devDependencies so only production node_modules reaches stage 2
RUN npm prune --production

# Stage 2 — Production Image
FROM node:18-alpine AS production

# Security: run as non-root user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodeuser -u 1001 -G nodejs

WORKDIR /app

# Copy only production dependencies from builder stage
COPY --from=builder --chown=nodeuser:nodejs /app/node_modules ./node_modules
COPY --from=builder --chown=nodeuser:nodejs /app/app.js ./app.js
COPY --from=builder --chown=nodeuser:nodejs /app/package.json ./package.json

USER nodeuser
EXPOSE 3000

# Health check built into the image (busybox wget ships with alpine)
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:3000/health || exit 1

CMD ["node", "app.js"]
# Stage 1 — Build and Test
FROM node:18-alpine AS builder
WORKDIR /app

# Copy dependency files first (layer caching optimization)
# Changing app.js won't invalidate this layer if package.json is unchanged
COPY package*.json ./

# Install ALL dependencies: the test step below needs devDependencies
# (jest, supertest). Installing with --only=production here would make
# `npm test` fail inside the build.
RUN npm ci

# Copy source code
COPY . .

# Run tests in build stage
# If tests fail, the build fails — nothing proceeds
RUN npm test

# Strip devDependencies so only production node_modules reaches stage 2
RUN npm prune --production

# Stage 2 — Production Image
FROM node:18-alpine AS production

# Security: run as non-root user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodeuser -u 1001 -G nodejs

WORKDIR /app

# Copy only production dependencies from builder stage
COPY --from=builder --chown=nodeuser:nodejs /app/node_modules ./node_modules
COPY --from=builder --chown=nodeuser:nodejs /app/app.js ./app.js
COPY --from=builder --chown=nodeuser:nodejs /app/package.json ./package.json

USER nodeuser
EXPOSE 3000

# Health check built into the image (busybox wget ships with alpine)
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:3000/health || exit 1

CMD ["node", "app.js"]
# Stage 1 — Build and Test
FROM node:18-alpine AS builder
WORKDIR /app

# Copy dependency files first (layer caching optimization)
# Changing app.js won't invalidate this layer if package.json is unchanged
COPY package*.json ./

# Install ALL dependencies: the test step below needs devDependencies
# (jest, supertest). Installing with --only=production here would make
# `npm test` fail inside the build.
RUN npm ci

# Copy source code
COPY . .

# Run tests in build stage
# If tests fail, the build fails — nothing proceeds
RUN npm test

# Strip devDependencies so only production node_modules reaches stage 2
RUN npm prune --production

# Stage 2 — Production Image
FROM node:18-alpine AS production

# Security: run as non-root user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodeuser -u 1001 -G nodejs

WORKDIR /app

# Copy only production dependencies from builder stage
COPY --from=builder --chown=nodeuser:nodejs /app/node_modules ./node_modules
COPY --from=builder --chown=nodeuser:nodejs /app/app.js ./app.js
COPY --from=builder --chown=nodeuser:nodejs /app/package.json ./package.json

USER nodeuser
EXPOSE 3000

# Health check built into the image (busybox wget ships with alpine)
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:3000/health || exit 1

CMD ["node", "app.js"]
# Build and smoke-test the image locally
docker build -t devops-pipeline-demo:local .
docker run -p 3000:3000 devops-pipeline-demo:local
curl http://localhost:3000/health
# {"status":"healthy"}
# Build and smoke-test the image locally
docker build -t devops-pipeline-demo:local .
docker run -p 3000:3000 devops-pipeline-demo:local
curl http://localhost:3000/health
# {"status":"healthy"}
# Build and smoke-test the image locally
docker build -t devops-pipeline-demo:local .
docker run -p 3000:3000 devops-pipeline-demo:local
curl http://localhost:3000/health
# {"status":"healthy"}
// Jenkinsfile
pipeline {
    agent any

    environment {
        // Registry configuration — use your actual registry
        DOCKER_REGISTRY = 'your-registry-url'
        IMAGE_NAME = 'devops-pipeline-demo'
        IMAGE_TAG = "${BUILD_NUMBER}-${GIT_COMMIT[0..7]}"
        FULL_IMAGE = "${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
        // Kubernetes namespace targets
        STAGING_NAMESPACE = 'staging'
        PRODUCTION_NAMESPACE = 'production'
    }

    stages {
        stage('Checkout') {
            steps {
                checkout scm
                script {
                    echo "Building commit: ${GIT_COMMIT}"
                    echo "Branch: ${GIT_BRANCH}"
                }
            }
        }

        stage('Code Quality Check') {
            steps {
                sh 'npm install'
                // ESLint for code quality (add .eslintrc.js to your repo)
                sh 'npx eslint . --ext .js || true'
            }
        }

        stage('Security Scan — Dependencies') {
            steps {
                // Audit npm dependencies for known vulnerabilities
                sh 'npm audit --audit-level=high'
            }
        }

        stage('Build Docker Image') {
            steps {
                script {
                    // Tests run inside the Dockerfile builder stage
                    // If tests fail, docker build fails — pipeline stops here
                    docker.build(FULL_IMAGE, '--target production .')
                    echo "Built image: ${FULL_IMAGE}"
                }
            }
        }

        stage('Security Scan — Container Image') {
            steps {
                // Trivy scans the built image for OS and library vulnerabilities
                sh """
                    trivy image \
                        --exit-code 1 \
                        --severity HIGH,CRITICAL \
                        --no-progress \
                        ${FULL_IMAGE}
                """
            }
        }

        stage('Push to Registry') {
            steps {
                script {
                    docker.withRegistry("https://${DOCKER_REGISTRY}", 'docker-registry-credentials') {
                        docker.image(FULL_IMAGE).push()
                        // Also tag as latest for the branch
                        docker.image(FULL_IMAGE).push('latest')
                    }
                    echo "Pushed: ${FULL_IMAGE}"
                }
            }
        }

        stage('Deploy to Staging') {
            steps {
                script {
                    sh """
                        helm upgrade --install \
                            devops-demo-staging \
                            ./helm/devops-demo \
                            --namespace ${STAGING_NAMESPACE} \
                            --create-namespace \
                            --set image.repository=${DOCKER_REGISTRY}/${IMAGE_NAME} \
                            --set image.tag=${IMAGE_TAG} \
                            --set environment=staging \
                            --wait \
                            --timeout 5m
                    """
                }
            }
        }

        stage('Smoke Tests — Staging') {
            steps {
                script {
                    // Wait for deployment to be ready
                    sh "kubectl rollout status deployment/devops-demo-staging -n ${STAGING_NAMESPACE} --timeout=120s"
                    // Run smoke tests against staging
                    sh """
                        STAGING_URL=\$(kubectl get svc devops-demo-staging \
                            -n ${STAGING_NAMESPACE} \
                            -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
                        curl -f http://\${STAGING_URL}:3000/health || exit 1
                        echo "Staging smoke tests passed"
                    """
                }
            }
        }

        stage('Deploy to Production') {
            when { branch 'main' }
            steps {
                // Manual approval gate for production deployments
                input message: 'Deploy to production?', ok: 'Deploy', submitter: 'admin,devops-lead'
                script {
                    sh """
                        helm upgrade --install \
                            devops-demo-prod \
                            ./helm/devops-demo \
                            --namespace ${PRODUCTION_NAMESPACE} \
                            --create-namespace \
                            --set image.repository=${DOCKER_REGISTRY}/${IMAGE_NAME} \
                            --set image.tag=${IMAGE_TAG} \
                            --set environment=production \
                            --set replicaCount=3 \
                            --wait \
                            --timeout 10m
                    """
                }
            }
        }
    }

    post {
        success {
            echo "Pipeline succeeded — ${FULL_IMAGE} deployed"
            // Add Slack/email notification here
        }
        failure {
            echo "Pipeline failed at stage: ${currentBuild.result}"
            // Add failure notification here
        }
        always {
            // Clean up local Docker images to free disk space
            sh "docker rmi ${FULL_IMAGE} || true"
            cleanWs()
        }
    }
}
// Jenkinsfile
pipeline {
    agent any

    environment {
        // Registry configuration — use your actual registry
        DOCKER_REGISTRY = 'your-registry-url'
        IMAGE_NAME = 'devops-pipeline-demo'
        IMAGE_TAG = "${BUILD_NUMBER}-${GIT_COMMIT[0..7]}"
        FULL_IMAGE = "${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
        // Kubernetes namespace targets
        STAGING_NAMESPACE = 'staging'
        PRODUCTION_NAMESPACE = 'production'
    }

    stages {
        stage('Checkout') {
            steps {
                checkout scm
                script {
                    echo "Building commit: ${GIT_COMMIT}"
                    echo "Branch: ${GIT_BRANCH}"
                }
            }
        }

        stage('Code Quality Check') {
            steps {
                sh 'npm install'
                // ESLint for code quality (add .eslintrc.js to your repo)
                sh 'npx eslint . --ext .js || true'
            }
        }

        stage('Security Scan — Dependencies') {
            steps {
                // Audit npm dependencies for known vulnerabilities
                sh 'npm audit --audit-level=high'
            }
        }

        stage('Build Docker Image') {
            steps {
                script {
                    // Tests run inside the Dockerfile builder stage
                    // If tests fail, docker build fails — pipeline stops here
                    docker.build(FULL_IMAGE, '--target production .')
                    echo "Built image: ${FULL_IMAGE}"
                }
            }
        }

        stage('Security Scan — Container Image') {
            steps {
                // Trivy scans the built image for OS and library vulnerabilities
                sh """
                    trivy image \
                        --exit-code 1 \
                        --severity HIGH,CRITICAL \
                        --no-progress \
                        ${FULL_IMAGE}
                """
            }
        }

        stage('Push to Registry') {
            steps {
                script {
                    docker.withRegistry("https://${DOCKER_REGISTRY}", 'docker-registry-credentials') {
                        docker.image(FULL_IMAGE).push()
                        // Also tag as latest for the branch
                        docker.image(FULL_IMAGE).push('latest')
                    }
                    echo "Pushed: ${FULL_IMAGE}"
                }
            }
        }

        stage('Deploy to Staging') {
            steps {
                script {
                    sh """
                        helm upgrade --install \
                            devops-demo-staging \
                            ./helm/devops-demo \
                            --namespace ${STAGING_NAMESPACE} \
                            --create-namespace \
                            --set image.repository=${DOCKER_REGISTRY}/${IMAGE_NAME} \
                            --set image.tag=${IMAGE_TAG} \
                            --set environment=staging \
                            --wait \
                            --timeout 5m
                    """
                }
            }
        }

        stage('Smoke Tests — Staging') {
            steps {
                script {
                    // Wait for deployment to be ready
                    sh "kubectl rollout status deployment/devops-demo-staging -n ${STAGING_NAMESPACE} --timeout=120s"
                    // Run smoke tests against staging
                    sh """
                        STAGING_URL=\$(kubectl get svc devops-demo-staging \
                            -n ${STAGING_NAMESPACE} \
                            -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
                        curl -f http://\${STAGING_URL}:3000/health || exit 1
                        echo "Staging smoke tests passed"
                    """
                }
            }
        }

        stage('Deploy to Production') {
            when { branch 'main' }
            steps {
                // Manual approval gate for production deployments
                input message: 'Deploy to production?', ok: 'Deploy', submitter: 'admin,devops-lead'
                script {
                    sh """
                        helm upgrade --install \
                            devops-demo-prod \
                            ./helm/devops-demo \
                            --namespace ${PRODUCTION_NAMESPACE} \
                            --create-namespace \
                            --set image.repository=${DOCKER_REGISTRY}/${IMAGE_NAME} \
                            --set image.tag=${IMAGE_TAG} \
                            --set environment=production \
                            --set replicaCount=3 \
                            --wait \
                            --timeout 10m
                    """
                }
            }
        }
    }

    post {
        success {
            echo "Pipeline succeeded — ${FULL_IMAGE} deployed"
            // Add Slack/email notification here
        }
        failure {
            echo "Pipeline failed at stage: ${currentBuild.result}"
            // Add failure notification here
        }
        always {
            // Clean up local Docker images to free disk space
            sh "docker rmi ${FULL_IMAGE} || true"
            cleanWs()
        }
    }
}
// Jenkinsfile
pipeline {
    agent any

    environment {
        // Registry configuration — use your actual registry
        DOCKER_REGISTRY = 'your-registry-url'
        IMAGE_NAME = 'devops-pipeline-demo'
        IMAGE_TAG = "${BUILD_NUMBER}-${GIT_COMMIT[0..7]}"
        FULL_IMAGE = "${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_TAG}"
        // Kubernetes namespace targets
        STAGING_NAMESPACE = 'staging'
        PRODUCTION_NAMESPACE = 'production'
    }

    stages {
        stage('Checkout') {
            steps {
                checkout scm
                script {
                    echo "Building commit: ${GIT_COMMIT}"
                    echo "Branch: ${GIT_BRANCH}"
                }
            }
        }

        stage('Code Quality Check') {
            steps {
                sh 'npm install'
                // ESLint for code quality (add .eslintrc.js to your repo)
                sh 'npx eslint . --ext .js || true'
            }
        }

        stage('Security Scan — Dependencies') {
            steps {
                // Audit npm dependencies for known vulnerabilities
                sh 'npm audit --audit-level=high'
            }
        }

        stage('Build Docker Image') {
            steps {
                script {
                    // Tests run inside the Dockerfile builder stage
                    // If tests fail, docker build fails — pipeline stops here
                    docker.build(FULL_IMAGE, '--target production .')
                    echo "Built image: ${FULL_IMAGE}"
                }
            }
        }

        stage('Security Scan — Container Image') {
            steps {
                // Trivy scans the built image for OS and library vulnerabilities
                sh """
                    trivy image \
                        --exit-code 1 \
                        --severity HIGH,CRITICAL \
                        --no-progress \
                        ${FULL_IMAGE}
                """
            }
        }

        stage('Push to Registry') {
            steps {
                script {
                    docker.withRegistry("https://${DOCKER_REGISTRY}", 'docker-registry-credentials') {
                        docker.image(FULL_IMAGE).push()
                        // Also tag as latest for the branch
                        docker.image(FULL_IMAGE).push('latest')
                    }
                    echo "Pushed: ${FULL_IMAGE}"
                }
            }
        }

        stage('Deploy to Staging') {
            steps {
                script {
                    sh """
                        helm upgrade --install \
                            devops-demo-staging \
                            ./helm/devops-demo \
                            --namespace ${STAGING_NAMESPACE} \
                            --create-namespace \
                            --set image.repository=${DOCKER_REGISTRY}/${IMAGE_NAME} \
                            --set image.tag=${IMAGE_TAG} \
                            --set environment=staging \
                            --wait \
                            --timeout 5m
                    """
                }
            }
        }

        stage('Smoke Tests — Staging') {
            steps {
                script {
                    // Wait for deployment to be ready
                    sh "kubectl rollout status deployment/devops-demo-staging -n ${STAGING_NAMESPACE} --timeout=120s"
                    // Run smoke tests against staging
                    sh """
                        STAGING_URL=\$(kubectl get svc devops-demo-staging \
                            -n ${STAGING_NAMESPACE} \
                            -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
                        curl -f http://\${STAGING_URL}:3000/health || exit 1
                        echo "Staging smoke tests passed"
                    """
                }
            }
        }

        stage('Deploy to Production') {
            when { branch 'main' }
            steps {
                // Manual approval gate for production deployments
                input message: 'Deploy to production?', ok: 'Deploy', submitter: 'admin,devops-lead'
                script {
                    sh """
                        helm upgrade --install \
                            devops-demo-prod \
                            ./helm/devops-demo \
                            --namespace ${PRODUCTION_NAMESPACE} \
                            --create-namespace \
                            --set image.repository=${DOCKER_REGISTRY}/${IMAGE_NAME} \
                            --set image.tag=${IMAGE_TAG} \
                            --set environment=production \
                            --set replicaCount=3 \
                            --wait \
                            --timeout 10m
                    """
                }
            }
        }
    }

    post {
        success {
            echo "Pipeline succeeded — ${FULL_IMAGE} deployed"
            // Add Slack/email notification here
        }
        failure {
            echo "Pipeline failed at stage: ${currentBuild.result}"
            // Add failure notification here
        }
        always {
            // Clean up local Docker images to free disk space
            sh "docker rmi ${FULL_IMAGE} || true"
            cleanWs()
        }
    }
}
helm/devops-demo/
├── Chart.yaml
├── values.yaml
├── values-staging.yaml
├── values-production.yaml
└── templates/ ├── deployment.yaml ├── service.yaml ├── hpa.yaml ├── configmap.yaml └── _helpers.tpl
helm/devops-demo/
├── Chart.yaml
├── values.yaml
├── values-staging.yaml
├── values-production.yaml
└── templates/ ├── deployment.yaml ├── service.yaml ├── hpa.yaml ├── configmap.yaml └── _helpers.tpl
helm/devops-demo/
├── Chart.yaml
├── values.yaml
├── values-staging.yaml
├── values-production.yaml
└── templates/ ├── deployment.yaml ├── service.yaml ├── hpa.yaml ├── configmap.yaml └── _helpers.tpl
# Chart.yaml
apiVersion: v2
name: devops-demo
description: DevOps Pipeline Demo Application
type: application
version: 0.1.0
appVersion: "1.0.0"
# Chart.yaml
apiVersion: v2
name: devops-demo
description: DevOps Pipeline Demo Application
type: application
version: 0.1.0
appVersion: "1.0.0"
# Chart.yaml
apiVersion: v2
name: devops-demo
description: DevOps Pipeline Demo Application
type: application
version: 0.1.0
appVersion: "1.0.0"
# values.yaml — default values
replicaCount: 2

image:
  repository: your-registry/devops-pipeline-demo
  pullPolicy: IfNotPresent
  tag: "latest"

service:
  type: LoadBalancer
  port: 3000

resources:
  limits:
    cpu: 500m
    memory: 256Mi
  requests:
    cpu: 100m
    memory: 128Mi

autoscaling:
  enabled: true
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 70

environment: development

# Health check configuration
livenessProbe:
  httpGet:
    path: /health
    port: 3000
  initialDelaySeconds: 10
  periodSeconds: 10

readinessProbe:
  httpGet:
    path: /health
    port: 3000
  initialDelaySeconds: 5
  periodSeconds: 5
# values.yaml — default values
replicaCount: 2

image:
  repository: your-registry/devops-pipeline-demo
  pullPolicy: IfNotPresent
  tag: "latest"

service:
  type: LoadBalancer
  port: 3000

resources:
  limits:
    cpu: 500m
    memory: 256Mi
  requests:
    cpu: 100m
    memory: 128Mi

autoscaling:
  enabled: true
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 70

environment: development

# Health check configuration
livenessProbe:
  httpGet:
    path: /health
    port: 3000
  initialDelaySeconds: 10
  periodSeconds: 10

readinessProbe:
  httpGet:
    path: /health
    port: 3000
  initialDelaySeconds: 5
  periodSeconds: 5
# values.yaml — default values
replicaCount: 2

image:
  repository: your-registry/devops-pipeline-demo
  pullPolicy: IfNotPresent
  tag: "latest"

service:
  type: LoadBalancer
  port: 3000

resources:
  limits:
    cpu: 500m
    memory: 256Mi
  requests:
    cpu: 100m
    memory: 128Mi

autoscaling:
  enabled: true
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 70

environment: development

# Health check configuration
livenessProbe:
  httpGet:
    path: /health
    port: 3000
  initialDelaySeconds: 10
  periodSeconds: 10

readinessProbe:
  httpGet:
    path: /health
    port: 3000
  initialDelaySeconds: 5
  periodSeconds: 5
# templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "devops-demo.fullname" . }}
  labels:
    {{- include "devops-demo.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "devops-demo.selectorLabels" . | nindent 6 }}
  strategy:
    type: RollingUpdate
    rollingUpdate:
      # Never have zero pods during deployment
      maxUnavailable: 0
      # Allow one extra pod during rollout
      maxSurge: 1
  template:
    metadata:
      labels:
        {{- include "devops-demo.selectorLabels" . | nindent 8 }}
      annotations:
        # Force pod restart when configmap changes
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
    spec:
      # Security context — run as non-root
      securityContext:
        runAsNonRoot: true
        runAsUser: 1001
        fsGroup: 1001
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - containerPort: 3000
              protocol: TCP
          env:
            - name: NODE_ENV
              value: {{ .Values.environment }}
            - name: APP_VERSION
              value: {{ .Values.image.tag }}
          livenessProbe:
            {{- toYaml .Values.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml .Values.readinessProbe | nindent 12 }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
# templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "devops-demo.fullname" . }}
  labels:
    {{- include "devops-demo.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "devops-demo.selectorLabels" . | nindent 6 }}
  strategy:
    type: RollingUpdate
    rollingUpdate:
      # Never have zero pods during deployment
      maxUnavailable: 0
      # Allow one extra pod during rollout
      maxSurge: 1
  template:
    metadata:
      labels:
        {{- include "devops-demo.selectorLabels" . | nindent 8 }}
      annotations:
        # Force pod restart when configmap changes
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
    spec:
      # Security context — run as non-root
      securityContext:
        runAsNonRoot: true
        runAsUser: 1001
        fsGroup: 1001
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - containerPort: 3000
              protocol: TCP
          env:
            - name: NODE_ENV
              value: {{ .Values.environment }}
            - name: APP_VERSION
              value: {{ .Values.image.tag }}
          livenessProbe:
            {{- toYaml .Values.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml .Values.readinessProbe | nindent 12 }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
# templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "devops-demo.fullname" . }}
  labels:
    {{- include "devops-demo.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "devops-demo.selectorLabels" . | nindent 6 }}
  strategy:
    type: RollingUpdate
    rollingUpdate:
      # Never have zero pods during deployment
      maxUnavailable: 0
      # Allow one extra pod during rollout
      maxSurge: 1
  template:
    metadata:
      labels:
        {{- include "devops-demo.selectorLabels" . | nindent 8 }}
      annotations:
        # Force pod restart when configmap changes
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
    spec:
      # Security context — run as non-root
      securityContext:
        runAsNonRoot: true
        runAsUser: 1001
        fsGroup: 1001
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - containerPort: 3000
              protocol: TCP
          env:
            - name: NODE_ENV
              value: {{ .Values.environment }}
            - name: APP_VERSION
              value: {{ .Values.image.tag }}
          livenessProbe:
            {{- toYaml .Values.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml .Values.readinessProbe | nindent 12 }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
# main.tf — EKS Cluster Infrastructure

terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }

  # Remote state — critical for team environments
  backend "s3" {
    bucket         = "your-terraform-state-bucket"
    key            = "devops-demo/terraform.tfstate"
    region         = "ap-south-1"
    encrypt        = true
    dynamodb_table = "terraform-state-lock"
  }
}

provider "aws" {
  region = var.aws_region
}

# VPC
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 5.0"

  name = "${var.project_name}-vpc"
  cidr = "10.0.0.0/16"

  azs             = ["ap-south-1a", "ap-south-1b", "ap-south-1c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  # Required tags for EKS to discover subnets
  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }
  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }
}

# EKS Cluster
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 20.0"

  cluster_name    = "${var.project_name}-cluster"
  cluster_version = "1.29"

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  cluster_endpoint_public_access = true

  # Managed node groups
  eks_managed_node_groups = {
    general = {
      instance_types = ["t3.medium"]
      min_size       = 2
      max_size       = 5
      desired_size   = 2
      labels = {
        role = "general"
      }
    }
  }
}

# Variables
variable "aws_region" {
  description = "AWS region"
  type        = string
  default     = "ap-south-1"
}

variable "project_name" {
  description = "Project name prefix for all resources"
  type        = string
  default     = "devops-demo"
}

# Outputs
output "cluster_endpoint" {
  value = module.eks.cluster_endpoint
}

output "cluster_name" {
  value = module.eks.cluster_name
}
# main.tf — EKS Cluster Infrastructure

terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }

  # Remote state — critical for team environments
  backend "s3" {
    bucket         = "your-terraform-state-bucket"
    key            = "devops-demo/terraform.tfstate"
    region         = "ap-south-1"
    encrypt        = true
    dynamodb_table = "terraform-state-lock"
  }
}

provider "aws" {
  region = var.aws_region
}

# VPC
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 5.0"

  name = "${var.project_name}-vpc"
  cidr = "10.0.0.0/16"

  azs             = ["ap-south-1a", "ap-south-1b", "ap-south-1c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  # Required tags for EKS to discover subnets
  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }
  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }
}

# EKS Cluster
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 20.0"

  cluster_name    = "${var.project_name}-cluster"
  cluster_version = "1.29"

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  cluster_endpoint_public_access = true

  # Managed node groups
  eks_managed_node_groups = {
    general = {
      instance_types = ["t3.medium"]
      min_size       = 2
      max_size       = 5
      desired_size   = 2
      labels = {
        role = "general"
      }
    }
  }
}

# Variables
variable "aws_region" {
  description = "AWS region"
  type        = string
  default     = "ap-south-1"
}

variable "project_name" {
  description = "Project name prefix for all resources"
  type        = string
  default     = "devops-demo"
}

# Outputs
output "cluster_endpoint" {
  value = module.eks.cluster_endpoint
}

output "cluster_name" {
  value = module.eks.cluster_name
}
# main.tf — EKS Cluster Infrastructure

terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }

  # Remote state — critical for team environments
  backend "s3" {
    bucket         = "your-terraform-state-bucket"
    key            = "devops-demo/terraform.tfstate"
    region         = "ap-south-1"
    encrypt        = true
    dynamodb_table = "terraform-state-lock"
  }
}

provider "aws" {
  region = var.aws_region
}

# VPC
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 5.0"

  name = "${var.project_name}-vpc"
  cidr = "10.0.0.0/16"

  azs             = ["ap-south-1a", "ap-south-1b", "ap-south-1c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  # Required tags for EKS to discover subnets
  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }
  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }
}

# EKS Cluster
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 20.0"

  cluster_name    = "${var.project_name}-cluster"
  cluster_version = "1.29"

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  cluster_endpoint_public_access = true

  # Managed node groups
  eks_managed_node_groups = {
    general = {
      instance_types = ["t3.medium"]
      min_size       = 2
      max_size       = 5
      desired_size   = 2
      labels = {
        role = "general"
      }
    }
  }
}

# Variables
variable "aws_region" {
  description = "AWS region"
  type        = string
  default     = "ap-south-1"
}

variable "project_name" {
  description = "Project name prefix for all resources"
  type        = string
  default     = "devops-demo"
}

# Outputs
output "cluster_endpoint" {
  value = module.eks.cluster_endpoint
}

output "cluster_name" {
  value = module.eks.cluster_name
}
terraform init
terraform plan -out=tfplan
terraform apply tfplan

# Configure kubectl to use the new cluster
aws eks update-kubeconfig \
  --region ap-south-1 \
  --name devops-demo-cluster
terraform init
terraform plan -out=tfplan
terraform apply tfplan

# Configure kubectl to use the new cluster
aws eks update-kubeconfig \
  --region ap-south-1 \
  --name devops-demo-cluster
terraform init
terraform plan -out=tfplan
terraform apply tfplan

# Configure kubectl to use the new cluster
aws eks update-kubeconfig \
  --region ap-south-1 \
  --name devops-demo-cluster
# Add the prometheus-community Helm repository
helm repo add prometheus-community \
  https://prometheus-community.github.io/helm-charts
helm repo update

# Install the complete monitoring stack
helm install monitoring prometheus-community/kube-prometheus-stack \
  --namespace monitoring \
  --create-namespace \
  --set grafana.adminPassword='your-secure-password' \
  --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false
# Add the prometheus-community Helm repository
helm repo add prometheus-community \
  https://prometheus-community.github.io/helm-charts
helm repo update

# Install the complete monitoring stack
helm install monitoring prometheus-community/kube-prometheus-stack \
  --namespace monitoring \
  --create-namespace \
  --set grafana.adminPassword='your-secure-password' \
  --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false
# Add the prometheus-community Helm repository
helm repo add prometheus-community \
  https://prometheus-community.github.io/helm-charts
helm repo update

# Install the complete monitoring stack
helm install monitoring prometheus-community/kube-prometheus-stack \
  --namespace monitoring \
  --create-namespace \
  --set grafana.adminPassword='your-secure-password' \
  --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false
# templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "devops-demo.fullname" . }}
  labels:
    {{- include "devops-demo.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "devops-demo.selectorLabels" . | nindent 6 }}
  endpoints:
    - port: http
      path: /metrics
      interval: 15s
# templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "devops-demo.fullname" . }}
  labels:
    {{- include "devops-demo.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "devops-demo.selectorLabels" . | nindent 6 }}
  endpoints:
    - port: http
      path: /metrics
      interval: 15s
# templates/servicemonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "devops-demo.fullname" . }}
  labels:
    {{- include "devops-demo.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      {{- include "devops-demo.selectorLabels" . | nindent 6 }}
  endpoints:
    - port: http
      path: /metrics
      interval: 15s
// Add to app.js — IMPORTANT: this must be registered BEFORE the application
// routes (Express dispatches middleware/routes in registration order; a
// middleware added after a route never runs for requests that route handles).
const client = require('prom-client');

// Collect default Node.js runtime metrics (heap, event loop, GC, ...).
client.collectDefaultMetrics();

// Custom HTTP request counter, labelled by method, route, and status code.
const httpRequestsTotal = new client.Counter({
  name: 'http_requests_total',
  help: 'Total number of HTTP requests',
  labelNames: ['method', 'route', 'status_code'],
});

// Request duration histogram; buckets span 1 ms to 5 s.
const httpRequestDuration = new client.Histogram({
  name: 'http_request_duration_seconds',
  help: 'HTTP request duration in seconds',
  labelNames: ['method', 'route'],
  buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5],
});

// Middleware to record metrics for all routes. Skips /metrics itself so
// Prometheus scrapes do not inflate the request counters.
app.use((req, res, next) => {
  if (req.path === '/metrics') return next();
  const end = httpRequestDuration.startTimer({ method: req.method, route: req.path });
  res.on('finish', () => {
    httpRequestsTotal.inc({
      method: req.method,
      route: req.path,
      status_code: res.statusCode,
    });
    end(); // observe elapsed seconds into the histogram
  });
  next();
});

// Metrics endpoint for Prometheus scraping.
app.get('/metrics', async (req, res) => {
  res.set('Content-Type', client.register.contentType);
  res.end(await client.register.metrics());
});
// Add to app.js — IMPORTANT: this must be registered BEFORE the application
// routes (Express dispatches middleware/routes in registration order; a
// middleware added after a route never runs for requests that route handles).
const client = require('prom-client');

// Collect default Node.js runtime metrics (heap, event loop, GC, ...).
client.collectDefaultMetrics();

// Custom HTTP request counter, labelled by method, route, and status code.
const httpRequestsTotal = new client.Counter({
  name: 'http_requests_total',
  help: 'Total number of HTTP requests',
  labelNames: ['method', 'route', 'status_code'],
});

// Request duration histogram; buckets span 1 ms to 5 s.
const httpRequestDuration = new client.Histogram({
  name: 'http_request_duration_seconds',
  help: 'HTTP request duration in seconds',
  labelNames: ['method', 'route'],
  buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5],
});

// Middleware to record metrics for all routes. Skips /metrics itself so
// Prometheus scrapes do not inflate the request counters.
app.use((req, res, next) => {
  if (req.path === '/metrics') return next();
  const end = httpRequestDuration.startTimer({ method: req.method, route: req.path });
  res.on('finish', () => {
    httpRequestsTotal.inc({
      method: req.method,
      route: req.path,
      status_code: res.statusCode,
    });
    end(); // observe elapsed seconds into the histogram
  });
  next();
});

// Metrics endpoint for Prometheus scraping.
app.get('/metrics', async (req, res) => {
  res.set('Content-Type', client.register.contentType);
  res.end(await client.register.metrics());
});
// Add to app.js — IMPORTANT: this must be registered BEFORE the application
// routes (Express dispatches middleware/routes in registration order; a
// middleware added after a route never runs for requests that route handles).
const client = require('prom-client');

// Collect default Node.js runtime metrics (heap, event loop, GC, ...).
client.collectDefaultMetrics();

// Custom HTTP request counter, labelled by method, route, and status code.
const httpRequestsTotal = new client.Counter({
  name: 'http_requests_total',
  help: 'Total number of HTTP requests',
  labelNames: ['method', 'route', 'status_code'],
});

// Request duration histogram; buckets span 1 ms to 5 s.
const httpRequestDuration = new client.Histogram({
  name: 'http_request_duration_seconds',
  help: 'HTTP request duration in seconds',
  labelNames: ['method', 'route'],
  buckets: [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5],
});

// Middleware to record metrics for all routes. Skips /metrics itself so
// Prometheus scrapes do not inflate the request counters.
app.use((req, res, next) => {
  if (req.path === '/metrics') return next();
  const end = httpRequestDuration.startTimer({ method: req.method, route: req.path });
  res.on('finish', () => {
    httpRequestsTotal.inc({
      method: req.method,
      route: req.path,
      status_code: res.statusCode,
    });
    end(); // observe elapsed seconds into the histogram
  });
  next();
});

// Metrics endpoint for Prometheus scraping.
app.get('/metrics', async (req, res) => {
  res.set('Content-Type', client.register.contentType);
  res.end(await client.register.metrics());
});
kubectl port-forward svc/monitoring-grafana 3001:80 -n monitoring
# Open http://localhost:3001
# Default credentials: admin / your-secure-password
kubectl port-forward svc/monitoring-grafana 3001:80 -n monitoring
# Open http://localhost:3001
# Default credentials: admin / your-secure-password
kubectl port-forward svc/monitoring-grafana 3001:80 -n monitoring
# Open http://localhost:3001
# Default credentials: admin / your-secure-password
# In Jenkins:
# 1. Install the GitHub plugin (Manage Jenkins → Plugins)
# 2. Create a GitHub Personal Access Token with repo and admin:repo_hook scopes
# 3. Add the token to Jenkins credentials (ID: github-token)
# 4. In your Pipeline job: check "GitHub hook trigger for GITScm polling" # In GitHub repository settings:
# Settings → Webhooks → Add webhook
# Payload URL: http://your-jenkins-url/github-webhook/
# Content type: application/json
# Events: Just the push event
# Active: checked
# In Jenkins:
# 1. Install the GitHub plugin (Manage Jenkins → Plugins)
# 2. Create a GitHub Personal Access Token with repo and admin:repo_hook scopes
# 3. Add the token to Jenkins credentials (ID: github-token)
# 4. In your Pipeline job: check "GitHub hook trigger for GITScm polling" # In GitHub repository settings:
# Settings → Webhooks → Add webhook
# Payload URL: http://your-jenkins-url/github-webhook/
# Content type: application/json
# Events: Just the push event
# Active: checked
# In Jenkins:
# 1. Install the GitHub plugin (Manage Jenkins → Plugins)
# 2. Create a GitHub Personal Access Token with repo and admin:repo_hook scopes
# 3. Add the token to Jenkins credentials (ID: github-token)
# 4. In your Pipeline job: check "GitHub hook trigger for GITScm polling" # In GitHub repository settings:
# Settings → Webhooks → Add webhook
# Payload URL: http://your-jenkins-url/github-webhook/
# Content type: application/json
# Events: Just the push event
# Active: checked
- ✋ Manual approval gate — a human confirms production deployment
- ☸️ Helm deploys to production with 3 replicas and zero-downtime rolling update
This is a real production-pattern pipeline. This is what DevOps Training in Electronic City at eMexo Technologies builds with students in hands-on lab sessions — not a simplified demo, but the actual architecture that Electronic City's top engineering teams run.