# Install Ollama on macOS
brew install --cask ollama  # Start the Ollama daemon (or open the app)
# Then pull and test the model:
ollama run qwen2.5-coder:7b
# Install Ollama on macOS
brew install --cask ollama  # Start the Ollama daemon (or open the app)
# Then pull and test the model:
ollama run qwen2.5-coder:7b
# Install Ollama on macOS
brew install --cask ollama  # Start the Ollama daemon (or open the app)
# Then pull and test the model:
ollama run qwen2.5-coder:7b
# Host directory where the GitLab container will persist config, logs, and data
export GITLAB_HOME=$HOME/gitlab
# Pre-create the bind-mount directories referenced by docker-compose
mkdir -p ~/gitlab/config ~/gitlab/logs ~/gitlab/data
# Host directory where the GitLab container will persist config, logs, and data
export GITLAB_HOME=$HOME/gitlab
# Pre-create the bind-mount directories referenced by docker-compose
mkdir -p ~/gitlab/config ~/gitlab/logs ~/gitlab/data
# Host directory where the GitLab container will persist config, logs, and data
export GITLAB_HOME=$HOME/gitlab
# Pre-create the bind-mount directories referenced by docker-compose
mkdir -p ~/gitlab/config ~/gitlab/logs ~/gitlab/data
version: '3.6'
services:
  web:
    image: 'gitlab/gitlab-ce:latest'
    restart: always
    hostname: 'localhost'
    environment:
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'http://localhost'
    ports:
      - '80:80'
    volumes:
      - '$GITLAB_HOME/config:/etc/gitlab'
      - '$GITLAB_HOME/logs:/var/log/gitlab'
      - '$GITLAB_HOME/data:/var/opt/gitlab'
version: '3.6'
services:
  web:
    image: 'gitlab/gitlab-ce:latest'
    restart: always
    hostname: 'localhost'
    environment:
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'http://localhost'
    ports:
      - '80:80'
    volumes:
      - '$GITLAB_HOME/config:/etc/gitlab'
      - '$GITLAB_HOME/logs:/var/log/gitlab'
      - '$GITLAB_HOME/data:/var/opt/gitlab'
version: '3.6'
services:
  web:
    image: 'gitlab/gitlab-ce:latest'
    restart: always
    hostname: 'localhost'
    environment:
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'http://localhost'
    ports:
      - '80:80'
    volumes:
      - '$GITLAB_HOME/config:/etc/gitlab'
      - '$GITLAB_HOME/logs:/var/log/gitlab'
      - '$GITLAB_HOME/data:/var/opt/gitlab'
# Retrieve the auto-generated root password
docker exec -it <your-gitlab-container-name> grep 'Password:' /etc/gitlab/initial_root_password
# Retrieve the auto-generated root password
docker exec -it <your-gitlab-container-name> grep 'Password:' /etc/gitlab/initial_root_password
# Retrieve the auto-generated root password
docker exec -it <your-gitlab-container-name> grep 'Password:' /etc/gitlab/initial_root_password
# Bypass Apple's Python completely
brew install python@3.12
# Explicitly command the Homebrew binary to build the environment
/opt/homebrew/bin/python3.12 -m venv .venv
# Activate and install
source .venv/bin/activate
pip install langchain langgraph langchain-community langchain-ollama mcp fastapi uvicorn python-dotenv requests
# Bypass Apple's Python completely
brew install python@3.12
# Explicitly command the Homebrew binary to build the environment
/opt/homebrew/bin/python3.12 -m venv .venv
# Activate and install
source .venv/bin/activate
pip install langchain langgraph langchain-community langchain-ollama mcp fastapi uvicorn python-dotenv requests
# Bypass Apple's Python completely
brew install python@3.12
# Explicitly command the Homebrew binary to build the environment
/opt/homebrew/bin/python3.12 -m venv .venv
# Activate and install
source .venv/bin/activate
pip install langchain langgraph langchain-community langchain-ollama mcp fastapi uvicorn python-dotenv requests
# Base URL of the local GitLab v4 REST API
GITLAB_API_URL="http://localhost/api/v4"
# Personal access token with `api` scope — replace the placeholder before running
GITLAB_PERSONAL_ACCESS_TOKEN="glpat-YOUR_TOKEN_HERE"
# Base URL of the local GitLab v4 REST API
GITLAB_API_URL="http://localhost/api/v4"
# Personal access token with `api` scope — replace the placeholder before running
GITLAB_PERSONAL_ACCESS_TOKEN="glpat-YOUR_TOKEN_HERE"
# Base URL of the local GitLab v4 REST API
GITLAB_API_URL="http://localhost/api/v4"
# Personal access token with `api` scope — replace the placeholder before running
GITLAB_PERSONAL_ACCESS_TOKEN="glpat-YOUR_TOKEN_HERE"
# mcp_client.py
"""Thin client for the two GitLab REST calls the review agent needs."""
import os

import requests
from dotenv import load_dotenv

load_dotenv()

GITLAB_API_URL = os.environ.get("GITLAB_API_URL", "http://localhost/api/v4")
GITLAB_PERSONAL_ACCESS_TOKEN = os.environ.get("GITLAB_PERSONAL_ACCESS_TOKEN", "")


def get_merge_request_diff(project_id: str, merge_request_iid: str) -> str:
    """Fetch and concatenate the file diffs of a merge request.

    Raises requests.HTTPError on a non-2xx GitLab response.
    """
    print(f"--> Fetching diff for Project {project_id}, MR #{merge_request_iid}")
    url = f"{GITLAB_API_URL}/projects/{project_id}/merge_requests/{merge_request_iid}/changes"
    headers = {"PRIVATE-TOKEN": GITLAB_PERSONAL_ACCESS_TOKEN}
    # Timeout so a stalled GitLab instance cannot hang the background review forever.
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    changes = response.json().get("changes", [])
    diff_text = ""
    for change in changes:
        diff_text += f"\n--- a/{change.get('old_path')} \n+++ b/{change.get('new_path')}\n"
        diff_text += change.get("diff", "")
    return diff_text


def create_merge_request_note(project_id: str, merge_request_iid: str, body: str) -> str:
    """Post *body* as a note (comment) on the merge request.

    Raises requests.HTTPError on a non-2xx GitLab response.
    """
    print(f"--> Posting review to Project {project_id}, MR #{merge_request_iid}")
    url = f"{GITLAB_API_URL}/projects/{project_id}/merge_requests/{merge_request_iid}/notes"
    headers = {"PRIVATE-TOKEN": GITLAB_PERSONAL_ACCESS_TOKEN}
    response = requests.post(url, headers=headers, data={"body": body}, timeout=30)
    response.raise_for_status()
    return "Successfully posted note."
# mcp_client.py
"""Thin client for the two GitLab REST calls the review agent needs."""
import os

import requests
from dotenv import load_dotenv

load_dotenv()

GITLAB_API_URL = os.environ.get("GITLAB_API_URL", "http://localhost/api/v4")
GITLAB_PERSONAL_ACCESS_TOKEN = os.environ.get("GITLAB_PERSONAL_ACCESS_TOKEN", "")


def get_merge_request_diff(project_id: str, merge_request_iid: str) -> str:
    """Fetch and concatenate the file diffs of a merge request.

    Raises requests.HTTPError on a non-2xx GitLab response.
    """
    print(f"--> Fetching diff for Project {project_id}, MR #{merge_request_iid}")
    url = f"{GITLAB_API_URL}/projects/{project_id}/merge_requests/{merge_request_iid}/changes"
    headers = {"PRIVATE-TOKEN": GITLAB_PERSONAL_ACCESS_TOKEN}
    # Timeout so a stalled GitLab instance cannot hang the background review forever.
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    changes = response.json().get("changes", [])
    diff_text = ""
    for change in changes:
        diff_text += f"\n--- a/{change.get('old_path')} \n+++ b/{change.get('new_path')}\n"
        diff_text += change.get("diff", "")
    return diff_text


def create_merge_request_note(project_id: str, merge_request_iid: str, body: str) -> str:
    """Post *body* as a note (comment) on the merge request.

    Raises requests.HTTPError on a non-2xx GitLab response.
    """
    print(f"--> Posting review to Project {project_id}, MR #{merge_request_iid}")
    url = f"{GITLAB_API_URL}/projects/{project_id}/merge_requests/{merge_request_iid}/notes"
    headers = {"PRIVATE-TOKEN": GITLAB_PERSONAL_ACCESS_TOKEN}
    response = requests.post(url, headers=headers, data={"body": body}, timeout=30)
    response.raise_for_status()
    return "Successfully posted note."
# mcp_client.py
"""Thin client for the two GitLab REST calls the review agent needs."""
import os

import requests
from dotenv import load_dotenv

load_dotenv()

GITLAB_API_URL = os.environ.get("GITLAB_API_URL", "http://localhost/api/v4")
GITLAB_PERSONAL_ACCESS_TOKEN = os.environ.get("GITLAB_PERSONAL_ACCESS_TOKEN", "")


def get_merge_request_diff(project_id: str, merge_request_iid: str) -> str:
    """Fetch and concatenate the file diffs of a merge request.

    Raises requests.HTTPError on a non-2xx GitLab response.
    """
    print(f"--> Fetching diff for Project {project_id}, MR #{merge_request_iid}")
    url = f"{GITLAB_API_URL}/projects/{project_id}/merge_requests/{merge_request_iid}/changes"
    headers = {"PRIVATE-TOKEN": GITLAB_PERSONAL_ACCESS_TOKEN}
    # Timeout so a stalled GitLab instance cannot hang the background review forever.
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    changes = response.json().get("changes", [])
    diff_text = ""
    for change in changes:
        diff_text += f"\n--- a/{change.get('old_path')} \n+++ b/{change.get('new_path')}\n"
        diff_text += change.get("diff", "")
    return diff_text


def create_merge_request_note(project_id: str, merge_request_iid: str, body: str) -> str:
    """Post *body* as a note (comment) on the merge request.

    Raises requests.HTTPError on a non-2xx GitLab response.
    """
    print(f"--> Posting review to Project {project_id}, MR #{merge_request_iid}")
    url = f"{GITLAB_API_URL}/projects/{project_id}/merge_requests/{merge_request_iid}/notes"
    headers = {"PRIVATE-TOKEN": GITLAB_PERSONAL_ACCESS_TOKEN}
    response = requests.post(url, headers=headers, data={"body": body}, timeout=30)
    response.raise_for_status()
    return "Successfully posted note."
# agent.py
"""LangGraph pipeline: fetch MR diff -> LLM review -> post comment."""
from typing import TypedDict, Optional

from langchain_ollama import ChatOllama
from langgraph.graph import StateGraph, END

import mcp_client

# Initialize LLM. Low temperature (0.1) keeps the review analytical rather than creative.
llm = ChatOllama(model="qwen2.5-coder:7b", temperature=0.1)


class AgentState(TypedDict):
    # Shared state passed between graph nodes; nodes return partial updates.
    project_id: str
    mr_id: str
    code_diff: Optional[str]
    review_comment: Optional[str]
    error: Optional[str]


def fetch_code(state: AgentState) -> AgentState:
    """Node 1: pull the merge-request diff from GitLab into state."""
    try:
        diff = mcp_client.get_merge_request_diff(state["project_id"], str(state["mr_id"]))
        return {"code_diff": diff}
    except Exception as e:
        return {"error": f"Failed to fetch diff: {e}"}


def review_code(state: AgentState) -> AgentState:
    """Node 2: ask the local LLM to review the fetched diff."""
    if state.get("error"):
        return {}  # a previous node failed; skip
    prompt = f"""You are a Lead AI Architect reviewing a GitLab Pull Request.
Perform a strict logical and security review of this code diff:
{state.get('code_diff', '')}
"""
    try:
        response = llm.invoke(prompt)
        return {"review_comment": response.content}
    except Exception as e:
        return {"error": f"LLM Review failed: {e}"}


def post_review(state: AgentState) -> AgentState:
    """Node 3: post the generated review back to the merge request."""
    if state.get("error") or not state.get("review_comment"):
        return {}  # nothing to post
    try:
        mcp_client.create_merge_request_note(state["project_id"], str(state["mr_id"]), state["review_comment"])
        print("✅ Review successfully posted!")
        return {}
    except Exception as e:
        return {"error": str(e)}


# Compile the LangGraph: a linear Fetch -> Review -> Comment pipeline.
workflow = StateGraph(AgentState)
workflow.add_node("Fetch", fetch_code)
workflow.add_node("Review", review_code)
workflow.add_node("Comment", post_review)
workflow.set_entry_point("Fetch")
workflow.add_edge("Fetch", "Review")
workflow.add_edge("Review", "Comment")
workflow.add_edge("Comment", END)
app = workflow.compile()
# agent.py
"""LangGraph pipeline: fetch MR diff -> LLM review -> post comment."""
from typing import TypedDict, Optional

from langchain_ollama import ChatOllama
from langgraph.graph import StateGraph, END

import mcp_client

# Initialize LLM. Low temperature (0.1) keeps the review analytical rather than creative.
llm = ChatOllama(model="qwen2.5-coder:7b", temperature=0.1)


class AgentState(TypedDict):
    # Shared state passed between graph nodes; nodes return partial updates.
    project_id: str
    mr_id: str
    code_diff: Optional[str]
    review_comment: Optional[str]
    error: Optional[str]


def fetch_code(state: AgentState) -> AgentState:
    """Node 1: pull the merge-request diff from GitLab into state."""
    try:
        diff = mcp_client.get_merge_request_diff(state["project_id"], str(state["mr_id"]))
        return {"code_diff": diff}
    except Exception as e:
        return {"error": f"Failed to fetch diff: {e}"}


def review_code(state: AgentState) -> AgentState:
    """Node 2: ask the local LLM to review the fetched diff."""
    if state.get("error"):
        return {}  # a previous node failed; skip
    prompt = f"""You are a Lead AI Architect reviewing a GitLab Pull Request.
Perform a strict logical and security review of this code diff:
{state.get('code_diff', '')}
"""
    try:
        response = llm.invoke(prompt)
        return {"review_comment": response.content}
    except Exception as e:
        return {"error": f"LLM Review failed: {e}"}


def post_review(state: AgentState) -> AgentState:
    """Node 3: post the generated review back to the merge request."""
    if state.get("error") or not state.get("review_comment"):
        return {}  # nothing to post
    try:
        mcp_client.create_merge_request_note(state["project_id"], str(state["mr_id"]), state["review_comment"])
        print("✅ Review successfully posted!")
        return {}
    except Exception as e:
        return {"error": str(e)}


# Compile the LangGraph: a linear Fetch -> Review -> Comment pipeline.
workflow = StateGraph(AgentState)
workflow.add_node("Fetch", fetch_code)
workflow.add_node("Review", review_code)
workflow.add_node("Comment", post_review)
workflow.set_entry_point("Fetch")
workflow.add_edge("Fetch", "Review")
workflow.add_edge("Review", "Comment")
workflow.add_edge("Comment", END)
app = workflow.compile()
# agent.py
"""LangGraph pipeline: fetch MR diff -> LLM review -> post comment."""
from typing import TypedDict, Optional

from langchain_ollama import ChatOllama
from langgraph.graph import StateGraph, END

import mcp_client

# Initialize LLM. Low temperature (0.1) keeps the review analytical rather than creative.
llm = ChatOllama(model="qwen2.5-coder:7b", temperature=0.1)


class AgentState(TypedDict):
    # Shared state passed between graph nodes; nodes return partial updates.
    project_id: str
    mr_id: str
    code_diff: Optional[str]
    review_comment: Optional[str]
    error: Optional[str]


def fetch_code(state: AgentState) -> AgentState:
    """Node 1: pull the merge-request diff from GitLab into state."""
    try:
        diff = mcp_client.get_merge_request_diff(state["project_id"], str(state["mr_id"]))
        return {"code_diff": diff}
    except Exception as e:
        return {"error": f"Failed to fetch diff: {e}"}


def review_code(state: AgentState) -> AgentState:
    """Node 2: ask the local LLM to review the fetched diff."""
    if state.get("error"):
        return {}  # a previous node failed; skip
    prompt = f"""You are a Lead AI Architect reviewing a GitLab Pull Request.
Perform a strict logical and security review of this code diff:
{state.get('code_diff', '')}
"""
    try:
        response = llm.invoke(prompt)
        return {"review_comment": response.content}
    except Exception as e:
        return {"error": f"LLM Review failed: {e}"}


def post_review(state: AgentState) -> AgentState:
    """Node 3: post the generated review back to the merge request."""
    if state.get("error") or not state.get("review_comment"):
        return {}  # nothing to post
    try:
        mcp_client.create_merge_request_note(state["project_id"], str(state["mr_id"]), state["review_comment"])
        print("✅ Review successfully posted!")
        return {}
    except Exception as e:
        return {"error": str(e)}


# Compile the LangGraph: a linear Fetch -> Review -> Comment pipeline.
workflow = StateGraph(AgentState)
workflow.add_node("Fetch", fetch_code)
workflow.add_node("Review", review_code)
workflow.add_node("Comment", post_review)
workflow.set_entry_point("Fetch")
workflow.add_edge("Fetch", "Review")
workflow.add_edge("Review", "Comment")
workflow.add_edge("Comment", END)
app = workflow.compile()
# webhook_server.py
"""FastAPI endpoint that turns GitLab MR webhooks into background AI reviews."""
from fastapi import FastAPI, Request, BackgroundTasks
import uvicorn

from agent import app as ai_agent

app = FastAPI()


def execute_pr_review(project_id: str, mr_iid: str):
    """Run the LangGraph review agent; invoked as a FastAPI background task."""
    print(f"\n[Background] Executing AI Review for Project: {project_id}, MR: {mr_iid}")
    ai_agent.invoke({"project_id": str(project_id), "mr_id": str(mr_iid)})


@app.post("/webhook")
async def gitlab_webhook(request: Request, background_tasks: BackgroundTasks):
    """Receive a GitLab webhook and schedule a review for opened/updated MRs."""
    payload = await request.json()
    if payload.get("object_kind") == "merge_request":
        attributes = payload.get("object_attributes", {})
        # Only react to newly opened or updated MRs; ignore close/merge/approve events.
        if attributes.get("action") in ["open", "update"]:
            project_id = payload.get("project", {}).get("id")
            mr_iid = attributes.get("iid")
            # Start the AI in a ghost thread!
            background_tasks.add_task(execute_pr_review, project_id, mr_iid)
            return {"status": "success", "message": "AI Review started in background."}
    return {"status": "ignored"}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
# webhook_server.py
"""FastAPI endpoint that turns GitLab MR webhooks into background AI reviews."""
from fastapi import FastAPI, Request, BackgroundTasks
import uvicorn

from agent import app as ai_agent

app = FastAPI()


def execute_pr_review(project_id: str, mr_iid: str):
    """Run the LangGraph review agent; invoked as a FastAPI background task."""
    print(f"\n[Background] Executing AI Review for Project: {project_id}, MR: {mr_iid}")
    ai_agent.invoke({"project_id": str(project_id), "mr_id": str(mr_iid)})


@app.post("/webhook")
async def gitlab_webhook(request: Request, background_tasks: BackgroundTasks):
    """Receive a GitLab webhook and schedule a review for opened/updated MRs."""
    payload = await request.json()
    if payload.get("object_kind") == "merge_request":
        attributes = payload.get("object_attributes", {})
        # Only react to newly opened or updated MRs; ignore close/merge/approve events.
        if attributes.get("action") in ["open", "update"]:
            project_id = payload.get("project", {}).get("id")
            mr_iid = attributes.get("iid")
            # Start the AI in a ghost thread!
            background_tasks.add_task(execute_pr_review, project_id, mr_iid)
            return {"status": "success", "message": "AI Review started in background."}
    return {"status": "ignored"}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
# webhook_server.py
"""FastAPI endpoint that turns GitLab MR webhooks into background AI reviews."""
from fastapi import FastAPI, Request, BackgroundTasks
import uvicorn

from agent import app as ai_agent

app = FastAPI()


def execute_pr_review(project_id: str, mr_iid: str):
    """Run the LangGraph review agent; invoked as a FastAPI background task."""
    print(f"\n[Background] Executing AI Review for Project: {project_id}, MR: {mr_iid}")
    ai_agent.invoke({"project_id": str(project_id), "mr_id": str(mr_iid)})


@app.post("/webhook")
async def gitlab_webhook(request: Request, background_tasks: BackgroundTasks):
    """Receive a GitLab webhook and schedule a review for opened/updated MRs."""
    payload = await request.json()
    if payload.get("object_kind") == "merge_request":
        attributes = payload.get("object_attributes", {})
        # Only react to newly opened or updated MRs; ignore close/merge/approve events.
        if attributes.get("action") in ["open", "update"]:
            project_id = payload.get("project", {}).get("id")
            mr_iid = attributes.get("iid")
            # Start the AI in a ghost thread!
            background_tasks.add_task(execute_pr_review, project_id, mr_iid)
            return {"status": "success", "message": "AI Review started in background."}
    return {"status": "ignored"}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)

# - The Shared Memory: We define an AgentState.
- The Model: We pin the agent to qwen2.5-coder:7b at temperature 0.1. For a PR reviewer, creativity is a liability: higher temperatures invite hallucinations, while 0.1 forces strict, analytical output. - In your local GitLab, create a new branch in your sandbox repository.
- Open a source code file (like a typical Controller or Route) and intentionally write a terrible bug, such as throwing a raw Exception out of nowhere.
- Commit the change and instantly open a Merge Request.
- Switch to your terminal running the FastAPI server. You will immediately see it print: [Background] Executing AI Review for Project: 1, MR: X.
- Wait roughly 30 seconds for Ollama to process the code, then refresh your GitLab Merge Request page in the browser.