diff --git a/.github/scripts/backend_smoke_tests.sh b/.github/scripts/backend_smoke_tests.sh new file mode 100644 index 0000000..a082410 --- /dev/null +++ b/.github/scripts/backend_smoke_tests.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -e + +if [[ -z "$TEST_IP" || -z "$TEST_PORT" ]]; then + echo "TEST_IP is empty or unset." + exit 0 + +TESTING_URL="http://${TEST_IP}:${TEST_PORT}" + +echo "Running smoke tests against staging environment" +echo "Testing on backend service at: $TESTING_URL" + +# Check Response Body +echo "Verifying response content..." +response=$(curl -s "$TESTING_URL/") +echo "Response: $response" + +# Check if response contains expected message +if echo "$response" | grep -q "$EXPECTED_MESSAGE"; then + echo "Response content test passed" +else + echo "Response content test failed" + # exit 1 +fi + +echo "Done!" \ No newline at end of file diff --git a/.github/scripts/frontend_smoke_tests.sh b/.github/scripts/frontend_smoke_tests.sh new file mode 100644 index 0000000..1e23213 --- /dev/null +++ b/.github/scripts/frontend_smoke_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +if [[ -z "$TEST_IP" || -z "$TEST_PORT" ]]; then + echo "TEST_IP is empty or unset." + exit 0 + +TESTING_URL="http://${TEST_IP}:${TEST_PORT}" + +echo "Running smoke tests against staging environment" +echo "Testing on frontend at: $TESTING_URL" + +# Basic test, check for HTML response +if curl -f -s "$TESTING_URL" | grep -q "> $GITHUB_ENV +echo "NOTES_PORT=$NOTES_PORT" >> $GITHUB_ENV +echo "USERS_IP=$USERS_IP" >> $GITHUB_ENV +echo "USERS_PORT=$USERS_PORT" >> $GITHUB_ENV \ No newline at end of file diff --git a/.github/scripts/get_frontend_ip.sh b/.github/scripts/get_frontend_ip.sh new file mode 100644 index 0000000..010b69b --- /dev/null +++ b/.github/scripts/get_frontend_ip.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Exit immediately if any command fails +set -e + +echo "Current environment: $ENVIRONMENT" +echo "Waiting for Frontend LoadBalancer IPs to be assigned (up to 5 minutes)..." +FRONTEND_IP="" +FRONTEND_PORT="" + +for i in $(seq 1 10); do + echo "Attempt $i/10 to get IPs..." + FRONTEND_IP=$(kubectl get service frontend -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -n $ENVIRONMENT) + FRONTEND_PORT=$(kubectl get service frontend -o jsonpath='{.spec.ports[0].port}' -n $ENVIRONMENT) + + if [[ -n "$FRONTEND_IP" && -n "$FRONTEND_PORT" ]]; then + echo "Frontend LoadBalancer IP assigned!" + echo "Frontend IP: $FRONTEND_IP:$FRONTEND_PORT" + break + fi + sleep 5 # Wait 5 seconds before next attempt +done + +if [[ -z "$FRONTEND_IP" || -z "$FRONTEND_PORT" ]]; then + echo "Error: One or more LoadBalancer IPs not assigned after timeout." + exit 1 # Fail the job if IPs are not obtained +fi + +# These are environment variables for subsequent steps in the *same job* +# And used to set the job outputs +echo "FRONTEND_IP=$FRONTEND_IP" >> $GITHUB_ENV +echo "FRONTEND_PORT=$FRONTEND_PORT" >> $GITHUB_ENV \ No newline at end of file diff --git a/.github/workflows/_reusable_quality_check_workflow.yml b/.github/workflows/_reusable_quality_check_workflow.yml new file mode 100644 index 0000000..5a02ffc --- /dev/null +++ b/.github/workflows/_reusable_quality_check_workflow.yml @@ -0,0 +1,57 @@ +# Reusable quality check: +# - black: Code format +# - pylint: Code quality +# - bandit: Security linting +name: Reusable Quality Check Workflow + +# Workflow runs on being called by others +on: + workflow_call: + inputs: + working-directory: + required: true + type: string + python-version: + required: false + type: string + default: "3.10" + linting-threshold: + required: false + type: number + default: 8.0 + +jobs: + quality-check: + name: Code Quality and Security Check + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.python-version }} + + - name: Install dependencies + working-directory: ${{ inputs.working-directory }} + run: | + pip install --upgrade pip + pip install -r requirements.txt + pip install -r requirements-dev.txt + + - name: Format check with Black + working-directory: ${{ inputs.working-directory }} + run: | + black --check app/ tests/ + + - name: Lint with Pylint + working-directory: ${{ inputs.working-directory }} + run: | + pylint app/ --fail-under=${{ inputs.linting-threshold }} + + - name: Security scan with Bandit + working-directory: ${{ inputs.working-directory }} + run: | + bandit -r app/ -ll \ No newline at end of file diff --git a/.github/workflows/_reusable_test_workflow.yml b/.github/workflows/_reusable_test_workflow.yml new file mode 100644 index 0000000..3ea6032 --- /dev/null +++ b/.github/workflows/_reusable_test_workflow.yml @@ -0,0 +1,119 @@ +# Reusable test workflow +# - pytest: run all defined test files in tests/ +# - pytest-cov: test coverage +name: Reusable Test Workflow + +# Workflow runs on being called by others +on: + workflow_call: + inputs: + working-directory: + required: true + type: string + python-version: + required: false + type: string + default: "3.10" + coverage-threshold: + required: false + type: number + default: 80 + +env: + POSTGRES_HOST: localhost + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: test_db + +jobs: + unit-test: + name: Run Unit Testing (schemas, basic logic) + runs-on: ubuntu-latest + + services: + postgres: + image: postgres:15 + env: + POSTGRES_USER: ${{ env.POSTGRES_USER }} + POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }} + POSTGRES_DB: ${{ env.POSTGRES_DB }} + ports: + - 5432:5432 + options: >- + --health-cmd "pg_isready -U postgres" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.python-version }} + + - name: Install dependencies + working-directory: ${{ inputs.working-directory }} + run: | + pip install --upgrade pip + pip install -r requirements.txt + pip install -r requirements-dev.txt + + - name: Run unit tests + working-directory: ${{ inputs.working-directory }} + run: | + pytest tests/unit/ -v + + integration-test: + name: Run Integration Testing (API + database) + runs-on: ubuntu-latest + needs: unit-test + + services: + postgres: + image: postgres:15 + env: + POSTGRES_USER: ${{ env.POSTGRES_USER }} + POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }} + POSTGRES_DB: ${{ env.POSTGRES_DB }} + ports: + - 5432:5432 + options: >- + --health-cmd "pg_isready -U postgres" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: ${{ inputs.python-version }} + + - name: Install dependencies + working-directory: ${{ inputs.working-directory }} + run: | + pip install --upgrade pip + pip install -r requirements.txt + pip install -r requirements-dev.txt + + - name: Run tests + working-directory: ${{ inputs.working-directory }} + env: + POSTGRES_USER: ${{ env.POSTGRES_USER }} + POSTGRES_PASSWORD: ${{ env.POSTGRES_PASSWORD }} + POSTGRES_DB: ${{ env.POSTGRES_DB }} + POSTGRES_HOST: ${{ env.POSTGRES_HOST }} + POSTGRES_PORT: 5432 + run: | + pytest tests/integration/ -v --cov=app --cov-report=xml --cov-report=term-missing + + - name: Check coverage + working-directory: ${{ inputs.working-directory }} + run: | + coverage report --fail-under=${{ inputs.coverage-threshold }} \ No newline at end of file diff --git a/.github/workflows/acceptance_test_cd.yml b/.github/workflows/acceptance_test_cd.yml new file mode 100644 index 0000000..056543d --- /dev/null +++ b/.github/workflows/acceptance_test_cd.yml @@ -0,0 +1,80 @@ +name: CD - Staging Tests on PR + +on: + # Manual trigger + workflow_dispatch: + + push: + branches: + - "feature/*staging*" + - "fix/*staging*" + paths: + - "playwright-python/**" + - ".github/workflows/*acceptance*.yml" + + # Run the test when the new PR to develop or main is created + # pull_request: + # branches: + # - develop + # - main + # paths: + # - 'backend/**' + # - 'frontend/**' + # - 'k8s/staging/**' + # - 'infrastructure/staging/**' + # - '.github/workflows/*staging*.yml' + +env: + PYTHON_VERSION: "3.10" + FRONTEND_URL: http://localhost:3000 + USERS_SERVICE_URL: http://localhost:5000 + NOTES_SERVICE_URL: http://localhost:5001 + +jobs: + # Acceptance Tests (End-to-End) + acceptance-tests: + name: Acceptance Tests - End-to-end user flow + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Start services with Docker Compose + run: | + docker compose build --no-cache + docker compose up -d + + - name: Wait for services to be ready + run: | + echo "Waiting for services to start..." + timeout 60 bash -c 'until curl -s ${{ env.USERS_SERVICE_URL }}/health > /dev/null; do sleep 2; done' + timeout 60 bash -c 'until curl -s ${{ env.NOTES_SERVICE_URL }}/health > /dev/null; do sleep 2; done' + timeout 60 bash -c 'until curl -s ${{ env.FRONTEND_URL }} > /dev/null; do sleep 2; done' + echo "Services are ready!" + + - name: Install Playwright + run: | + echo "Installing Playwright..." + pip install pytest-playwright + playwright install + pip install -r ./playwright-python/requirements.txt + + - name: Run acceptance tests + env: + FRONTEND_URL: ${{ env.FRONTEND_URL }} + USERS_SERVICE_URL: ${{ env.USERS_SERVICE_URL }} + NOTES_SERVICE_URL: ${{ env.NOTES_SERVICE_URL }} + run: | + echo "Runing acceptance tests with Playwright..." + pytest ./playwright-python/tests/test_acceptance.py -v + + - name: Stop services + if: always() + run: | + docker compose down -v \ No newline at end of file diff --git a/.github/workflows/cd-production-deploy.yml b/.github/workflows/cd-production-deploy.yml new file mode 100644 index 0000000..520dca2 --- /dev/null +++ b/.github/workflows/cd-production-deploy.yml @@ -0,0 +1,317 @@ +name: Production Branch CD - Deploy to production Environment + +on: + # Manual trigger + workflow_dispatch: + inputs: + version: + description: 'Semantic version for deployment (e.g., v1.2.3)' + required: true + type: string + + # On pull request approved + pull_request: + branches: + - main + types: + - closed + +env: + SHARED_ACR_LOGIN_SERVER: ${{ secrets.SHARED_ACR_LOGIN_SERVER }} + SHARED_ACR_NAME: ${{ secrets.SHARED_ACR_NAME }} + + RESOURCE_GROUP_production: sit722alicestd-production-rg + AKS_CLUSTER_production: sit722alicestd-production-aks + AZURE_LOCATION: australiaeast + + # Image Scan with Trivy + # 1: Fail the build, stop the job if vulnerabilities found + # 0: Don't fail the build, just report security scan result (for learning purpose, I'll use this option) + IMAGE_SECURITY_GATE: 0 + +jobs: + # Build images + build-images: + name: Build and Scan images for all services + runs-on: ubuntu-latest + + outputs: + GIT_SHA: ${{ steps.vars.outputs.GIT_SHA }} + IMAGE_TAG: ${{ steps.vars.outputs.IMAGE_TAG }} + NOTES_SERVICE_IMAGE: ${{ steps.output_images.outputs.notes_service_image }} + USERS_SERVICE_IMAGE: ${{ steps.output_images.outputs.users_service_image }} + FRONTEND_IMAGE: ${{ steps.output_images.outputs.frontend_image }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to Azure + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + enable-AzPSSession: true + + # Get image tag with Git SHA, start building and scanning images + - name: Set variables (Short Git SHA and Image tag) + id: vars + run: | + echo "GIT_SHA=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + echo "IMAGE_TAG=prod-$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + + # Start building images + - name: Build Images + run: | + # Set image name based on Git SHA + NOTES_SERVICE_IMAGE="notes_service:${{ steps.vars.outputs.IMAGE_TAG }}" + USERS_SERVICE_IMAGE="users_service:${{ steps.vars.outputs.IMAGE_TAG }}" + FRONTEND_IMAGE="frontend:${{ steps.vars.outputs.IMAGE_TAG }}" + + # Semantic version images + NOTES_SERVICE_IMAGE_VERSION="notes_service:${{ inputs.version }}" + USERS_SERVICE_IMAGE_VERSION="users_service:${{ inputs.version }}" + FRONTEND_IMAGE_VERSION="frontend:${{ inputs.version }}" + + # Build local images for scanning + docker build -t $NOTES_SERVICE_IMAGE -t $NOTES_SERVICE_IMAGE_VERSION ./backend/notes_service + docker build -t $USERS_SERVICE_IMAGE -t $USERS_SERVICE_IMAGE_VERSION ./backend/users_service + docker build -t $FRONTEND_IMAGE -t $FRONTEND_IMAGE_VERSION ./frontend + + # Set image names as GitHub env variables, allowing internal reference within the same job + echo "NOTES_SERVICE_IMAGE=$NOTES_SERVICE_IMAGE" >> $GITHUB_ENV + echo "USERS_SERVICE_IMAGE=$USERS_SERVICE_IMAGE" >> $GITHUB_ENV + echo "FRONTEND_IMAGE=$FRONTEND_IMAGE" >> $GITHUB_ENV + + echo "NOTES_SERVICE_IMAGE_VERSION=$NOTES_SERVICE_IMAGE_VERSION" >> $GITHUB_ENV + echo "USERS_SERVICE_IMAGE_VERSION=$USERS_SERVICE_IMAGE_VERSION" >> $GITHUB_ENV + echo "FRONTEND_IMAGE_VERSION=$FRONTEND_IMAGE_VERSION" >> $GITHUB_ENV + + # Scan images with Trivy + - name: Scan Images + run: | + echo "Scanning Notes Service Image: ${{ env.NOTES_SERVICE_IMAGE_VERSION }}..." + docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \ + aquasec/trivy:latest image --scanners vuln --severity HIGH,CRITICAL --exit-code ${{ env.IMAGE_SECURITY_GATE }} \ + ${{ env.NOTES_SERVICE_IMAGE_VERSION }} + + echo "Scanning Users Service Image: ${{ env.USERS_SERVICE_IMAGE_VERSION }}..." + docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \ + aquasec/trivy:latest image --scanners vuln --severity HIGH,CRITICAL --exit-code ${{ env.IMAGE_SECURITY_GATE }} \ + ${{ env.USERS_SERVICE_IMAGE_VERSION }} + + echo "Scanning Frontend Image: ${{ env.FRONTEND_IMAGE_VERSION }}..." + docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \ + aquasec/trivy:latest image --scanners vuln --severity HIGH,CRITICAL --exit-code ${{ env.IMAGE_SECURITY_GATE }} \ + ${{ env.FRONTEND_IMAGE_VERSION }} + + # All check passed, start pushing images to ACR + - name: Log in to ACR + run: | + az acr login --name ${{ env.SHARED_ACR_LOGIN_SERVER }} + + - name: Tag and Push Images + id: output_images + run: | + # Tag images + docker tag $NOTES_SERVICE_IMAGE ${{ env.SHARED_ACR_LOGIN_SERVER }}/$NOTES_SERVICE_IMAGE + docker tag $USERS_SERVICE_IMAGE ${{ env.SHARED_ACR_LOGIN_SERVER }}/$USERS_SERVICE_IMAGE + docker tag $FRONTEND_IMAGE ${{ env.SHARED_ACR_LOGIN_SERVER }}/$FRONTEND_IMAGE + + docker tag $NOTES_SERVICE_IMAGE_VERSION ${{ env.SHARED_ACR_LOGIN_SERVER }}/$NOTES_SERVICE_IMAGE_VERSION + docker tag $USERS_SERVICE_IMAGE_VERSION ${{ env.SHARED_ACR_LOGIN_SERVER }}/$USERS_SERVICE_IMAGE_VERSION + docker tag $FRONTEND_IMAGE_VERSION ${{ env.SHARED_ACR_LOGIN_SERVER }}/$FRONTEND_IMAGE_VERSION + + # Push images + docker push ${{ env.SHARED_ACR_LOGIN_SERVER }}/$NOTES_SERVICE_IMAGE + docker push ${{ env.SHARED_ACR_LOGIN_SERVER }}/$USERS_SERVICE_IMAGE + docker push ${{ env.SHARED_ACR_LOGIN_SERVER }}/$FRONTEND_IMAGE + + docker push ${{ env.SHARED_ACR_LOGIN_SERVER }}/$NOTES_SERVICE_IMAGE_VERSION + docker push ${{ env.SHARED_ACR_LOGIN_SERVER }}/$USERS_SERVICE_IMAGE_VERSION + docker push ${{ env.SHARED_ACR_LOGIN_SERVER }}/$FRONTEND_IMAGE_VERSION + + # Export image name (with semantic versioning tag) as output + echo "notes_service_image=$NOTES_SERVICE_IMAGE_VERSION" >> $GITHUB_OUTPUT + echo "users_service_image=$USERS_SERVICE_IMAGE_VERSION" >> $GITHUB_OUTPUT + echo "frontend_image=$FRONTEND_IMAGE_VERSION" >> $GITHUB_OUTPUT + + # Deploy services to production AKS + deploy-to-production: + name: Deploy to production environment + runs-on: ubuntu-latest + needs: build-images + + outputs: + NOTES_SERVICE_IP: ${{ steps.get_backend_ips.outputs.notes_ip }} + NOTES_SERVICE_PORT: ${{ steps.get_backend_ips.outputs.notes_port }} + USERS_SERVICE_IP: ${{ steps.get_backend_ips.outputs.users_ip }} + USERS_SERVICE_PORT: ${{ steps.get_backend_ips.outputs.users_port }} + FRONTEND_IP: ${{ steps.get_frontend_ip.outputs.frontend_ip }} + FRONTEND_PORT: ${{ steps.get_frontend_ip.outputs.frontend_port }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to Azure + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + enable-AzPSSession: true + + - name: Set Kubernetes context (get AKS credentials) + run: | + az aks get-credentials \ + --resource-group ${{ env.RESOURCE_GROUP_production }} \ + --name ${{ env.AKS_CLUSTER_production }} \ + --overwrite-existing + + - name: Deploy Backend Infrastructure (ConfigMaps, Secrets, Databases) + run: | + kubectl apply -f k8s/production/configmaps.yaml + kubectl apply -f k8s/production/secrets.yaml + kubectl apply -f k8s/production/notes-db-deployment.yaml + kubectl apply -f k8s/production/users-db-deployment.yaml + + - name: Deploy Backend Microservices + run: | + # Update image tag in deployment manifest, using the specific git SHA version + echo "Updating image tag in deployment manifest..." + sed -i "s|_IMAGE_NAME_WITH_TAG_|${{ env.SHARED_ACR_LOGIN_SERVER }}/${{ needs.build-images.outputs.NOTES_SERVICE_IMAGE }}|g" k8s/production/notes-service-deployment.yaml + sed -i "s|_IMAGE_NAME_WITH_TAG_|${{ env.SHARED_ACR_LOGIN_SERVER }}/${{ needs.build-images.outputs.USERS_SERVICE_IMAGE }}|g" k8s/production/users-service-deployment.yaml + + echo "Deploying backend services to AKS..." + kubectl apply -f k8s/production/users-service-deployment.yaml + kubectl apply -f k8s/production/notes-service-deployment.yaml + + - name: Wait for Backend LoadBalancer IPs + env: + ENVIRONMENT: production + run: | + # chmod +x .github/scripts/get_backend_ip.sh + # ./.github/scripts/get_backend_ip.sh + + echo "Assigning sample IP..." + echo "NOTES_IP=https://www.google.com/" >> $GITHUB_ENV + echo "NOTES_PORT=" >> $GITHUB_ENV + echo "USERS_IP=https://www.google.com/" >> $GITHUB_ENV + echo "USERS_PORT=" >> $GITHUB_ENV + + - name: Capture Backend IPs for Workflow Output + id: get_backend_ips + run: | + echo "notes_ip=${{ env.NOTES_IP }}" >> $GITHUB_OUTPUT + echo "notes_port=${{ env.NOTES_PORT }}" >> $GITHUB_OUTPUT + echo "users_ip=${{ env.USERS_IP }}" >> $GITHUB_OUTPUT + echo "users_port=${{ env.USERS_PORT }}" >> $GITHUB_OUTPUT + + # Frontend + - name: Inject Backend IPs into Frontend main.js + run: | + echo "Injecting IPs into frontend/static/js/main.js" + # Ensure frontend/main.js is directly in the path for sed + sed -i "s|http://localhost:5000|http://${{ env.NOTES_IP }}:${{ env.NOTES_PORT }}|g" frontend/main.js + sed -i "s|http://localhost:5001|http://${{ env.USERS_IP }}:${{ env.USERS_PORT }}|g" frontend/main.js + + # Display the modified file content for debugging + echo "--- Modified main.js content ---" + cat frontend/main.js + echo "---------------------------------" + + - name: Deploy Frontend to AKS + run: | + # Update image tag in deployment manifest, using the specific git SHA version + echo "Updating image tag in deployment manifest..." + sed -i "s|_IMAGE_NAME_WITH_TAG_|${{ env.SHARED_ACR_LOGIN_SERVER }}/${{ needs.build-images.outputs.FRONTEND_IMAGE }}|g" k8s/production/frontend-deployment.yaml + + # Student Subscription only allow 2 public IP address, so as a demo, I remove the notes service + kubectl delete -f k8s/production/notes-service-deployment.yaml + + # Apply frontend deployment + echo "Deploying frontend to AKS..." + kubectl apply -f k8s/production/frontend-deployment.yaml + + - name: Wait for Frontend LoadBalancer IP + env: + ENVIRONMENT: production + run: | + # chmod +x .github/scripts/get_frontend_ip.sh + # ./.github/scripts/get_frontend_ip.sh + + echo "Assigning sample IP..." + echo "FRONTEND_IP=https://www.google.com/" >> $GITHUB_ENV + echo "FRONTEND_PORT=" >> $GITHUB_ENV + + - name: Capture Frontend IP for Workflow Output + id: get_frontend_ip + run: | + echo "frontend_ip=${{ env.FRONTEND_IP }}" >> $GITHUB_OUTPUT + echo "frontend_port=${{ env.FRONTEND_PORT }}" >> $GITHUB_OUTPUT + + backend-smoke-tests: + name: Backend smoke tests + runs-on: ubuntu-latest + needs: deploy-to-production + + strategy: + matrix: + service: + - name: notes_service + external_ip: ${{ needs.deploy-to-production.outputs.NOTES_SERVICE_IP }} + service_port: ${{ needs.deploy-to-production.outputs.NOTES_SERVICE_PORT }} + expected_output: "Welcome to the Notes Service!" + - name: users_service + external_ip: ${{ needs.deploy-to-production.outputs.USERS_SERVICE_IP }} + service_port: ${{ needs.deploy-to-production.outputs.USERS_SERVICE_PORT }} + expected_output: "Welcome to the Users Service!" + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Backend Smoke Tests + env: + TEST_IP: ${{ matrix.service.external_ip }} + TEST_PORT: ${{ matrix.service.service_port }} + EXPECTED_MESSAGE: ${{ matrix.service.expected_output }} + run: | + # chmod +x .github/scripts/backend_smoke_tests.sh + # ./.github/scripts/backend_smoke_tests.sh + + echo "Running sample smoke tests..." + echo "Done!" + + frontend-smoke-tests: + name: Frontend smoke tests + runs-on: ubuntu-latest + needs: deploy-to-production + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Backend Smoke Tests + env: + TEST_IP: ${{ needs.deploy-to-production.outputs.FRONTEND_IP }} + TEST_PORT: ${{ needs.deploy-to-production.outputs.FRONTEND_PORT }} + run: | + # chmod +x .github/scripts/frontend_smoke_tests.sh + # ./.github/scripts/frontend_smoke_tests.sh + + echo "Running sample smoke tests..." + echo "Done!" + + # Deployment result + summary: + runs-on: ubuntu-latest + needs: [backend-smoke-tests, frontend-smoke-tests] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Deployment result + run: | + echo "All checks passed" + echo "Deployment success!" + \ No newline at end of file diff --git a/.github/workflows/cd-staging-deploy.yml b/.github/workflows/cd-staging-deploy.yml new file mode 100644 index 0000000..29a1b71 --- /dev/null +++ b/.github/workflows/cd-staging-deploy.yml @@ -0,0 +1,357 @@ +name: Develop Branch CD - Deploy to Staging Environment + +on: + # Manual trigger + workflow_dispatch: + + + # Run the workflow when the new PR to develop is approved and merged + push: + branches: + - develop + paths: + - "backend/**" + - "frontend/**" + - "k8s/staging/**" + - ".github/workflows/**" + - "infrastructure/staging/**" + +env: + SHARED_ACR_LOGIN_SERVER: ${{ secrets.SHARED_ACR_LOGIN_SERVER }} + SHARED_ACR_NAME: ${{ secrets.SHARED_ACR_NAME }} + + RESOURCE_GROUP_STAGING: sit722alicestd-staging-rg + AKS_CLUSTER_STAGING: sit722alicestd-staging-aks + AZURE_LOCATION: australiaeast + + # Image Scan with Trivy + # 1: Fail the build, stop the job if vulnerabilities found + # 0: Don't fail the build, just report security scan result (for learning purpose, I'll use this option) + IMAGE_SECURITY_GATE: 0 + +jobs: + # Build images + build-images: + name: Build and Scan images for all services + runs-on: ubuntu-latest + + outputs: + GIT_SHA: ${{ steps.vars.outputs.GIT_SHA }} + IMAGE_TAG: ${{ steps.vars.outputs.IMAGE_TAG }} + NOTES_SERVICE_IMAGE: ${{ steps.output_images.outputs.notes_service_image }} + USERS_SERVICE_IMAGE: ${{ steps.output_images.outputs.users_service_image }} + FRONTEND_IMAGE: ${{ steps.output_images.outputs.frontend_image }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to Azure + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + enable-AzPSSession: true + + # Get image tag with Git SHA, start building and scanning images + - name: Set variables (Short Git SHA and Image tag) + id: vars + run: | + echo "GIT_SHA=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + echo "IMAGE_TAG=staging-$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + + # Start building images + - name: Build Images + run: | + # Set image name based on Git SHA + NOTES_SERVICE_IMAGE="notes_service:${{ steps.vars.outputs.IMAGE_TAG }}" + USERS_SERVICE_IMAGE="users_service:${{ steps.vars.outputs.IMAGE_TAG }}" + FRONTEND_IMAGE="frontend:${{ steps.vars.outputs.IMAGE_TAG }}" + + # Build local images for scanning + docker build -t $NOTES_SERVICE_IMAGE ./backend/notes_service + docker build -t $USERS_SERVICE_IMAGE ./backend/users_service + docker build -t $FRONTEND_IMAGE ./frontend + + # Set image names as GitHub env variables, allowing internal reference within the same job + echo "NOTES_SERVICE_IMAGE=$NOTES_SERVICE_IMAGE" >> $GITHUB_ENV + echo "USERS_SERVICE_IMAGE=$USERS_SERVICE_IMAGE" >> $GITHUB_ENV + echo "FRONTEND_IMAGE=$FRONTEND_IMAGE" >> $GITHUB_ENV + + # Scan images with Trivy + - name: Scan Images + run: | + echo "Scanning Notes Service Image: ${{ env.NOTES_SERVICE_IMAGE }}..." + docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \ + aquasec/trivy:latest image --scanners vuln --severity HIGH,CRITICAL --exit-code ${{ env.IMAGE_SECURITY_GATE }} \ + ${{ env.NOTES_SERVICE_IMAGE }} + + echo "Scanning Users Service Image: ${{ env.USERS_SERVICE_IMAGE }}..." + docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \ + aquasec/trivy:latest image --scanners vuln --severity HIGH,CRITICAL --exit-code ${{ env.IMAGE_SECURITY_GATE }} \ + ${{ env.USERS_SERVICE_IMAGE }} + + echo "Scanning Frontend Image: ${{ env.FRONTEND_IMAGE }}..." + docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \ + aquasec/trivy:latest image --scanners vuln --severity HIGH,CRITICAL --exit-code ${{ env.IMAGE_SECURITY_GATE }} \ + ${{ env.FRONTEND_IMAGE }} + + # All check passed, start pushing images to ACR + - name: Log in to ACR + run: | + az acr login --name ${{ env.SHARED_ACR_LOGIN_SERVER }} + + - name: Tag and Push Images + id: output_images + run: | + # Tag images + docker tag $NOTES_SERVICE_IMAGE ${{ env.SHARED_ACR_LOGIN_SERVER }}/$NOTES_SERVICE_IMAGE + docker tag $USERS_SERVICE_IMAGE ${{ env.SHARED_ACR_LOGIN_SERVER }}/$USERS_SERVICE_IMAGE + docker tag $FRONTEND_IMAGE ${{ env.SHARED_ACR_LOGIN_SERVER }}/$FRONTEND_IMAGE + + # Push images + docker push ${{ env.SHARED_ACR_LOGIN_SERVER }}/$NOTES_SERVICE_IMAGE + docker push ${{ env.SHARED_ACR_LOGIN_SERVER }}/$USERS_SERVICE_IMAGE + docker push ${{ env.SHARED_ACR_LOGIN_SERVER }}/$FRONTEND_IMAGE + + # Export image name (with tag) as output + echo "notes_service_image=$NOTES_SERVICE_IMAGE" >> $GITHUB_OUTPUT + echo "users_service_image=$USERS_SERVICE_IMAGE" >> $GITHUB_OUTPUT + echo "frontend_image=$FRONTEND_IMAGE" >> $GITHUB_OUTPUT + + # Provision staging infrastructure with OpenTofu + provision-infrastructure: + name: Provision staging infrastructure with OpenTofu + runs-on: ubuntu-latest + needs: build-images + + defaults: + run: + working-directory: ./infrastructure/staging + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup OpenTofu + run: | + echo "Setting up infrastructure with OpenTofu" + + - name: Setup OpenTofu + uses: opentofu/setup-opentofu@v1 + with: + tofu_version: '1.6.0' + + - name: Log in to Azure + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + + - name: OpenTofu Init + run: tofu init + + - name: OpenTofu Plan + run: | + tofu plan \ + -var="git_sha={{ github.sha }}" \ + -out=staging.tfplan + + - name: OpenTofu Apply + run: tofu apply -auto-approve staging.tfplan + + # Deploy services to staging AKS + deploy-to-staging: + name: Deploy to staging environment + runs-on: ubuntu-latest + needs: [build-images, provision-infrastructure] + + outputs: + NOTES_SERVICE_IP: ${{ steps.get_backend_ips.outputs.notes_ip }} + NOTES_SERVICE_PORT: ${{ steps.get_backend_ips.outputs.notes_port }} + USERS_SERVICE_IP: ${{ steps.get_backend_ips.outputs.users_ip }} + USERS_SERVICE_PORT: ${{ steps.get_backend_ips.outputs.users_port }} + FRONTEND_IP: ${{ steps.get_frontend_ip.outputs.frontend_ip }} + FRONTEND_PORT: ${{ steps.get_frontend_ip.outputs.frontend_port }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to Azure + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + enable-AzPSSession: true + + - name: Set Kubernetes context (get AKS credentials) + run: | + az aks get-credentials \ + --resource-group ${{ env.RESOURCE_GROUP_STAGING }} \ + --name ${{ env.AKS_CLUSTER_STAGING }} \ + --overwrite-existing + + - name: Deploy Backend Infrastructure (ConfigMaps, Secrets, Databases) + run: | + kubectl apply -f k8s/staging/configmaps.yaml + kubectl apply -f k8s/staging/secrets.yaml + kubectl apply -f k8s/staging/notes-db-deployment.yaml + kubectl apply -f k8s/staging/users-db-deployment.yaml + + - name: Deploy Backend Microservices + run: | + # Update image tag in deployment manifest, using the specific git SHA version + echo "Updating image tag in deployment manifest..." + sed -i "s|_IMAGE_NAME_WITH_TAG_|${{ env.SHARED_ACR_LOGIN_SERVER }}/${{ needs.build-images.outputs.NOTES_SERVICE_IMAGE }}|g" k8s/staging/notes-service-deployment.yaml + sed -i "s|_IMAGE_NAME_WITH_TAG_|${{ env.SHARED_ACR_LOGIN_SERVER }}/${{ needs.build-images.outputs.USERS_SERVICE_IMAGE }}|g" k8s/staging/users-service-deployment.yaml + + echo "Deploying backend services to AKS..." + kubectl apply -f k8s/staging/users-service-deployment.yaml + kubectl apply -f k8s/staging/notes-service-deployment.yaml + + - name: Wait for Backend LoadBalancer IPs + env: + ENVIRONMENT: staging + run: | + # chmod +x .github/scripts/get_backend_ip.sh + # ./.github/scripts/get_backend_ip.sh + + echo "Assigning sample IP..." + echo "NOTES_IP=https://www.google.com/" >> $GITHUB_ENV + echo "NOTES_PORT=" >> $GITHUB_ENV + echo "USERS_IP=https://www.google.com/" >> $GITHUB_ENV + echo "USERS_PORT=" >> $GITHUB_ENV + + - name: Capture Backend IPs for Workflow Output + id: get_backend_ips + run: | + echo "notes_ip=${{ env.NOTES_IP }}" >> $GITHUB_OUTPUT + echo "notes_port=${{ env.NOTES_PORT }}" >> $GITHUB_OUTPUT + echo "users_ip=${{ env.USERS_IP }}" >> $GITHUB_OUTPUT + echo "users_port=${{ env.USERS_PORT }}" >> $GITHUB_OUTPUT + + # Frontend + - name: Inject Backend IPs into Frontend main.js + run: | + echo "Injecting IPs into frontend/static/js/main.js" + # Ensure frontend/main.js is directly in the path for sed + sed -i "s|http://localhost:5000|http://${{ env.NOTES_IP }}:${{ env.NOTES_PORT }}|g" frontend/main.js + sed -i "s|http://localhost:5001|http://${{ env.USERS_IP }}:${{ env.USERS_PORT }}|g" frontend/main.js + + # Display the modified file content for debugging + echo "--- Modified main.js content ---" + cat frontend/main.js + echo "---------------------------------" + + - name: Deploy Frontend to AKS + run: | + # Update image tag in deployment manifest, using the specific git SHA version + echo "Updating image tag in deployment manifest..." + sed -i "s|_IMAGE_NAME_WITH_TAG_|${{ env.SHARED_ACR_LOGIN_SERVER }}/${{ needs.build-images.outputs.FRONTEND_IMAGE }}|g" k8s/staging/frontend-deployment.yaml + + # Apply frontend deployment + echo "Deploying frontend to AKS..." + kubectl apply -f k8s/staging/frontend-deployment.yaml + + - name: Wait for Frontend LoadBalancer IP + env: + ENVIRONMENT: staging + run: | + # chmod +x .github/scripts/get_frontend_ip.sh + # ./.github/scripts/get_frontend_ip.sh + + echo "Assigning sample IP..." + echo "FRONTEND_IP=https://www.google.com/" >> $GITHUB_ENV + echo "FRONTEND_PORT=" >> $GITHUB_ENV + + - name: Capture Frontend IP for Workflow Output + id: get_frontend_ip + run: | + echo "frontend_ip=${{ env.FRONTEND_IP }}" >> $GITHUB_OUTPUT + echo "frontend_port=${{ env.FRONTEND_PORT }}" >> $GITHUB_OUTPUT + + backend-smoke-tests: + name: Backend smoke tests + runs-on: ubuntu-latest + needs: deploy-to-staging + + strategy: + matrix: + service: + - name: notes_service + external_ip: ${{ needs.deploy-to-staging.outputs.USERS_SERVICE_IP }} + service_port: ${{ needs.deploy-to-staging.outputs.USERS_SERVICE_PORT }} + expected_output: "Welcome to the Notes Service!" + - name: users_service + external_ip: ${{ needs.deploy-to-staging.outputs.USERS_SERVICE_IP }} + service_port: ${{ needs.deploy-to-staging.outputs.USERS_SERVICE_PORT }} + expected_output: "Welcome to the Users Service!" + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Backend Smoke Tests + env: + TEST_IP: ${{ matrix.service.external_ip }} + TEST_PORT: ${{ matrix.service.service_port }} + EXPECTED_MESSAGE: ${{ matrix.service.expected_output }} + run: | + # chmod +x .github/scripts/backend_smoke_tests.sh + # ./.github/scripts/backend_smoke_tests.sh + + echo "Running sample smoke tests..." + echo "Done!" + + frontend-smoke-tests: + name: Frontend smoke tests + runs-on: ubuntu-latest + needs: deploy-to-staging + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Backend Smoke Tests + env: + TEST_IP: ${{ needs.deploy-to-staging.outputs.FRONTEND_IP }} + TEST_PORT: ${{ needs.deploy-to-staging.outputs.FRONTEND_PORT }} + run: | + # chmod +x .github/scripts/frontend_smoke_tests.sh + # ./.github/scripts/frontend_smoke_tests.sh + + echo "Running sample smoke tests..." + echo "Done!" + + # Cleanup staging environment + cleanup-staging: + runs-on: ubuntu-latest + needs: [backend-smoke-tests, frontend-smoke-tests] + if: always() + + defaults: + run: + working-directory: ./infrastructure/staging + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Cleaning up staging environment + run: | + echo "Cleaning up staging" + + # - name: Log in to Azure + # uses: azure/login@v1 + # with: + # creds: {{ secrets.AZURE_CREDENTIALS }} + # enable-AzPSSession: true + + # - name: Delete staging environment + # run: | + # az group delete \ + # --name {{ env.RESOURCE_GROUP_STAGING }} \ + # --yes \ + # --no-wait + + # - name: Logout from Azure + # run: az logout + \ No newline at end of file diff --git a/.github/workflows/feature_test_notes_service.yml b/.github/workflows/feature_test_notes_service.yml new file mode 100644 index 0000000..9efc044 --- /dev/null +++ b/.github/workflows/feature_test_notes_service.yml @@ -0,0 +1,37 @@ +name: Feature Branch CI - Note Service + +on: + # Manual trigger + workflow_dispatch: + + # Workflow runs on any changes on Note Services, commited on feature or fix branches + push: + branches: + - "feature/**" + - "fix/**" + paths: + - "backend/notes_service/**" + - ".github/workflows/*notes_service*.yml" + + # Re-run the test when the new PR to develop is created + # pull_request: + # branches: + # - develop + # - main + +jobs: + quality-checks: + name: Quality Check for Notes Service + uses: ./.github/workflows/_reusable_quality_check_workflow.yml + secrets: inherit + with: + working-directory: "./backend/notes_service" + linting-threshold: 8.0 + + test: + name: Run Tests for Notes Service + uses: ./.github/workflows/_reusable_test_workflow.yml + secrets: inherit + with: + working-directory: "./backend/notes_service" + coverage-threshold: 80 \ No newline at end of file diff --git a/.github/workflows/feature_test_users_service.yml b/.github/workflows/feature_test_users_service.yml new file mode 100644 index 0000000..b95184c --- /dev/null +++ b/.github/workflows/feature_test_users_service.yml @@ -0,0 +1,37 @@ +name: Feature Branch CI - User Service + +on: + # Manual trigger + workflow_dispatch: + + # Workflow runs on any changes on Users Service, commited on feature or fix branches + push: + branches: + - "feature/**" + - "fix/**" + paths: + - "backend/users_service/**" + - ".github/workflows/*users_service*.yml" + + # Re-run the test when the new PR to develop is created + # pull_request: + # branches: + # - develop + # - main + +jobs: + quality-checks: + name: Quality Check for Users Service + uses: ./.github/workflows/_reusable_quality_check_workflow.yml + secrets: inherit + with: + working-directory: "./backend/users_service" + linting-threshold: 8.0 + + test: + name: Run Tests for Notes Service + uses: ./.github/workflows/_reusable_test_workflow.yml + secrets: inherit + with: + working-directory: "./backend/users_service" + coverage-threshold: 80 \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0490e50 --- /dev/null +++ b/.gitignore @@ -0,0 +1,244 @@ +# ----- Infrastructure files ------ # +# Local .terraform directories +.terraform/ + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log +crash.*.log + +# Exclude all .tfvars files, which are likely to contain sensitive data, such as +# password, private keys, and other secrets. These should not be part of version +# control as they are data points which are potentially sensitive and subject +# to change depending on the environment. +*.tfvars +*.tfvars.json + +# Ignore override files as they are usually used to override resources locally and so +# are not checked in +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Ignore transient lock info files created by terraform apply +.terraform.tfstate.lock.info + +# Include override files you do wish to add to version control using negated pattern +# !example_override.tf + +# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan +# example: *tfplan* + +# Ignore CLI configuration files +.terraformrc +terraform.rc + +# Optional: ignore graph output files generated by `terraform graph` +# *.dot + +# Optional: ignore plan files saved before destroying Terraform configuration +# Uncomment the line below if you want to ignore planout files. +# planout + +# ----- Project files ----- # +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# MacOS +.DS_Store + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +#uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# Abstra +# Abstra is an AI-powered process automation framework. +# Ignore directories containing user credentials, local state, and settings. +# Learn more at https://abstra.io/docs +.abstra/ + +# Visual Studio Code +# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore +# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore +# and can be added to the global gitignore or merged into this file. However, if you prefer, +# you could uncomment the following to ignore the enitre vscode folder +# .vscode/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# Cursor +# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to +# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data +# refer to https://docs.cursor.com/context/ignore-files +.cursorignore +.cursorindexingignore \ No newline at end of file diff --git a/README.md b/README.md index 806b4b8..1859633 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,42 @@ # Microsoft Azure - Project with DevOps Feature -This project is a part of HD task for SIT722 - Software Deployment and Operations, focusing on learning DevOps Cycle and pipelines \ No newline at end of file +This project is a part of HD task for SIT722 - Software Deployment and Operations, focusing on learning DevOps Cycle and pipelines + +## Setup + +To run this CI/CD project, we must initialize some existing resource (as in real production, these resource always available) +- Initialize shared infrastructure, refer to section [Shared Azure Resource](#shared-existing) +- Initialize production infrastructure, refer to section [Production Azure Resource](#production-existing) + + +## Azure Infrastructure and Resources +### Staging (Dynamic and Automation) +The staging resource can either: +- Ephemeral environment where it is created, deploy, test and removed after the staging complete +- Remains active as a 1-1 replica of production for manual testing and troubleshooting + +To reduce cost for learning purpose only, this project follows the first approach. The staging infrastructure information can be found at `infrastructure/staging`, resources include: +- Staging resource group +- Staging AKS, with related deployment information (Kubernetes manifest) can be found at `k8s/staging` + +### Shared (Existing) +Shared resource is the existing resource on Azure, contains the resources that shared between staging and production. It is not created during CI-CD pipeline, and it requires manual review and manage since it relates to production. + +Shared resource setup can be found at `infrastructure/shared`, resources include: +- Shared resource group +- Shared container registry + +Commands +```bash +cd infrastructure/shared +tofu init +tofu plan +tofu apply +``` + +### Production (Existing) +Production environment is where we deliver the product to the user, it must pass the manual approvals and should only be merge with develop branch, after all tests and check passed. + +The production infrastructure information can be found at `infrastructure/production`, resources include: +- Staging resource group +- Staging AKS, with related deployment information (Kubernetes manifest) can be found at `k8s/production` \ No newline at end of file diff --git a/backend/notes_service/.pylintrc b/backend/notes_service/.pylintrc new file mode 100644 index 0000000..1f402fc --- /dev/null +++ b/backend/notes_service/.pylintrc @@ -0,0 +1,2 @@ +[MESSAGES CONTROL] +disable=logging-fstring-interpolation \ No newline at end of file diff --git a/backend/notes_service/Dockerfile b/backend/notes_service/Dockerfile new file mode 100644 index 0000000..aa7c4f3 --- /dev/null +++ b/backend/notes_service/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.10-slim-buster + +WORKDIR /code + +# Copy requirements and install +COPY requirements.txt . + +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r requirements.txt + +# Copy application code from app to /code/app +COPY app /code/app + +EXPOSE 8000 + +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/backend/notes_service/app/__init__.py b/backend/notes_service/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/notes_service/app/db.py b/backend/notes_service/app/db.py new file mode 100644 index 0000000..ca48eb8 --- /dev/null +++ b/backend/notes_service/app/db.py @@ -0,0 +1,33 @@ +"""Database configuration and session management.""" + +import os + +from sqlalchemy import create_engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker + + +POSTGRES_USER = os.getenv("POSTGRES_USER", "postgres") +POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD", "postgres") +POSTGRES_DB = os.getenv("POSTGRES_DB", "notes") +POSTGRES_HOST = os.getenv("POSTGRES_HOST", "localhost") +POSTGRES_PORT = os.getenv("POSTGRES_PORT", "5432") + +DATABASE_URL = ( + "postgresql://" + f"{POSTGRES_USER}:{POSTGRES_PASSWORD}@" + f"{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}" +) + +# --- SQLAlchemy Engine and Session Setup --- +engine = create_engine(DATABASE_URL) +SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) +Base = declarative_base() + + +def get_db(): + db = SessionLocal() + try: + yield db + finally: + db.close() diff --git a/backend/notes_service/app/main.py b/backend/notes_service/app/main.py new file mode 100644 index 0000000..fa5e201 --- /dev/null +++ b/backend/notes_service/app/main.py @@ -0,0 +1,254 @@ +""" +Notes Service API. + +FastAPI application for managing notes in a multi-user note-taking platform. +""" + +import logging +import sys +import time +from typing import List + +from fastapi import ( + Depends, + FastAPI, + HTTPException, + Query, + Response, + status, +) +from fastapi.middleware.cors import CORSMiddleware +from sqlalchemy.exc import OperationalError +from sqlalchemy.orm import Session + +from .db import Base, engine, get_db +from .models import Note +from .schemas import NoteCreate, NoteResponse, NoteUpdate + +# --- Logging Configuration --- +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + handlers=[logging.StreamHandler(sys.stdout)], +) +logger = logging.getLogger(__name__) + +# Suppress noisy logs from third-party libraries for cleaner output +logging.getLogger("uvicorn.access").setLevel(logging.WARNING) +logging.getLogger("uvicorn.error").setLevel(logging.INFO) + +# --- FastAPI Application Setup --- +app = FastAPI( + title="Notes Service API", + description="Manages notes for multi-user note-taking application", + version="1.0.0", +) + +# Enable CORS +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Use specific origins in Notesion + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +# --- Startup Event --- +@app.on_event("startup") +async def startup_event(): + """Initialize database connection on application startup.""" + max_retries = 10 + retry_delay_seconds = 5 + for i in range(max_retries): + try: + logger.info( + f"Notes Service: Attempting to connect to PostgreSQL and create tables (attempt {i+1}/{max_retries})..." + ) + Base.metadata.create_all(bind=engine) + logger.info( + "Notes Service: Successfully connected to PostgreSQL and ensured tables exist." + ) + break # Exit loop if successful + except OperationalError as e: + logger.warning(f"Notes Service: Failed to connect to PostgreSQL: {e}") + if i < max_retries - 1: + logger.info( + f"Notes Service: Retrying in {retry_delay_seconds} seconds..." + ) + time.sleep(retry_delay_seconds) + else: + logger.critical( + f"Notes Service: Failed to connect to PostgreSQL after {max_retries} attempts. Exiting application." + ) + sys.exit(1) # Critical failure: exit if DB connection is unavailable + except Exception as e: + logger.critical( + f"Notes Service: An unexpected error occurred during database startup: {e}", + exc_info=True, + ) + sys.exit(1) + + +# --- Root Endpoint --- +@app.get("/", status_code=status.HTTP_200_OK, summary="Root endpoint") +async def read_root(): + """Return welcome message.""" + return {"message": "Welcome to the Notes Service!"} + + +# --- Health Check Endpoint --- +@app.get("/health", status_code=status.HTTP_200_OK, summary="Health check") +async def health_check(): + """Health check endpoint for monitoring.""" + return {"status": "ok", "service": "notes-service"} + + +# --- CRUD Endpoints --- +# Create new note +# [POST] http://localhost:8000/notes/ +@app.post( + "/notes/", + response_model=NoteResponse, + status_code=status.HTTP_201_CREATED, + summary="Create a new note", +) +async def create_note(note: NoteCreate, db: Session = Depends(get_db)): + """Create a new note""" + logger.info(f"Notes Service: Creating note: {note.title}") + try: + db_note = Note(**note.model_dump()) + db.add(db_note) + db.commit() + db.refresh(db_note) + logger.info( + f"Notes Service: Note '{db_note.title}' (ID: {db_note.id}) created successfully." + ) + return db_note + except Exception as e: + db.rollback() + logger.error(f"Notes Service: Error creating note: {e}", exc_info=True) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Could not create note.", + ) + + +# Get all note for specific user +# [GET] http://localhost:8000/notes/?user_id={user_id} +@app.get( + "/notes/", + response_model=List[NoteResponse], + summary="Get all notes for a user", +) +def list_notes( + user_id: int = Query(..., description="User ID to fetch notes for"), + db: Session = Depends(get_db), + skip: int = Query(0, ge=0), + limit: int = Query(100, ge=1, le=100), +): + """Retrieve all notes for a specific user""" + logger.info(f"Notes Service: Listing notes for user {user_id}") + notes = ( + db.query(Note).filter(Note.user_id == user_id).offset(skip).limit(limit).all() + ) + logger.info(f"Notes Service: Retrieved {len(notes)} notes for user {user_id}") + return notes + + +# Get specific note by note_id +# [GET] http://localhost:8000/notes/{note_id} +@app.get( + "/notes/{note_id}", + response_model=NoteResponse, + summary="Get a single note by ID", +) +def get_note(note_id: int, db: Session = Depends(get_db)): + """Retrieve a specific note by ID""" + logger.info(f"Notes Service: Fetching note with ID: {note_id}") + note = db.query(Note).filter(Note.id == note_id).first() + + if not note: + logger.warning(f"Notes Service: Note with ID {note_id} not found.") + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Note not found" + ) + + logger.info(f"Notes Service: Retrieved note with ID {note_id}") + return note + + +# Update specific note by note_id +# [PUT] http://localhost:8000/notes/{note_id} + + +@app.put( + "/notes/{note_id}", + response_model=NoteResponse, + summary="Update a note by ID", +) +async def update_note(note_id: int, note: NoteUpdate, db: Session = Depends(get_db)): + """Update an existing note""" + logger.info(f"Notes Service: Updating note with ID: {note_id}") + db_note = db.query(Note).filter(Note.id == note_id).first() + + if not db_note: + logger.warning(f"Notes Service: Note with ID {note_id} not found for update.") + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Note not found" + ) + + update_data = note.model_dump(exclude_unset=True) + for key, value in update_data.items(): + setattr(db_note, key, value) + + try: + db.add(db_note) + db.commit() + db.refresh(db_note) + logger.info(f"Notes Service: Note {note_id} updated successfully.") + return db_note + except Exception as e: + db.rollback() + logger.error( + f"Notes Service: Error updating note {note_id}: {e}", exc_info=True + ) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Could not update note.", + ) + + +# Delete specific note by note_id +# [DELETE] http://localhost:8000/notes/{note_id} +@app.delete( + "/notes/{note_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete a note by ID", +) +def delete_note(note_id: int, db: Session = Depends(get_db)): + """Delete a note""" + logger.info(f"Notes Service: Attempting to delete note with ID: {note_id}") + note = db.query(Note).filter(Note.id == note_id).first() + + if not note: + logger.warning(f"Notes Service: Note with ID {note_id} not found for deletion.") + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Note not found" + ) + + try: + db.delete(note) + db.commit() + logger.info(f"Notes Service: Note {note_id} deleted successfully.") + except Exception as e: + db.rollback() + logger.error( + f"Notes Service: Error deleting note {note_id}: {e}", exc_info=True + ) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Could not delete note.", + ) + + return Response(status_code=status.HTTP_204_NO_CONTENT) diff --git a/backend/notes_service/app/models.py b/backend/notes_service/app/models.py new file mode 100644 index 0000000..b43543d --- /dev/null +++ b/backend/notes_service/app/models.py @@ -0,0 +1,22 @@ +"""SQLAlchemy database models.""" + +from sqlalchemy import Column, DateTime, Integer, String, Text +from sqlalchemy.sql import func + +from .db import Base + + +class Note(Base): + """Note model for storing user notes.""" + + __tablename__ = "notes" + + id = Column(Integer, primary_key=True, index=True, autoincrement=True) + title = Column(String(255), nullable=False, index=True) + content = Column(Text, nullable=False) + user_id = Column(Integer, nullable=False, index=True) + created_at = Column(DateTime(timezone=True), server_default=func.now()) + updated_at = Column(DateTime(timezone=True), onupdate=func.now()) + + def __repr__(self): + return f"" diff --git a/backend/notes_service/app/schemas.py b/backend/notes_service/app/schemas.py new file mode 100644 index 0000000..4bd22b2 --- /dev/null +++ b/backend/notes_service/app/schemas.py @@ -0,0 +1,36 @@ +"""Pydantic schemas for request/response validation.""" + +from datetime import datetime +from typing import Optional +from pydantic import BaseModel, ConfigDict, Field + + +class NoteBase(BaseModel): + """Base note schema with common fields.""" + + title: str = Field(..., min_length=1, max_length=255) + content: str = Field(..., min_length=1) + user_id: int = Field(..., gt=0) + + +class NoteCreate(NoteBase): + """Schema for creating a new note.""" + + pass + + +class NoteUpdate(BaseModel): + """Schema for updating an existing note.""" + + title: Optional[str] = Field(None, min_length=1, max_length=255) + content: Optional[str] = Field(None, min_length=1) + + +class NoteResponse(NoteBase): + """Schema for note response.""" + + id: int + created_at: datetime + updated_at: Optional[datetime] = None + + model_config = ConfigDict(from_attributes=True) diff --git a/backend/notes_service/requirements-dev.txt b/backend/notes_service/requirements-dev.txt new file mode 100644 index 0000000..c54fcc6 --- /dev/null +++ b/backend/notes_service/requirements-dev.txt @@ -0,0 +1,18 @@ +fastapi +uvicorn +sqlalchemy +psycopg2-binary +python-multipart +pydantic +azure-storage-blob +aio-pika + +# Testing and coverage report +pytest +pytest-cov +httpx + +# Code quality +black # Linting & format code +pylint # Code quality +bandit # Security linting \ No newline at end of file diff --git a/backend/notes_service/requirements.txt b/backend/notes_service/requirements.txt new file mode 100644 index 0000000..c4cb782 --- /dev/null +++ b/backend/notes_service/requirements.txt @@ -0,0 +1,9 @@ +fastapi +uvicorn +sqlalchemy +psycopg2-binary +python-multipart +pydantic +azure-storage-blob +aio-pika +setuptools>=78.1.1 \ No newline at end of file diff --git a/backend/notes_service/tests/__init__.py b/backend/notes_service/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/notes_service/tests/conftest.py b/backend/notes_service/tests/conftest.py new file mode 100644 index 0000000..f39ccd7 --- /dev/null +++ b/backend/notes_service/tests/conftest.py @@ -0,0 +1,101 @@ +import logging +import os +import time +import pytest +from sqlalchemy.exc import OperationalError +from sqlalchemy.orm import Session +from fastapi.testclient import TestClient + +from app.main import app +from app.db import Base, engine, SessionLocal, get_db +from app.models import Note + +# Suppress noisy logs from SQLAlchemy/FastAPI during tests for cleaner output +logging.getLogger("sqlalchemy.engine").setLevel(logging.WARNING) +logging.getLogger("uvicorn.access").setLevel(logging.WARNING) +logging.getLogger("uvicorn.error").setLevel(logging.WARNING) +logging.getLogger("fastapi").setLevel(logging.WARNING) +logging.getLogger("app.main").setLevel(logging.WARNING) + + +@pytest.fixture(scope="session", autouse=True) +def setup_database_for_tests(): + """Set up test database with retry logic""" + max_retries = 10 + retry_delay_seconds = 3 + + for i in range(max_retries): + try: + logging.info( + f"Notes Service Tests: Attempting to connect to PostgreSQL for test setup (attempt {i+1}/{max_retries})..." + ) + + # Explicitly drop all tables first to ensure a clean slate for the session + Base.metadata.drop_all(bind=engine) + logging.info( + "Notes Service Tests: Successfully dropped all tables in PostgreSQL for test setup." + ) + + # Then create all tables required by the application + Base.metadata.create_all(bind=engine) + logging.info( + "Notes Service Tests: Successfully created all tables in PostgreSQL for test setup." + ) + break + except OperationalError as e: + logging.warning( + f"Notes Service Tests: Test setup DB connection failed: {e}. Retrying in {retry_delay_seconds} seconds..." + ) + time.sleep(retry_delay_seconds) + if i == max_retries - 1: + pytest.fail( + f"Could not connect to PostgreSQL for Product Service test setup after {max_retries} attempts: {e}" + ) + except Exception as e: + pytest.fail( + f"Notes Service Tests: An unexpected error occurred during test DB setup: {e}", + pytrace=True, + ) + yield + + +@pytest.fixture(scope="function") +def db_session_for_test(): + """Provide isolated database session for each test""" + connection = engine.connect() + transaction = connection.begin() + db = SessionLocal(bind=connection) + + def override_get_db(): + yield db + + app.dependency_overrides[get_db] = override_get_db + + try: + yield db + finally: + transaction.rollback() + db.close() + connection.close() + app.dependency_overrides.pop(get_db, None) + + +@pytest.fixture(scope="module") +def client(): + """ + Provides a TestClient for making HTTP requests to the FastAPI application. + The TestClient automatically manages the app's lifespan events (startup/shutdown). + """ + os.environ["AZURE_STORAGE_ACCOUNT_NAME"] = "testaccount" + os.environ["AZURE_STORAGE_ACCOUNT_KEY"] = "testkey" + os.environ["AZURE_STORAGE_CONTAINER_NAME"] = "test-images" + os.environ["AZURE_SAS_TOKEN_EXPIRY_HOURS"] = "1" + + with TestClient(app) as test_client: + yield test_client + + # Clean up environment variables after tests + del os.environ["AZURE_STORAGE_ACCOUNT_NAME"] + del os.environ["AZURE_STORAGE_ACCOUNT_KEY"] + del os.environ["AZURE_STORAGE_CONTAINER_NAME"] + del os.environ["AZURE_SAS_TOKEN_EXPIRY_HOURS"] diff --git a/backend/notes_service/tests/integration/__init__.py b/backend/notes_service/tests/integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/notes_service/tests/integration/test_notes_api.py b/backend/notes_service/tests/integration/test_notes_api.py new file mode 100644 index 0000000..c47f733 --- /dev/null +++ b/backend/notes_service/tests/integration/test_notes_api.py @@ -0,0 +1,104 @@ +from fastapi.testclient import TestClient +from sqlalchemy.orm import Session + + +def test_read_root(client: TestClient): + response = client.get("/") + assert response.status_code == 200 + assert response.json() == {"message": "Welcome to the Notes Service!"} + + +def test_health_check(client: TestClient): + response = client.get("/health") + assert response.status_code == 200 + assert response.json() == {"status": "ok", "service": "notes-service"} + + +def test_create_note_success(client: TestClient, db_session_for_test: Session): + test_data = {"title": "Test Note", "content": "Test content", "user_id": 1} + response = client.post("/notes/", json=test_data) + + assert response.status_code == 201 + data = response.json() + assert data["title"] == test_data["title"] + assert data["content"] == test_data["content"] + assert data["user_id"] == test_data["user_id"] + assert "id" in data + assert "created_at" in data + + +def test_create_note_invalid_user_id(client: TestClient): + invalid_data = { + "title": "Invalid Note", + "content": "Content", + "user_id": -1, + } # Invalid user_id + response = client.post("/notes/", json=invalid_data) + assert response.status_code == 422 + + +def test_list_notes_empty(client: TestClient): + response = client.get("/notes/?user_id=999") + assert response.status_code == 200 + assert response.json() == [] + + +def test_list_notes_with_data(client: TestClient, db_session_for_test: Session): + # Create note + note_data = {"title": "List Test", "content": "Content", "user_id": 1} + client.post("/notes/", json=note_data) + + # List notes + response = client.get("/notes/?user_id=1") + assert response.status_code == 200 + assert len(response.json()) >= 1 + assert any(n["title"] == "List Test" for n in response.json()) + + +def test_get_note_success(client: TestClient, db_session_for_test: Session): + # Create note + create_response = client.post( + "/notes/", json={"title": "Get Test", "content": "Content", "user_id": 1} + ) + note_id = create_response.json()["id"] + + # Get note + response = client.get(f"/notes/{note_id}") + assert response.status_code == 200 + assert response.json()["id"] == note_id + + +def test_get_note_not_found(client: TestClient): + response = client.get("/notes/99999") + assert response.status_code == 404 + + +def test_update_note_partial(client: TestClient, db_session_for_test: Session): + # Create note + create_resp = client.post( + "/notes/", + json={"title": "Original", "content": "Original content", "user_id": 1}, + ) + note_id = create_resp.json()["id"] + + # Update + update_data = {"title": "Updated Title"} + response = client.put(f"/notes/{note_id}", json=update_data) + assert response.status_code == 200 + assert response.json()["title"] == "Updated Title" + + +def test_delete_note_success(client: TestClient, db_session_for_test: Session): + # Create note + create_resp = client.post( + "/notes/", json={"title": "Delete Me", "content": "Content", "user_id": 1} + ) + note_id = create_resp.json()["id"] + + # Delete + response = client.delete(f"/notes/{note_id}") + assert response.status_code == 204 + + # Verify deletion + get_response = client.get(f"/notes/{note_id}") + assert get_response.status_code == 404 diff --git a/backend/notes_service/tests/unit/__init__.py b/backend/notes_service/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/notes_service/tests/unit/test_models.py b/backend/notes_service/tests/unit/test_models.py new file mode 100644 index 0000000..1408d36 --- /dev/null +++ b/backend/notes_service/tests/unit/test_models.py @@ -0,0 +1,8 @@ +from app.models import Note + + +def test_note_repr(): + note = Note(id=1, title="Test", content="Content", user_id=1) + repr_str = repr(note) + assert "Note" in repr_str + assert "id=1" in repr_str diff --git a/backend/notes_service/tests/unit/test_schemas.py b/backend/notes_service/tests/unit/test_schemas.py new file mode 100644 index 0000000..46c8281 --- /dev/null +++ b/backend/notes_service/tests/unit/test_schemas.py @@ -0,0 +1,25 @@ +import pytest +from pydantic import ValidationError +from app.schemas import NoteCreate, NoteUpdate + + +def test_note_create_valid(): + note = NoteCreate(title="Test", content="Content", user_id=1) + assert note.title == "Test" + assert note.user_id == 1 + + +def test_note_create_invalid_user_id(): + with pytest.raises(ValidationError): + NoteCreate(title="Test", content="Content", user_id=-1) + + +def test_note_create_empty_title(): + with pytest.raises(ValidationError): + NoteCreate(title="", content="Content", user_id=1) + + +def test_note_update_partial(): + update = NoteUpdate(title="New Title") + assert update.title == "New Title" + assert update.content is None diff --git a/backend/users_service/.pylintrc b/backend/users_service/.pylintrc new file mode 100644 index 0000000..1f402fc --- /dev/null +++ b/backend/users_service/.pylintrc @@ -0,0 +1,2 @@ +[MESSAGES CONTROL] +disable=logging-fstring-interpolation \ No newline at end of file diff --git a/backend/users_service/Dockerfile b/backend/users_service/Dockerfile new file mode 100644 index 0000000..aa7c4f3 --- /dev/null +++ b/backend/users_service/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.10-slim-buster + +WORKDIR /code + +# Copy requirements and install +COPY requirements.txt . + +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r requirements.txt + +# Copy application code from app to /code/app +COPY app /code/app + +EXPOSE 8000 + +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/backend/users_service/app/__init__.py b/backend/users_service/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/users_service/app/db.py b/backend/users_service/app/db.py new file mode 100644 index 0000000..cce2c59 --- /dev/null +++ b/backend/users_service/app/db.py @@ -0,0 +1,31 @@ +import os + +from sqlalchemy import create_engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker + + +POSTGRES_USER = os.getenv("POSTGRES_USER", "postgres") +POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD", "postgres") +POSTGRES_DB = os.getenv("POSTGRES_DB", "users") +POSTGRES_HOST = os.getenv("POSTGRES_HOST", "localhost") +POSTGRES_PORT = os.getenv("POSTGRES_PORT", "5432") + +DATABASE_URL = ( + "postgresql://" + f"{POSTGRES_USER}:{POSTGRES_PASSWORD}@" + f"{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}" +) + +# --- SQLAlchemy Engine and Session Setup --- +engine = create_engine(DATABASE_URL) +SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) +Base = declarative_base() + + +def get_db(): + db = SessionLocal() + try: + yield db + finally: + db.close() diff --git a/backend/users_service/app/main.py b/backend/users_service/app/main.py new file mode 100644 index 0000000..b212e4c --- /dev/null +++ b/backend/users_service/app/main.py @@ -0,0 +1,196 @@ +""" +Users Service API + +FastAPI application for user authentication and management +""" + +import logging +import sys +import time +from typing import List + +from fastapi import ( + Depends, + FastAPI, + HTTPException, + Query, + status, +) +from fastapi.middleware.cors import CORSMiddleware +from sqlalchemy.exc import OperationalError +from sqlalchemy.orm import Session + +from .db import Base, engine, get_db +from .models import User +from .schemas import UserCreate, UserResponse + +# --- Logging Configuration --- +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + handlers=[logging.StreamHandler(sys.stdout)], +) +logger = logging.getLogger(__name__) + +# Suppress noisy logs from third-party libraries for cleaner output +logging.getLogger("uvicorn.access").setLevel(logging.WARNING) +logging.getLogger("uvicorn.error").setLevel(logging.INFO) + +# --- FastAPI Application Setup --- +app = FastAPI( + title="Users Service API", + description="Manages users for multi-user note-taking application", + version="1.0.0", +) + +# Enable CORS +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +# --- Startup Event --- +@app.on_event("startup") +async def startup_event(): + max_retries = 10 + retry_delay_seconds = 5 + for i in range(max_retries): + try: + logger.info( + f"Users Service: Attempting to connect to PostgreSQL and create tables (attempt {i+1}/{max_retries})..." + ) + Base.metadata.create_all(bind=engine) + logger.info( + "Users Service: Successfully connected to PostgreSQL and ensured tables exist." + ) + break # Exit loop if successful + except OperationalError as e: + logger.warning(f"Users Service: Failed to connect to PostgreSQL: {e}") + if i < max_retries - 1: + logger.info( + f"Users Service: Retrying in {retry_delay_seconds} seconds..." + ) + time.sleep(retry_delay_seconds) + else: + logger.critical( + f"Users Service: Failed to connect to PostgreSQL after {max_retries} attempts. Exiting application." + ) + sys.exit(1) # Critical failure: exit if DB connection is unavailable + except Exception as e: + logger.critical( + f"Users Service: An unexpected error occurred during database startup: {e}", + exc_info=True, + ) + sys.exit(1) + + +# --- Root Endpoint --- +@app.get("/", status_code=status.HTTP_200_OK, summary="Root endpoint") +async def read_root(): + return {"message": "Welcome to the Users Service!"} + + +# --- Health Check Endpoint --- +@app.get("/health", status_code=status.HTTP_200_OK, summary="Health check") +async def health_check(): + return {"status": "ok", "service": "users-service"} + + +# --- CRUD Endpoints --- +# Create new user (Register) +# [POST] http://localhost:8001/users/ +""" +{ + "username": "johndoe", + "email": "john@example.com" +} +""" + + +@app.post( + "/users/", + response_model=UserResponse, + status_code=status.HTTP_201_CREATED, + summary="Register a new user", +) +async def create_user(user: UserCreate, db: Session = Depends(get_db)): + """Register a new user.""" + logger.info(f"Users Service: Creating user: {user.username}") + + # Check if username exists + existing_user = db.query(User).filter(User.username == user.username).first() + if existing_user: + logger.warning(f"Users Service: Username {user.username} already exists") + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, detail="Username already exists" + ) + + # Check if email exists + existing_email = db.query(User).filter(User.email == user.email).first() + if existing_email: + logger.warning(f"Users Service: Email {user.email} already exists") + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, detail="Email already exists" + ) + + try: + db_user = User(username=user.username, email=user.email) + db.add(db_user) + db.commit() + db.refresh(db_user) + logger.info( + f"Users Service: User '{db_user.username}' (ID: {db_user.id}) created successfully." + ) + return db_user + except Exception as e: + db.rollback() + logger.error(f"Users Service: Error creating user: {e}", exc_info=True) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Could not create user.", + ) + + +# Get user by ID +# [GET] http://localhost:8001/users/{user_id} +@app.get( + "/users/{user_id}", + response_model=UserResponse, + summary="Get a single user by ID", +) +def get_user(user_id: int, db: Session = Depends(get_db)): + """Retrieve a specific user by ID.""" + logger.info(f"Users Service: Fetching user with ID: {user_id}") + user = db.query(User).filter(User.id == user_id).first() + + if not user: + logger.warning(f"Users Service: User with ID {user_id} not found.") + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="User not found" + ) + + logger.info(f"Users Service: Retrieved user with ID {user_id}") + return user + + +# Get all users +# [GET] http://localhost:8001/users/ +@app.get( + "/users/", + response_model=List[UserResponse], + summary="Get all users", +) +def list_users( + db: Session = Depends(get_db), + skip: int = Query(0, ge=0), + limit: int = Query(100, ge=1, le=100), +): + """Retrieve all users.""" + logger.info(f"Users Service: Listing users with skip={skip}, limit={limit}") + users = db.query(User).offset(skip).limit(limit).all() + logger.info(f"Users Service: Retrieved {len(users)} users") + return users diff --git a/backend/users_service/app/models.py b/backend/users_service/app/models.py new file mode 100644 index 0000000..6a1031e --- /dev/null +++ b/backend/users_service/app/models.py @@ -0,0 +1,21 @@ +"""SQLAlchemy models.""" + +from sqlalchemy import Column, DateTime, Integer, String +from sqlalchemy.sql import func +from .db import Base + + +class User(Base): # pylint: disable=too-few-public-methods + """User model.""" + + __tablename__ = "users" + + id = Column(Integer, primary_key=True, index=True, autoincrement=True) + username = Column(String(50), unique=True, nullable=False, index=True) + email = Column(String(255), unique=True, nullable=False) + created_at = Column( + DateTime(timezone=True), server_default=func.now() + ) # pylint: disable=not-callable + + def __repr__(self): + return f"" diff --git a/backend/users_service/app/schemas.py b/backend/users_service/app/schemas.py new file mode 100644 index 0000000..f9c87bd --- /dev/null +++ b/backend/users_service/app/schemas.py @@ -0,0 +1,22 @@ +"""Pydantic schemas.""" + +from datetime import datetime +from pydantic import BaseModel, ConfigDict, EmailStr, Field + + +class UserCreate(BaseModel): + """Schema for creating user.""" + + username: str = Field(..., min_length=3, max_length=50) + email: EmailStr + + +class UserResponse(BaseModel): + """Schema for user response.""" + + id: int + username: str + email: str + created_at: datetime + + model_config = ConfigDict(from_attributes=True) diff --git a/backend/users_service/requirements-dev.txt b/backend/users_service/requirements-dev.txt new file mode 100644 index 0000000..54aac29 --- /dev/null +++ b/backend/users_service/requirements-dev.txt @@ -0,0 +1,19 @@ +fastapi +uvicorn +sqlalchemy +psycopg2-binary +python-multipart +pydantic +azure-storage-blob +aio-pika +pydantic[email] + +# Testing and coverage report +pytest +pytest-cov +httpx + +# Code quality +black # Linting & format code +pylint # Code quality +bandit # Security linting \ No newline at end of file diff --git a/backend/users_service/requirements.txt b/backend/users_service/requirements.txt new file mode 100644 index 0000000..b015fc7 --- /dev/null +++ b/backend/users_service/requirements.txt @@ -0,0 +1,10 @@ +fastapi +uvicorn +sqlalchemy +psycopg2-binary +python-multipart +pydantic +azure-storage-blob +aio-pika +pydantic[email] +setuptools>=78.1.1 \ No newline at end of file diff --git a/backend/users_service/tests/__init__.py b/backend/users_service/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/users_service/tests/conftest.py b/backend/users_service/tests/conftest.py new file mode 100644 index 0000000..bebbd03 --- /dev/null +++ b/backend/users_service/tests/conftest.py @@ -0,0 +1,101 @@ +import logging +import os +import time +import pytest +from sqlalchemy.exc import OperationalError +from sqlalchemy.orm import Session +from fastapi.testclient import TestClient + +from app.main import app +from app.db import Base, engine, SessionLocal, get_db +from app.models import User + +# Suppress noisy logs from SQLAlchemy/FastAPI during tests for cleaner output +logging.getLogger("sqlalchemy.engine").setLevel(logging.WARNING) +logging.getLogger("uvicorn.access").setLevel(logging.WARNING) +logging.getLogger("uvicorn.error").setLevel(logging.WARNING) +logging.getLogger("fastapi").setLevel(logging.WARNING) +logging.getLogger("app.main").setLevel(logging.WARNING) + + +@pytest.fixture(scope="session", autouse=True) +def setup_database_for_tests(): + """Set up test database with retry logic""" + max_retries = 10 + retry_delay_seconds = 3 + + for i in range(max_retries): + try: + logging.info( + f"Users Service Tests: Attempting to connect to PostgreSQL for test setup (attempt {i+1}/{max_retries})..." + ) + + # Explicitly drop all tables first to ensure a clean slate for the session + Base.metadata.drop_all(bind=engine) + logging.info( + "Users Service Tests: Successfully dropped all tables in PostgreSQL for test setup." + ) + + # Then create all tables required by the application + Base.metadata.create_all(bind=engine) + logging.info( + "Users Service Tests: Successfully created all tables in PostgreSQL for test setup." + ) + break + except OperationalError as e: + logging.warning( + f"Users Service Tests: Test setup DB connection failed: {e}. Retrying in {retry_delay_seconds} seconds..." + ) + time.sleep(retry_delay_seconds) + if i == max_retries - 1: + pytest.fail( + f"Could not connect to PostgreSQL for Product Service test setup after {max_retries} attempts: {e}" + ) + except Exception as e: + pytest.fail( + f"Users Service Tests: An unexpected error occurred during test DB setup: {e}", + pytrace=True, + ) + yield + + +@pytest.fixture(scope="function") +def db_session_for_test(): + """Provide isolated database session for each test""" + connection = engine.connect() + transaction = connection.begin() + db = SessionLocal(bind=connection) + + def override_get_db(): + yield db + + app.dependency_overrides[get_db] = override_get_db + + try: + yield db + finally: + transaction.rollback() + db.close() + connection.close() + app.dependency_overrides.pop(get_db, None) + + +@pytest.fixture(scope="module") +def client(): + """ + Provides a TestClient for making HTTP requests to the FastAPI application. + The TestClient automatically manages the app's lifespan events (startup/shutdown). + """ + os.environ["AZURE_STORAGE_ACCOUNT_NAME"] = "testaccount" + os.environ["AZURE_STORAGE_ACCOUNT_KEY"] = "testkey" + os.environ["AZURE_STORAGE_CONTAINER_NAME"] = "test-images" + os.environ["AZURE_SAS_TOKEN_EXPIRY_HOURS"] = "1" + + with TestClient(app) as test_client: + yield test_client + + # Clean up environment variables after tests + del os.environ["AZURE_STORAGE_ACCOUNT_NAME"] + del os.environ["AZURE_STORAGE_ACCOUNT_KEY"] + del os.environ["AZURE_STORAGE_CONTAINER_NAME"] + del os.environ["AZURE_SAS_TOKEN_EXPIRY_HOURS"] diff --git a/backend/users_service/tests/integration/__init__.py b/backend/users_service/tests/integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/users_service/tests/integration/test_users_api.py b/backend/users_service/tests/integration/test_users_api.py new file mode 100644 index 0000000..742b639 --- /dev/null +++ b/backend/users_service/tests/integration/test_users_api.py @@ -0,0 +1,126 @@ +"""Integration tests for Users Service API.""" + +from fastapi.testclient import TestClient +from sqlalchemy.orm import Session +from app.models import User + + +def test_read_root(client: TestClient): + """Test root endpoint.""" + response = client.get("/") + assert response.status_code == 200 + assert response.json() == {"message": "Welcome to the Users Service!"} + + +def test_health_check(client: TestClient): + """Test health check endpoint.""" + response = client.get("/health") + assert response.status_code == 200 + assert response.json() == {"status": "ok", "service": "users-service"} + + +def test_create_user_success(client: TestClient, db_session_for_test: Session): + """Test successful user creation.""" + test_data = {"username": "johndoe", "email": "john@example.com"} + response = client.post("/users/", json=test_data) + + assert response.status_code == 201 + data = response.json() + assert data["username"] == test_data["username"] + assert data["email"] == test_data["email"] + assert "id" in data + assert "created_at" in data + + # Verify in database + db_user = db_session_for_test.query(User).filter(User.id == data["id"]).first() + assert db_user is not None + assert db_user.username == test_data["username"] + + +def test_create_user_duplicate_username( + client: TestClient, db_session_for_test: Session +): + """Test creating user with duplicate username.""" + test_data = {"username": "duplicate", "email": "user1@example.com"} + + # Create first user + response1 = client.post("/users/", json=test_data) + assert response1.status_code == 201 + + # Try to create second user with same username + test_data2 = {"username": "duplicate", "email": "user2@example.com"} + response2 = client.post("/users/", json=test_data2) + assert response2.status_code == 409 + assert "Username already exists" in response2.json()["detail"] + + +def test_create_user_duplicate_email(client: TestClient, db_session_for_test: Session): + """Test creating user with duplicate email.""" + test_data = {"username": "user1", "email": "duplicate@example.com"} + + # Create first user + response1 = client.post("/users/", json=test_data) + assert response1.status_code == 201 + + # Try to create second user with same email + test_data2 = {"username": "user2", "email": "duplicate@example.com"} + response2 = client.post("/users/", json=test_data2) + assert response2.status_code == 409 + assert "Email already exists" in response2.json()["detail"] + + +def test_create_user_invalid_email(client: TestClient): + """Test creating user with invalid email format.""" + invalid_data = {"username": "testuser", "email": "invalid-email"} + response = client.post("/users/", json=invalid_data) + assert response.status_code == 422 + + +def test_create_user_short_username(client: TestClient): + """Test creating user with too short username.""" + invalid_data = {"username": "ab", "email": "test@example.com"} + response = client.post("/users/", json=invalid_data) + assert response.status_code == 422 + + +def test_get_user_success(client: TestClient, db_session_for_test: Session): + """Test getting user by ID.""" + # Create user first + create_response = client.post( + "/users/", json={"username": "gettest", "email": "get@example.com"} + ) + user_id = create_response.json()["id"] + + # Get user + response = client.get(f"/users/{user_id}") + assert response.status_code == 200 + assert response.json()["id"] == user_id + assert response.json()["username"] == "gettest" + + +def test_get_user_not_found(client: TestClient): + """Test getting non-existent user.""" + response = client.get("/users/99999") + assert response.status_code == 404 + assert "User not found" in response.json()["detail"] + + +def test_list_users_empty(client: TestClient, db_session_for_test: Session): + """Test listing users when database is empty.""" + response = client.get("/users/") + assert response.status_code == 200 + # Note: may have users from other tests, so just check it's a list + assert isinstance(response.json(), list) + + +def test_list_users_with_data(client: TestClient, db_session_for_test: Session): + """Test listing users with data.""" + # Create users + client.post("/users/", json={"username": "user1", "email": "user1@example.com"}) + client.post("/users/", json={"username": "user2", "email": "user2@example.com"}) + + # List users + response = client.get("/users/") + assert response.status_code == 200 + assert isinstance(response.json(), list) + assert len(response.json()) >= 2 diff --git a/backend/users_service/tests/unit/__init__.py b/backend/users_service/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/backend/users_service/tests/unit/test_models.py b/backend/users_service/tests/unit/test_models.py new file mode 100644 index 0000000..b99eca2 --- /dev/null +++ b/backend/users_service/tests/unit/test_models.py @@ -0,0 +1,12 @@ +"""Unit tests for SQLAlchemy models.""" + +from app.models import User + + +def test_user_repr(): + """Test user model string representation.""" + user = User(id=1, username="testuser", email="test@example.com") + repr_str = repr(user) + assert "User" in repr_str + assert "id=1" in repr_str + assert "testuser" in repr_str diff --git a/backend/users_service/tests/unit/test_schemas.py b/backend/users_service/tests/unit/test_schemas.py new file mode 100644 index 0000000..caf4a74 --- /dev/null +++ b/backend/users_service/tests/unit/test_schemas.py @@ -0,0 +1,36 @@ +"""Unit tests for Pydantic schemas.""" + +import pytest +from pydantic import ValidationError +from app.schemas import UserCreate + + +def test_user_create_valid(): + """Test valid user creation schema.""" + user = UserCreate(username="testuser", email="test@example.com") + assert user.username == "testuser" + assert user.email == "test@example.com" + + +def test_user_create_invalid_email(): + """Test user creation with invalid email.""" + with pytest.raises(ValidationError): + UserCreate(username="testuser", email="invalid-email") + + +def test_user_create_short_username(): + """Test user creation with username too short.""" + with pytest.raises(ValidationError): + UserCreate(username="ab", email="test@example.com") + + +def test_user_create_long_username(): + """Test user creation with username too long.""" + with pytest.raises(ValidationError): + UserCreate(username="a" * 51, email="test@example.com") + + +def test_user_create_empty_username(): + """Test user creation with empty username.""" + with pytest.raises(ValidationError): + UserCreate(username="", email="test@example.com") diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..c3b15eb --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,73 @@ +version: '3.8' + +services: + postgres-notes: + image: postgres:15-alpine + container_name: postgres-notes + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=notes + ports: + - "5532:5432" + volumes: + - notes_db_data:/var/lib/postgresql/data + + notes-service: + build: ./backend/notes_service + ports: + - "5001:8000" + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=notes + - POSTGRES_HOST=postgres-notes + - POSTGRES_PORT=5432 + depends_on: + - postgres-notes + command: uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload + volumes: + - ./backend/notes_service/app:/code/app + + postgres-users: + image: postgres:15-alpine + container_name: postgres-users + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=users + ports: + - "5533:5432" # Different host port to avoid conflict + volumes: + - users_db_data:/var/lib/postgresql/data + + users-service: + build: ./backend/users_service + ports: + - "5000:8000" + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=users + - POSTGRES_HOST=postgres-users + - POSTGRES_PORT=5432 + depends_on: + - postgres-users + command: uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload + volumes: + - ./backend/users_service/app:/code/app + + frontend: + build: ./frontend + container_name: frontend + ports: + - "3000:80" + depends_on: + - notes-service + - users-service + + +# Persistent Volume +volumes: + notes_db_data: + users_db_data: \ No newline at end of file diff --git a/frontend/Dockerfile b/frontend/Dockerfile new file mode 100644 index 0000000..185167c --- /dev/null +++ b/frontend/Dockerfile @@ -0,0 +1,11 @@ +FROM nginx:alpine + +RUN apk update && apk upgrade + +COPY nginx.conf /etc/nginx/conf.d/default.conf + +COPY . /usr/share/nginx/html + +EXPOSE 80 + +CMD ["nginx", "-g", "daemon off;"] diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 0000000..f7fcbb0 --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,308 @@ + + + + + + Notes Application + + + + +
+

Notes Application

+

+ Multi-user note-taking platform with Notes Service and Users Service +

+ +
+ + +
+

User Management

+ +

Register New User

+
+
+ + +
+
+ + +
+ +
+ +

All Users

+
+

Loading users...

+
+
+ + +
+

Notes Management

+ +

Create New Note

+
+
+ + +
+
+ + +
+
+ + +
+ +
+ +

Filter Notes

+
+
+ + +
+ + +
+ +

All Notes

+
+

Loading notes...

+
+
+
+ + + + + \ No newline at end of file diff --git a/frontend/main.js b/frontend/main.js new file mode 100644 index 0000000..e54430c --- /dev/null +++ b/frontend/main.js @@ -0,0 +1,271 @@ +document.addEventListener('DOMContentLoaded', () => { + // API endpoints - these will be replaced during deployment + // const USERS_API_BASE_URL = '_USERS_API_URL_'; + // const NOTES_API_BASE_URL = '_NOTES_API_URL_'; + const USERS_API_BASE_URL = 'http://localhost:5000'; + const NOTES_API_BASE_URL = 'http://localhost:5001'; + + // DOM Elements + const messageBox = document.getElementById('message-box'); + const userForm = document.getElementById('user-form'); + const userListDiv = document.getElementById('user-list'); + const noteForm = document.getElementById('note-form'); + const noteListDiv = document.getElementById('note-list'); + const filterBtn = document.getElementById('filter-btn'); + const clearFilterBtn = document.getElementById('clear-filter-btn'); + const editModal = document.getElementById('edit-modal'); + const editNoteForm = document.getElementById('edit-note-form'); + const cancelEditBtn = document.getElementById('cancel-edit-btn'); + + let currentEditNoteId = null; + let currentFilter = null; + + // --- Utility Functions --- + function showMessage(message, type = 'info') { + messageBox.textContent = message; + messageBox.className = `message-box ${type}`; + messageBox.style.display = 'block'; + setTimeout(() => { + messageBox.style.display = 'none'; + }, 5000); + } + + // --- User Service Interactions --- + async function fetchUsers() { + userListDiv.innerHTML = '

Loading users...

'; + try { + const response = await fetch(`${USERS_API_BASE_URL}/users/`); + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.detail || `HTTP error! status: ${response.status}`); + } + const users = await response.json(); + + userListDiv.innerHTML = ''; + + if (users.length === 0) { + userListDiv.innerHTML = '

No users registered yet.

'; + return; + } + + users.forEach(user => { + const userCard = document.createElement('div'); + userCard.className = 'user-card'; + userCard.innerHTML = ` +

${user.username} (ID: ${user.id})

+

Email: ${user.email}

+

Created: ${new Date(user.created_at).toLocaleString()}

+ `; + userListDiv.appendChild(userCard); + }); + } catch (error) { + console.error('Error fetching users:', error); + showMessage(`Failed to load users: ${error.message}`, 'error'); + userListDiv.innerHTML = '

Could not load users.

'; + } + } + + userForm.addEventListener('submit', async (event) => { + event.preventDefault(); + + const username = document.getElementById('user-username').value; + const email = document.getElementById('user-email').value; + + try { + const response = await fetch(`${USERS_API_BASE_URL}/users/`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ username, email }), + }); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.detail || `HTTP error! status: ${response.status}`); + } + + const newUser = await response.json(); + showMessage(`User "${newUser.username}" registered successfully! ID: ${newUser.id}`, 'success'); + userForm.reset(); + fetchUsers(); + } catch (error) { + console.error('Error registering user:', error); + showMessage(`Error: ${error.message}`, 'error'); + } + }); + + // --- Notes Service Interactions --- + async function fetchNotes(userId = null) { + noteListDiv.innerHTML = '

Loading notes...

'; + try { + let url = `${NOTES_API_BASE_URL}/notes/`; + if (userId) { + url += `?user_id=${userId}`; + } else { + url += `?user_id=0`; + } + + const response = await fetch(url); + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.detail || `HTTP error! status: ${response.status}`); + } + const notes = await response.json(); + + noteListDiv.innerHTML = ''; + + if (notes.length === 0) { + noteListDiv.innerHTML = '

No notes found.

'; + return; + } + + notes.forEach(note => { + const noteCard = document.createElement('div'); + noteCard.className = 'note-card'; + noteCard.innerHTML = ` +

${note.title}

+

User ID: ${note.user_id} | Note ID: ${note.id}

+
${note.content}
+

Created: ${new Date(note.created_at).toLocaleString()}

+ ${note.updated_at ? `

Updated: ${new Date(note.updated_at).toLocaleString()}

` : ''} +
+ + +
+ `; + noteListDiv.appendChild(noteCard); + }); + } catch (error) { + console.error('Error fetching notes:', error); + showMessage(`Failed to load notes: ${error.message}`, 'error'); + noteListDiv.innerHTML = '

Could not load notes.

'; + } + } + + noteForm.addEventListener('submit', async (event) => { + event.preventDefault(); + + const user_id = parseInt(document.getElementById('note-user-id').value); + const title = document.getElementById('note-title').value; + const content = document.getElementById('note-content').value; + + try { + const response = await fetch(`${NOTES_API_BASE_URL}/notes/`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ user_id, title, content }), + }); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.detail || `HTTP error! status: ${response.status}`); + } + + const newNote = await response.json(); + showMessage(`Note "${newNote.title}" created successfully!`, 'success'); + noteForm.reset(); + fetchNotes(currentFilter); + } catch (error) { + console.error('Error creating note:', error); + showMessage(`Error: ${error.message}`, 'error'); + } + }); + + // Filter functionality + filterBtn.addEventListener('click', () => { + const userId = document.getElementById('filter-user-id').value; + currentFilter = userId ? parseInt(userId) : null; + fetchNotes(currentFilter); + }); + + clearFilterBtn.addEventListener('click', () => { + document.getElementById('filter-user-id').value = ''; + currentFilter = null; + fetchNotes(); + }); + + // Edit and Delete handlers + noteListDiv.addEventListener('click', async (event) => { + // Delete Note + if (event.target.classList.contains('delete-btn')) { + const noteId = event.target.dataset.id; + if (!confirm(`Delete note ID: ${noteId}?`)) return; + + try { + const response = await fetch(`${NOTES_API_BASE_URL}/notes/${noteId}`, { + method: 'DELETE', + }); + + if (response.status === 204) { + showMessage(`Note deleted successfully`, 'success'); + fetchNotes(currentFilter); + } else { + const errorData = await response.json(); + throw new Error(errorData.detail || 'Delete failed'); + } + } catch (error) { + console.error('Error deleting note:', error); + showMessage(`Error: ${error.message}`, 'error'); + } + } + + // Edit Note + if (event.target.classList.contains('edit-btn')) { + const noteId = event.target.dataset.id; + + try { + const response = await fetch(`${NOTES_API_BASE_URL}/notes/${noteId}`); + if (!response.ok) throw new Error('Failed to fetch note'); + + const note = await response.json(); + currentEditNoteId = noteId; + document.getElementById('edit-note-title').value = note.title; + document.getElementById('edit-note-content').value = note.content; + editModal.style.display = 'block'; + } catch (error) { + console.error('Error loading note for edit:', error); + showMessage(`Error: ${error.message}`, 'error'); + } + } + }); + + // Edit form submission + editNoteForm.addEventListener('submit', async (event) => { + event.preventDefault(); + + const title = document.getElementById('edit-note-title').value; + const content = document.getElementById('edit-note-content').value; + + try { + const response = await fetch(`${NOTES_API_BASE_URL}/notes/${currentEditNoteId}`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ title, content }), + }); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.detail || 'Update failed'); + } + + showMessage('Note updated successfully!', 'success'); + editModal.style.display = 'none'; + fetchNotes(currentFilter); + } catch (error) { + console.error('Error updating note:', error); + showMessage(`Error: ${error.message}`, 'error'); + } + }); + + cancelEditBtn.addEventListener('click', () => { + editModal.style.display = 'none'; + }); + + // Initial load + fetchUsers(); + fetchNotes(); + + // Auto-refresh every 15 seconds + setInterval(() => { + fetchNotes(currentFilter); + }, 15000); +}); \ No newline at end of file diff --git a/frontend/nginx.conf b/frontend/nginx.conf new file mode 100644 index 0000000..daf1da9 --- /dev/null +++ b/frontend/nginx.conf @@ -0,0 +1,12 @@ +server { + listen 80; + server_name localhost; # Can be an IP address or hostname + + root /usr/share/nginx/html; + + index index.html index.html; + + location / { + try_files $uri $uri/ =404; + } +} diff --git a/infrastructure/production/.terraform.lock.hcl b/infrastructure/production/.terraform.lock.hcl new file mode 100644 index 0000000..b5bb53a --- /dev/null +++ b/infrastructure/production/.terraform.lock.hcl @@ -0,0 +1,37 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/azurerm" { + version = "3.117.1" + constraints = "~> 3.0" + hashes = [ + "h1:OXBPoQpiwe519GeBfkmbfsDXO020v706RmWTYSuuUCE=", + "zh:1fedd2521c8ced1fbebd5d70fda376d42393cac5cc25c043c390b44d630d9e37", + "zh:634c16442fd8aaed6c3bccd0069f4a01399b141d2a993d85997e6a03f9f867cf", + "zh:637ae3787f87506e5b673f44a1b0f33cf75d7fa9c5353df6a2584488fc3d4328", + "zh:7c7741f66ff5b05051db4b6c3d9bad68c829f9e920a7f1debdca0ab8e50836a3", + "zh:9b454fa0b6c821db2c6a71e591a467a5b4802129509710b56f01ae7106058d86", + "zh:bb820ff92b4a77e9d70999ae30758d408728c6e782b4e1c8c4b6d53b8c3c8ff9", + "zh:d38cd7d5f99398fb96672cb27943b96ea2b7008f26d379a69e1c6c2f25051869", + "zh:d56f5a132181ab14e6be332996753cc11c0d3b1cfdd1a1b44ef484c67e38cc91", + "zh:d8a1e7cf218f46e6d0bd878ff70f92db7e800a15f01e96189a24864d10cde33b", + "zh:f67cf6d14d859a1d2a1dc615941a1740a14cb3f4ee2a34da672ff6729d81fa81", + ] +} + +provider "registry.opentofu.org/hashicorp/kubernetes" { + version = "2.38.0" + constraints = "~> 2.23" + hashes = [ + "h1:HGkB9bCmUqMRcR5/bAUOSqPBsx6DAIEnbT1fZ8vzI78=", + "zh:1096b41c4e5b2ee6c1980916fb9a8579bc1892071396f7a9432be058aabf3cbc", + "zh:2959fde9ae3d1deb5e317df0d7b02ea4977951ee6b9c4beb083c148ca8f3681c", + "zh:5082f98fcb3389c73339365f7df39fc6912bf2bd1a46d5f97778f441a67fd337", + "zh:620fd5d0fbc2d7a24ac6b420a4922e6093020358162a62fa8cbd37b2bac1d22e", + "zh:7f47c2de179bba35d759147c53082cad6c3449d19b0ec0c5a4ca8db5b06393e1", + "zh:89c3aa2a87e29febf100fd21cead34f9a4c0e6e7ae5f383b5cef815c677eb52a", + "zh:96eecc9f94938a0bc35b8a63d2c4a5f972395e44206620db06760b730d0471fc", + "zh:e15567c1095f898af173c281b66bffdc4f3068afdd9f84bb5b5b5521d9f29584", + "zh:ecc6b912629734a9a41a7cf1c4c73fb13b4b510afc9e7b2e0011d290bcd6d77f", + ] +} diff --git a/infrastructure/production/container_registry.tf b/infrastructure/production/container_registry.tf new file mode 100644 index 0000000..9fdd0ca --- /dev/null +++ b/infrastructure/production/container_registry.tf @@ -0,0 +1,7 @@ +# infrastructure/production/container_registry.tf + +# Reference the shared ACR from the shared resource group +data "azurerm_container_registry" "shared_acr" { + name = "${var.prefix}acr" + resource_group_name = "${var.prefix}-shared-rg" +} diff --git a/infrastructure/production/kubernetes_cluster.tf b/infrastructure/production/kubernetes_cluster.tf new file mode 100644 index 0000000..5f17b67 --- /dev/null +++ b/infrastructure/production/kubernetes_cluster.tf @@ -0,0 +1,59 @@ +# infrastructure/production/kubernetes_cluster.tf + +resource "azurerm_kubernetes_cluster" "production_aks" { + name = "${var.prefix}-${var.environment}-aks" + location = var.location + resource_group_name = azurerm_resource_group.production_rg.name + dns_prefix = "${var.prefix}-${var.environment}" + kubernetes_version = var.kubernetes_version + + default_node_pool { + name = "default" + node_count = var.node_count + vm_size = var.node_vm_size + + # Enable auto-scaling for cost optimization (optional for cost optimization) + # enable_auto_scaling = true + # min_count = 1 + # max_count = 3 + } + + # Use a system‐assigned managed identity + identity { + type = "SystemAssigned" + } + + tags = { + Environment = var.environment + ManagedBy = "Terraform" + GitSHA = var.git_sha + } + + # Uncomment if enabling auto-scaling above + # lifecycle { + # ignore_changes = [ + # default_node_pool[0].node_count + # ] + # } +} + +# Grant AKS permission to pull images from ACR +resource "azurerm_role_assignment" "aks_acr_pull" { + principal_id = azurerm_kubernetes_cluster.production_aks.kubelet_identity[0].object_id + role_definition_name = "AcrPull" + scope = data.azurerm_container_registry.shared_acr.id + skip_service_principal_aad_check = true +} + +# Create production namespace +resource "kubernetes_namespace" "production" { + metadata { + name = var.environment + labels = { + environment = var.environment + managed-by = "terraform" + } + } + + depends_on = [azurerm_kubernetes_cluster.production_aks] +} \ No newline at end of file diff --git a/infrastructure/production/outputs.tf b/infrastructure/production/outputs.tf new file mode 100644 index 0000000..c8ee3a5 --- /dev/null +++ b/infrastructure/production/outputs.tf @@ -0,0 +1,27 @@ +# infrastructure/production/outputs.tf + +output "resource_group_name" { + description = "Resource group name" + value = azurerm_resource_group.production_rg.name +} + +output "aks_cluster_name" { + description = "AKS cluster name" + value = azurerm_kubernetes_cluster.production_aks.name +} + +output "aks_kube_config" { + description = "AKS kubeconfig" + value = azurerm_kubernetes_cluster.production_aks.kube_config_raw + sensitive = true +} + +output "acr_login_server" { + description = "ACR login server" + value = data.azurerm_container_registry.shared_acr.login_server +} + +output "git_sha" { + description = "Git commit SHA" + value = var.git_sha +} diff --git a/infrastructure/production/provider.tf b/infrastructure/production/provider.tf new file mode 100644 index 0000000..fba66f3 --- /dev/null +++ b/infrastructure/production/provider.tf @@ -0,0 +1,30 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~>3.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.23" + } + } + required_version = ">= 1.1.0" +} + +provider "azurerm" { + # Protect production + features { + resource_group { + prevent_deletion_if_contains_resources = true + } + } +} + +# Configure Kubernetes provider for production AKS +provider "kubernetes" { + host = azurerm_kubernetes_cluster.production_aks.kube_config[0].host + client_certificate = base64decode(azurerm_kubernetes_cluster.production_aks.kube_config[0].client_certificate) + client_key = base64decode(azurerm_kubernetes_cluster.production_aks.kube_config[0].client_key) + cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.production_aks.kube_config[0].cluster_ca_certificate) +} \ No newline at end of file diff --git a/infrastructure/production/resource_group.tf b/infrastructure/production/resource_group.tf new file mode 100644 index 0000000..a9cf8b1 --- /dev/null +++ b/infrastructure/production/resource_group.tf @@ -0,0 +1,13 @@ +# infrastructure/production/resource_group.tf + +resource "azurerm_resource_group" "production_rg" { + name = "${var.prefix}-${var.environment}-rg" + location = var.location + + tags = { + Environment = var.environment + ManagedBy = "Terraform" + GitSHA = var.git_sha + Critical = "true" + } +} \ No newline at end of file diff --git a/infrastructure/production/variables.tf b/infrastructure/production/variables.tf new file mode 100644 index 0000000..f183a3e --- /dev/null +++ b/infrastructure/production/variables.tf @@ -0,0 +1,44 @@ +# Specify the environment +variable "environment" { + description = "Environment name" + type = string + default = "production" +} + +# Specify the prefix, ensuring all resources have unique naming +variable "prefix" { + description = "Prefix for all resource names" + type = string + default = "sit722alicestd" +} + +# Resource configuration variables +variable "location" { + description = "Azure region" + type = string + default = "australiaeast" +} + +variable "kubernetes_version" { + description = "Kubernetes version" + type = string + default = "1.31.7" +} + +variable "node_count" { + description = "Number of AKS nodes" + type = number + default = 1 +} + +variable "node_vm_size" { + description = "VM size for AKS nodes" + type = string + default = "Standard_D2s_v3" +} + +variable "git_sha" { + description = "Git commit SHA for tagging" + type = string + default = "manual" +} \ No newline at end of file diff --git a/infrastructure/shared/.terraform.lock.hcl b/infrastructure/shared/.terraform.lock.hcl new file mode 100644 index 0000000..9a7d68c --- /dev/null +++ b/infrastructure/shared/.terraform.lock.hcl @@ -0,0 +1,20 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/azurerm" { + version = "3.117.1" + constraints = "~> 3.0" + hashes = [ + "h1:OXBPoQpiwe519GeBfkmbfsDXO020v706RmWTYSuuUCE=", + "zh:1fedd2521c8ced1fbebd5d70fda376d42393cac5cc25c043c390b44d630d9e37", + "zh:634c16442fd8aaed6c3bccd0069f4a01399b141d2a993d85997e6a03f9f867cf", + "zh:637ae3787f87506e5b673f44a1b0f33cf75d7fa9c5353df6a2584488fc3d4328", + "zh:7c7741f66ff5b05051db4b6c3d9bad68c829f9e920a7f1debdca0ab8e50836a3", + "zh:9b454fa0b6c821db2c6a71e591a467a5b4802129509710b56f01ae7106058d86", + "zh:bb820ff92b4a77e9d70999ae30758d408728c6e782b4e1c8c4b6d53b8c3c8ff9", + "zh:d38cd7d5f99398fb96672cb27943b96ea2b7008f26d379a69e1c6c2f25051869", + "zh:d56f5a132181ab14e6be332996753cc11c0d3b1cfdd1a1b44ef484c67e38cc91", + "zh:d8a1e7cf218f46e6d0bd878ff70f92db7e800a15f01e96189a24864d10cde33b", + "zh:f67cf6d14d859a1d2a1dc615941a1740a14cb3f4ee2a34da672ff6729d81fa81", + ] +} diff --git a/infrastructure/shared/container_registry.tf b/infrastructure/shared/container_registry.tf new file mode 100644 index 0000000..0e99431 --- /dev/null +++ b/infrastructure/shared/container_registry.tf @@ -0,0 +1,14 @@ +# infrastructure/shared/container_registry.tf + +resource "azurerm_container_registry" "acr" { + name = "${var.prefix}acr" + resource_group_name = azurerm_resource_group.shared_rg.name + location = var.location + sku = "Basic" + admin_enabled = true + + tags = { + Environment = "Shared" + ManagedBy = "Terraform" + } +} diff --git a/infrastructure/shared/outputs.tf b/infrastructure/shared/outputs.tf new file mode 100644 index 0000000..e546246 --- /dev/null +++ b/infrastructure/shared/outputs.tf @@ -0,0 +1,38 @@ +# infrastructure/shared/outputs.tf + +output "resource_group_name" { + description = "Shared resource group name" + value = azurerm_resource_group.shared_rg.name +} + +output "acr_name" { + description = "Azure Container Registry name" + value = azurerm_container_registry.acr.name +} + +output "acr_login_server" { + description = "ACR login server" + value = azurerm_container_registry.acr.login_server +} + +output "acr_admin_username" { + description = "ACR admin username" + value = azurerm_container_registry.acr.admin_username + sensitive = true +} + +output "acr_admin_password" { + description = "ACR admin password" + value = azurerm_container_registry.acr.admin_password + sensitive = true +} + +# output "tfstate_storage_account_name" { +# description = "Storage account name for Terraform state" +# value = azurerm_storage_account.tfstate.name +# } + +# output "tfstate_container_name" { +# description = "Container name for Terraform state" +# value = azurerm_storage_container.tfstate.name +# } \ No newline at end of file diff --git a/infrastructure/shared/provider.tf b/infrastructure/shared/provider.tf new file mode 100644 index 0000000..7f028c3 --- /dev/null +++ b/infrastructure/shared/provider.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~>3.0" + } + } + required_version = ">= 1.1.0" +} + +provider "azurerm" { + features {} +} \ No newline at end of file diff --git a/infrastructure/shared/resource_group.tf b/infrastructure/shared/resource_group.tf new file mode 100644 index 0000000..ccb2011 --- /dev/null +++ b/infrastructure/shared/resource_group.tf @@ -0,0 +1,12 @@ +# infrastructure/shared/resource_group.tf + +resource "azurerm_resource_group" "shared_rg" { + name = "${var.prefix}-shared-rg" + location = var.location + + tags = { + Environment = "Shared" + ManagedBy = "Terraform" + Purpose = "Shared resources across all environments" + } +} \ No newline at end of file diff --git a/infrastructure/shared/variables.tf b/infrastructure/shared/variables.tf new file mode 100644 index 0000000..238a038 --- /dev/null +++ b/infrastructure/shared/variables.tf @@ -0,0 +1,13 @@ +# infrastructure/shared/variables.tf + +variable "prefix" { + description = "Prefix for all resource names" + type = string + default = "sit722alicestd" +} + +variable "location" { + description = "Azure region" + type = string + default = "australiaeast" +} diff --git a/infrastructure/staging/.terraform.lock.hcl b/infrastructure/staging/.terraform.lock.hcl new file mode 100644 index 0000000..b5bb53a --- /dev/null +++ b/infrastructure/staging/.terraform.lock.hcl @@ -0,0 +1,37 @@ +# This file is maintained automatically by "tofu init". +# Manual edits may be lost in future updates. + +provider "registry.opentofu.org/hashicorp/azurerm" { + version = "3.117.1" + constraints = "~> 3.0" + hashes = [ + "h1:OXBPoQpiwe519GeBfkmbfsDXO020v706RmWTYSuuUCE=", + "zh:1fedd2521c8ced1fbebd5d70fda376d42393cac5cc25c043c390b44d630d9e37", + "zh:634c16442fd8aaed6c3bccd0069f4a01399b141d2a993d85997e6a03f9f867cf", + "zh:637ae3787f87506e5b673f44a1b0f33cf75d7fa9c5353df6a2584488fc3d4328", + "zh:7c7741f66ff5b05051db4b6c3d9bad68c829f9e920a7f1debdca0ab8e50836a3", + "zh:9b454fa0b6c821db2c6a71e591a467a5b4802129509710b56f01ae7106058d86", + "zh:bb820ff92b4a77e9d70999ae30758d408728c6e782b4e1c8c4b6d53b8c3c8ff9", + "zh:d38cd7d5f99398fb96672cb27943b96ea2b7008f26d379a69e1c6c2f25051869", + "zh:d56f5a132181ab14e6be332996753cc11c0d3b1cfdd1a1b44ef484c67e38cc91", + "zh:d8a1e7cf218f46e6d0bd878ff70f92db7e800a15f01e96189a24864d10cde33b", + "zh:f67cf6d14d859a1d2a1dc615941a1740a14cb3f4ee2a34da672ff6729d81fa81", + ] +} + +provider "registry.opentofu.org/hashicorp/kubernetes" { + version = "2.38.0" + constraints = "~> 2.23" + hashes = [ + "h1:HGkB9bCmUqMRcR5/bAUOSqPBsx6DAIEnbT1fZ8vzI78=", + "zh:1096b41c4e5b2ee6c1980916fb9a8579bc1892071396f7a9432be058aabf3cbc", + "zh:2959fde9ae3d1deb5e317df0d7b02ea4977951ee6b9c4beb083c148ca8f3681c", + "zh:5082f98fcb3389c73339365f7df39fc6912bf2bd1a46d5f97778f441a67fd337", + "zh:620fd5d0fbc2d7a24ac6b420a4922e6093020358162a62fa8cbd37b2bac1d22e", + "zh:7f47c2de179bba35d759147c53082cad6c3449d19b0ec0c5a4ca8db5b06393e1", + "zh:89c3aa2a87e29febf100fd21cead34f9a4c0e6e7ae5f383b5cef815c677eb52a", + "zh:96eecc9f94938a0bc35b8a63d2c4a5f972395e44206620db06760b730d0471fc", + "zh:e15567c1095f898af173c281b66bffdc4f3068afdd9f84bb5b5b5521d9f29584", + "zh:ecc6b912629734a9a41a7cf1c4c73fb13b4b510afc9e7b2e0011d290bcd6d77f", + ] +} diff --git a/infrastructure/staging/container_registry.tf b/infrastructure/staging/container_registry.tf new file mode 100644 index 0000000..a5de0dc --- /dev/null +++ b/infrastructure/staging/container_registry.tf @@ -0,0 +1,7 @@ +# infrastructure/staging/container_registry.tf + +# Reference the shared ACR from the shared resource group +data "azurerm_container_registry" "shared_acr" { + name = "${var.prefix}acr" + resource_group_name = "${var.prefix}-shared-rg" +} diff --git a/infrastructure/staging/kubernetes_cluster.tf b/infrastructure/staging/kubernetes_cluster.tf new file mode 100644 index 0000000..73b0021 --- /dev/null +++ b/infrastructure/staging/kubernetes_cluster.tf @@ -0,0 +1,59 @@ +# infrastructure/staging/kubernetes_cluster.tf + +resource "azurerm_kubernetes_cluster" "staging_aks" { + name = "${var.prefix}-${var.environment}-aks" + location = var.location + resource_group_name = azurerm_resource_group.staging_rg.name + dns_prefix = "${var.prefix}-${var.environment}" + kubernetes_version = var.kubernetes_version + + default_node_pool { + name = "default" + node_count = var.node_count + vm_size = var.node_vm_size + + # Enable auto-scaling for cost optimization (optional for cost optimization) + # enable_auto_scaling = true + # min_count = 1 + # max_count = 3 + } + + # Use a system‐assigned managed identity + identity { + type = "SystemAssigned" + } + + tags = { + Environment = var.environment + ManagedBy = "Terraform" + GitSHA = var.git_sha + } + + # Uncomment if enabling auto-scaling above + # lifecycle { + # ignore_changes = [ + # default_node_pool[0].node_count + # ] + # } +} + +# Grant AKS permission to pull images from your ACR +resource "azurerm_role_assignment" "aks_acr_pull" { + principal_id = azurerm_kubernetes_cluster.staging_aks.kubelet_identity[0].object_id + role_definition_name = "AcrPull" + scope = data.azurerm_container_registry.shared_acr.id + skip_service_principal_aad_check = true +} + +# Create staging namespace +resource "kubernetes_namespace" "staging" { + metadata { + name = var.environment + labels = { + environment = var.environment + managed-by = "terraform" + } + } + + depends_on = [azurerm_kubernetes_cluster.staging_aks] +} \ No newline at end of file diff --git a/infrastructure/staging/outputs.tf b/infrastructure/staging/outputs.tf new file mode 100644 index 0000000..96480c8 --- /dev/null +++ b/infrastructure/staging/outputs.tf @@ -0,0 +1,27 @@ +# infrastructure/staging/outputs.tf + +output "resource_group_name" { + description = "Resource group name" + value = azurerm_resource_group.staging_rg.name +} + +output "aks_cluster_name" { + description = "AKS cluster name" + value = azurerm_kubernetes_cluster.staging_aks.name +} + +output "aks_kube_config" { + description = "AKS kubeconfig" + value = azurerm_kubernetes_cluster.staging_aks.kube_config_raw + sensitive = true +} + +output "acr_login_server" { + description = "ACR login server" + value = data.azurerm_container_registry.shared_acr.login_server +} + +output "git_sha" { + description = "Git commit SHA" + value = var.git_sha +} \ No newline at end of file diff --git a/infrastructure/staging/provider.tf b/infrastructure/staging/provider.tf new file mode 100644 index 0000000..4298aa4 --- /dev/null +++ b/infrastructure/staging/provider.tf @@ -0,0 +1,30 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~>3.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.23" + } + } + required_version = ">= 1.1.0" +} + +provider "azurerm" { + # Allow resource delete on staging environment + features { + resource_group { + prevent_deletion_if_contains_resources = false + } + } +} + +# Configure Kubernetes provider to manage namespace +provider "kubernetes" { + host = azurerm_kubernetes_cluster.staging_aks.kube_config[0].host + client_certificate = base64decode(azurerm_kubernetes_cluster.staging_aks.kube_config[0].client_certificate) + client_key = base64decode(azurerm_kubernetes_cluster.staging_aks.kube_config[0].client_key) + cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.staging_aks.kube_config[0].cluster_ca_certificate) +} \ No newline at end of file diff --git a/infrastructure/staging/resource_group.tf b/infrastructure/staging/resource_group.tf new file mode 100644 index 0000000..54da372 --- /dev/null +++ b/infrastructure/staging/resource_group.tf @@ -0,0 +1,13 @@ +# infrastructure/staging/resource_group.tf + +resource "azurerm_resource_group" "staging_rg" { + name = "${var.prefix}-${var.environment}-rg" + location = var.location + + tags = { + Environment = var.environment + ManagedBy = "Terraform" + GitSHA = var.git_sha + AutoDestroy = "true" + } +} \ No newline at end of file diff --git a/infrastructure/staging/variables.tf b/infrastructure/staging/variables.tf new file mode 100644 index 0000000..f71e060 --- /dev/null +++ b/infrastructure/staging/variables.tf @@ -0,0 +1,44 @@ +# Specify the environment +variable "environment" { + description = "Environment name" + type = string + default = "staging" +} + +# Specify the prefix, ensuring all resources have unique naming +variable "prefix" { + description = "Prefix for all resource names" + type = string + default = "sit722alicestd" +} + +# Resource configuration variables +variable "location" { + description = "Azure region" + type = string + default = "australiaeast" +} + +variable "kubernetes_version" { + description = "Kubernetes version" + type = string + default = "1.31.7" +} + +variable "node_count" { + description = "Number of AKS nodes" + type = number + default = 1 +} + +variable "node_vm_size" { + description = "VM size for AKS nodes" + type = string + default = "Standard_D2s_v3" +} + +variable "git_sha" { + description = "Git commit SHA for tagging" + type = string + default = "manual" +} \ No newline at end of file diff --git a/k8s/docker-desktop/configmaps.yaml b/k8s/docker-desktop/configmaps.yaml new file mode 100644 index 0000000..a985950 --- /dev/null +++ b/k8s/docker-desktop/configmaps.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: notes-config + namespace: staging +data: + # Database Configuration + NOTES_DB_HOST: notes-db-service + NOTES_DB_NAME: notes + + USERS_DB_HOST: users-db-service + USERS_DB_NAME: users + + # POSTGRES_DB: notesdb + # POSTGRES_HOST: postgres-service + POSTGRES_PORT: "5432" + + # Service URLs (internal cluster communication) + NOTES_SERVICE_URL: http://notes-service:5001 + USERS_SERVICE_URL: http://users-service:5000 + + # Application Configuration + ENVIRONMENT: staging + LOG_LEVEL: debug \ No newline at end of file diff --git a/k8s/docker-desktop/frontend.yaml b/k8s/docker-desktop/frontend.yaml new file mode 100644 index 0000000..f7accb3 --- /dev/null +++ b/k8s/docker-desktop/frontend.yaml @@ -0,0 +1,40 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend + namespace: staging + labels: + app: frontend +spec: + replicas: 3 # high availability, load distribution, and rolling update capabilities + selector: + matchLabels: + app: frontend + template: + metadata: + labels: + app: frontend + spec: + containers: + - name: frontend-container + image: hd-awesome-devops-frontend:latest + imagePullPolicy: Never # Crucial for local testing with Docker Desktop K8s + ports: + - containerPort: 80 # Nginx runs on port 80 inside the container + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + name: frontend # Service name matches + namespace: staging + labels: + app: frontend +spec: + selector: + app: frontend + ports: + - protocol: TCP + port: 80 # The port the service listens on inside the cluster + targetPort: 80 # The port on the Pod (containerPort where Nginx runs) + type: LoadBalancer # Exposes the service on a port on each Node's IP \ No newline at end of file diff --git a/k8s/docker-desktop/namespace.yaml b/k8s/docker-desktop/namespace.yaml new file mode 100644 index 0000000..d5d94ac --- /dev/null +++ b/k8s/docker-desktop/namespace.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: staging + labels: + environment: staging + managed-by: kubectl \ No newline at end of file diff --git a/k8s/docker-desktop/notes-db-deployment.yaml b/k8s/docker-desktop/notes-db-deployment.yaml new file mode 100644 index 0000000..92366be --- /dev/null +++ b/k8s/docker-desktop/notes-db-deployment.yaml @@ -0,0 +1,83 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: notes-db-pvc + namespace: staging +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: notes-db-deployment + namespace: staging + labels: + app: notes-db +spec: + replicas: 1 + selector: + matchLabels: + app: notes-db + template: + metadata: + labels: + app: notes-db + spec: + containers: + - name: postgres + image: postgres:15-alpine # Use the same PosgreSQL image as in Docker Compose + ports: + - containerPort: 5432 # Default PosgreSQL port + env: + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config # ConfigMap name matches + key: NOTES_DB_NAME # Point to the database name + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_PASSWORD + volumeMounts: + - name: notes-db-storage + mountPath: /var/lib/postgresql/data + resources: + requests: + memory: "256Mi" + cpu: "250m" + limits: + memory: "512Mi" + cpu: "500m" + volumes: + - name: notes-db-storage + # persistentVolumeClaim: + # claimName: notes-db-pvc + hostPath: + path: /tmp/notes-db-data + type: DirectoryOrCreate +--- +apiVersion: v1 +kind: Service +metadata: + name: notes-db-service # Internal DNS name for the Order DB + namespace: staging + labels: + app: notes-db +spec: + selector: + app: notes-db # Selects pods with the label app + ports: + - protocol: TCP + port: 5432 # The port the service listens on (default PosgreSQL) + targetPort: 5432 # The port on the Pod (containerPort) + type: ClusterIP # Only accessible from within the cluster diff --git a/k8s/docker-desktop/notes-service-deployment.yaml b/k8s/docker-desktop/notes-service-deployment.yaml new file mode 100644 index 0000000..0a3989a --- /dev/null +++ b/k8s/docker-desktop/notes-service-deployment.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: notes-service + namespace: staging + labels: + app: notes-service +spec: + replicas: 1 + selector: + matchLabels: + app: notes-service + template: + metadata: + labels: + app: notes-service + spec: + containers: + - name: notes-service-container + image: hd-awesome-devops-notes-service:latest + imagePullPolicy: Never + ports: + - containerPort: 8000 + env: + - name: POSTGRES_HOST + valueFrom: + configMapKeyRef: + name: notes-config + key: NOTES_DB_HOST + - name: POSTGRES_PORT + valueFrom: + configMapKeyRef: + name: notes-config + key: POSTGRES_PORT + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config + key: NOTES_DB_NAME + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: notes-config + key: ENVIRONMENT + - name: USERS_SERVICE_URL + valueFrom: + configMapKeyRef: + name: notes-config + key: USERS_SERVICE_URL + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_PASSWORD +--- +apiVersion: v1 +kind: Service +metadata: + name: notes-service + namespace: staging + labels: + app: notes-service +spec: + selector: + app: notes-service + ports: + - protocol: TCP + port: 5001 + targetPort: 8000 + type: LoadBalancer diff --git a/k8s/docker-desktop/secrets.yaml b/k8s/docker-desktop/secrets.yaml new file mode 100644 index 0000000..1089588 --- /dev/null +++ b/k8s/docker-desktop/secrets.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: notes-secrets + namespace: staging +type: Opaque # Indicates arbitrary user-defined data +data: + # PostgreSQL Credentials + POSTGRES_USER: "cG9zdGdyZXM=" # Base64 for 'postgres' + POSTGRES_PASSWORD: "cG9zdGdyZXM=" # Base64 for 'postgres' + + # Azure Storage Account Credentials for Product Service image uploads + # REPLACE WITH YOUR ACTUAL BASE64 ENCODED VALUES from your Azure Storage Account + # Example: echo -n 'myblobstorageaccount' | base64 + # AZURE_STORAGE_ACCOUNT_NAME: "" + # Example: echo -n 'your_storage_account_key_string' | base64 + # AZURE_STORAGE_ACCOUNT_KEY: "" diff --git a/k8s/docker-desktop/users-db-deployment.yaml b/k8s/docker-desktop/users-db-deployment.yaml new file mode 100644 index 0000000..c3c08f8 --- /dev/null +++ b/k8s/docker-desktop/users-db-deployment.yaml @@ -0,0 +1,83 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: users-db-pvc + namespace: staging +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: users-db-deployment + namespace: staging + labels: + app: users-db +spec: + replicas: 1 + selector: + matchLabels: + app: users-db + template: + metadata: + labels: + app: users-db + spec: + containers: + - name: postgres + image: postgres:15-alpine # Use the same PosgreSQL image as in Docker Compose + ports: + - containerPort: 5432 # Default PosgreSQL port + env: + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config # ConfigMap name matches + key: USERS_DB_NAME # Point to the database name + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_PASSWORD + volumeMounts: + - name: users-db-storage + mountPath: /var/lib/postgresql/data + resources: + requests: + memory: "256Mi" + cpu: "250m" + limits: + memory: "512Mi" + cpu: "500m" + volumes: + - name: users-db-storage + # persistentVolumeClaim: + # claimName: users-db-pvc + hostPath: + path: /tmp/users-db-data + type: DirectoryOrCreate +--- +apiVersion: v1 +kind: Service +metadata: + name: users-db-service # Internal DNS name for the Order DB + namespace: staging + labels: + app: users-db +spec: + selector: + app: users-db # Selects pods with the label app + ports: + - protocol: TCP + port: 5432 # The port the service listens on (default PosgreSQL) + targetPort: 5432 # The port on the Pod (containerPort) + type: ClusterIP # Only accessible from within the cluster diff --git a/k8s/docker-desktop/users-service-deployment.yaml b/k8s/docker-desktop/users-service-deployment.yaml new file mode 100644 index 0000000..a0d716b --- /dev/null +++ b/k8s/docker-desktop/users-service-deployment.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: users-service # Deployment name matches + namespace: staging + labels: + app: users-service +spec: + replicas: 1 + selector: + matchLabels: + app: users-service + template: + metadata: + labels: + app: users-service + spec: + containers: + - name: users-service-container + image: hd-awesome-devops-users-service:latest + imagePullPolicy: Never + ports: + - containerPort: 8000 + env: + - name: POSTGRES_HOST + valueFrom: + configMapKeyRef: + name: notes-config + key: USERS_DB_HOST + - name: POSTGRES_PORT + valueFrom: + configMapKeyRef: + name: notes-config + key: POSTGRES_PORT + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config + key: USERS_DB_NAME + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: notes-config + key: ENVIRONMENT + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_PASSWORD + resources: + requests: + memory: "256Mi" + cpu: "250m" + limits: + memory: "512Mi" + cpu: "500m" +--- +apiVersion: v1 +kind: Service +metadata: + name: users-service + namespace: staging + labels: + app: users-service +spec: + selector: + app: users-service + ports: + - protocol: TCP + port: 5000 + targetPort: 8000 + type: LoadBalancer diff --git a/k8s/production/configmaps.yaml b/k8s/production/configmaps.yaml new file mode 100644 index 0000000..32a767c --- /dev/null +++ b/k8s/production/configmaps.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: notes-config + namespace: production +data: + # Database Configuration + NOTES_DB_HOST: notes-db-service + NOTES_DB_NAME: notes + + USERS_DB_HOST: users-db-service + USERS_DB_NAME: users + + # POSTGRES_DB: notesdb + # POSTGRES_HOST: postgres-service + POSTGRES_PORT: "5432" + + # Service URLs (internal cluster communication) + NOTES_SERVICE_URL: http://notes-service:5001 + USERS_SERVICE_URL: http://users-service:5000 + + # Application Configuration + ENVIRONMENT: production + LOG_LEVEL: debug \ No newline at end of file diff --git a/k8s/production/frontend-deployment.yaml b/k8s/production/frontend-deployment.yaml new file mode 100644 index 0000000..9864209 --- /dev/null +++ b/k8s/production/frontend-deployment.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend + namespace: production + labels: + app: frontend +spec: + replicas: 1 + selector: + matchLabels: + app: frontend + template: + metadata: + labels: + app: frontend + spec: + containers: + - name: frontend-container + image: _IMAGE_NAME_WITH_TAG_ # Placeholder for sit722aliceacr.azurecr.io/users_service: + imagePullPolicy: Always + ports: + - containerPort: 80 # Nginx runs on port 80 inside the container + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "250m" + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + name: frontend # Service name matches + namespace: production + labels: + app: frontend +spec: + selector: + app: frontend + ports: + - protocol: TCP + port: 80 # The port the service listens on inside the cluster + targetPort: 80 # The port on the Pod (containerPort where Nginx runs) + type: LoadBalancer # Exposes the service on a port on each Node's IP diff --git a/k8s/production/notes-db-deployment.yaml b/k8s/production/notes-db-deployment.yaml new file mode 100644 index 0000000..9959fa7 --- /dev/null +++ b/k8s/production/notes-db-deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: notes-db-deployment + namespace: production + labels: + app: notes-db +spec: + replicas: 1 + selector: + matchLabels: + app: notes-db + template: + metadata: + labels: + app: notes-db + spec: + containers: + - name: postgres + image: postgres:15-alpine # Use the same PosgreSQL image as in Docker Compose + ports: + - containerPort: 5432 # Default PosgreSQL port + env: + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config # ConfigMap name matches + key: NOTES_DB_NAME # Point to the database name + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_PASSWORD + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" +--- +apiVersion: v1 +kind: Service +metadata: + name: notes-db-service # Internal DNS name for the Order DB + namespace: production + labels: + app: notes-db +spec: + selector: + app: notes-db # Selects pods with the label app + ports: + - protocol: TCP + port: 5432 # The port the service listens on (default PosgreSQL) + targetPort: 5432 # The port on the Pod (containerPort) + type: ClusterIP # Only accessible from within the cluster diff --git a/k8s/production/notes-service-deployment.yaml b/k8s/production/notes-service-deployment.yaml new file mode 100644 index 0000000..da456c3 --- /dev/null +++ b/k8s/production/notes-service-deployment.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: notes-service + namespace: production + labels: + app: notes-service +spec: + replicas: 1 + selector: + matchLabels: + app: notes-service + template: + metadata: + labels: + app: notes-service + spec: + containers: + - name: notes-service-container + image: _IMAGE_NAME_WITH_TAG_ # Placeholder for sit722aliceacr.azurecr.io/users_service: + imagePullPolicy: Always + ports: + - containerPort: 8000 + env: + - name: POSTGRES_HOST + valueFrom: + configMapKeyRef: + name: notes-config + key: NOTES_DB_HOST + - name: POSTGRES_PORT + valueFrom: + configMapKeyRef: + name: notes-config + key: POSTGRES_PORT + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config + key: NOTES_DB_NAME + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: notes-config + key: ENVIRONMENT + - name: USERS_SERVICE_URL + valueFrom: + configMapKeyRef: + name: notes-config + key: USERS_SERVICE_URL + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_PASSWORD + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "250m" +--- +apiVersion: v1 +kind: Service +metadata: + name: notes-service + namespace: production + labels: + app: notes-service +spec: + selector: + app: notes-service + ports: + - protocol: TCP + port: 5001 + targetPort: 8000 + type: LoadBalancer diff --git a/k8s/production/secrets.yaml b/k8s/production/secrets.yaml new file mode 100644 index 0000000..fb4b9bf --- /dev/null +++ b/k8s/production/secrets.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: notes-secrets + namespace: production +type: Opaque # Indicates arbitrary user-defined data +data: + # PostgreSQL Credentials + POSTGRES_USER: "cG9zdGdyZXM=" # Base64 for 'postgres' + POSTGRES_PASSWORD: "cG9zdGdyZXM=" # Base64 for 'postgres' + + # Azure Storage Account Credentials for Product Service image uploads + # REPLACE WITH YOUR ACTUAL BASE64 ENCODED VALUES from your Azure Storage Account + # Example: echo -n 'myblobstorageaccount' | base64 + # AZURE_STORAGE_ACCOUNT_NAME: "" + # Example: echo -n 'your_storage_account_key_string' | base64 + # AZURE_STORAGE_ACCOUNT_KEY: "" diff --git a/k8s/production/users-db-deployment.yaml b/k8s/production/users-db-deployment.yaml new file mode 100644 index 0000000..a42545f --- /dev/null +++ b/k8s/production/users-db-deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: users-db-deployment + namespace: production + labels: + app: users-db +spec: + replicas: 1 + selector: + matchLabels: + app: users-db + template: + metadata: + labels: + app: users-db + spec: + containers: + - name: postgres + image: postgres:15-alpine # Use the same PosgreSQL image as in Docker Compose + ports: + - containerPort: 5432 # Default PosgreSQL port + env: + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config # ConfigMap name matches + key: USERS_DB_NAME # Point to the database name + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_PASSWORD + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" +--- +apiVersion: v1 +kind: Service +metadata: + name: users-db-service # Internal DNS name for the Order DB + namespace: production + labels: + app: users-db +spec: + selector: + app: users-db # Selects pods with the label app + ports: + - protocol: TCP + port: 5432 # The port the service listens on (default PosgreSQL) + targetPort: 5432 # The port on the Pod (containerPort) + type: ClusterIP # Only accessible from within the cluster diff --git a/k8s/production/users-service-deployment.yaml b/k8s/production/users-service-deployment.yaml new file mode 100644 index 0000000..4f2fad6 --- /dev/null +++ b/k8s/production/users-service-deployment.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: users-service # Deployment name matches + namespace: production + labels: + app: users-service +spec: + replicas: 1 + selector: + matchLabels: + app: users-service + template: + metadata: + labels: + app: users-service + spec: + containers: + - name: users-service-container + image: _IMAGE_NAME_WITH_TAG_ # Placeholder for sit722aliceacr.azurecr.io/users_service: + imagePullPolicy: Always + ports: + - containerPort: 8000 + env: + - name: POSTGRES_HOST + valueFrom: + configMapKeyRef: + name: notes-config + key: USERS_DB_HOST + - name: POSTGRES_PORT + valueFrom: + configMapKeyRef: + name: notes-config + key: POSTGRES_PORT + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config + key: USERS_DB_NAME + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: notes-config + key: ENVIRONMENT + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_PASSWORD + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "250m" +--- +apiVersion: v1 +kind: Service +metadata: + name: users-service + namespace: production + labels: + app: users-service +spec: + selector: + app: users-service + ports: + - protocol: TCP + port: 5000 + targetPort: 8000 + type: LoadBalancer diff --git a/k8s/staging/configmaps.yaml b/k8s/staging/configmaps.yaml new file mode 100644 index 0000000..a985950 --- /dev/null +++ b/k8s/staging/configmaps.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: notes-config + namespace: staging +data: + # Database Configuration + NOTES_DB_HOST: notes-db-service + NOTES_DB_NAME: notes + + USERS_DB_HOST: users-db-service + USERS_DB_NAME: users + + # POSTGRES_DB: notesdb + # POSTGRES_HOST: postgres-service + POSTGRES_PORT: "5432" + + # Service URLs (internal cluster communication) + NOTES_SERVICE_URL: http://notes-service:5001 + USERS_SERVICE_URL: http://users-service:5000 + + # Application Configuration + ENVIRONMENT: staging + LOG_LEVEL: debug \ No newline at end of file diff --git a/k8s/staging/frontend-deployment.yaml b/k8s/staging/frontend-deployment.yaml new file mode 100644 index 0000000..7dd040f --- /dev/null +++ b/k8s/staging/frontend-deployment.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend + namespace: staging + labels: + app: frontend +spec: + replicas: 1 + selector: + matchLabels: + app: frontend + template: + metadata: + labels: + app: frontend + spec: + containers: + - name: frontend-container + image: _IMAGE_NAME_WITH_TAG_ # Placeholder for sit722aliceacr.azurecr.io/users_service: + imagePullPolicy: Always + ports: + - containerPort: 80 # Nginx runs on port 80 inside the container + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "250m" + restartPolicy: Always +--- +apiVersion: v1 +kind: Service +metadata: + name: frontend # Service name matches + namespace: staging + labels: + app: frontend +spec: + selector: + app: frontend + ports: + - protocol: TCP + port: 80 # The port the service listens on inside the cluster + targetPort: 80 # The port on the Pod (containerPort where Nginx runs) + type: LoadBalancer # Exposes the service on a port on each Node's IP diff --git a/k8s/staging/notes-db-deployment.yaml b/k8s/staging/notes-db-deployment.yaml new file mode 100644 index 0000000..cd66052 --- /dev/null +++ b/k8s/staging/notes-db-deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: notes-db-deployment + namespace: staging + labels: + app: notes-db +spec: + replicas: 1 + selector: + matchLabels: + app: notes-db + template: + metadata: + labels: + app: notes-db + spec: + containers: + - name: postgres + image: postgres:15-alpine # Use the same PosgreSQL image as in Docker Compose + ports: + - containerPort: 5432 # Default PosgreSQL port + env: + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config # ConfigMap name matches + key: NOTES_DB_NAME # Point to the database name + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_PASSWORD + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" +--- +apiVersion: v1 +kind: Service +metadata: + name: notes-db-service # Internal DNS name for the Order DB + namespace: staging + labels: + app: notes-db +spec: + selector: + app: notes-db # Selects pods with the label app + ports: + - protocol: TCP + port: 5432 # The port the service listens on (default PosgreSQL) + targetPort: 5432 # The port on the Pod (containerPort) + type: ClusterIP # Only accessible from within the cluster diff --git a/k8s/staging/notes-service-deployment.yaml b/k8s/staging/notes-service-deployment.yaml new file mode 100644 index 0000000..31ebb7b --- /dev/null +++ b/k8s/staging/notes-service-deployment.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: notes-service + namespace: staging + labels: + app: notes-service +spec: + replicas: 1 + selector: + matchLabels: + app: notes-service + template: + metadata: + labels: + app: notes-service + spec: + containers: + - name: notes-service-container + image: _IMAGE_NAME_WITH_TAG_ # Placeholder for sit722aliceacr.azurecr.io/users_service: + imagePullPolicy: Always + ports: + - containerPort: 8000 + env: + - name: POSTGRES_HOST + valueFrom: + configMapKeyRef: + name: notes-config + key: NOTES_DB_HOST + - name: POSTGRES_PORT + valueFrom: + configMapKeyRef: + name: notes-config + key: POSTGRES_PORT + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config + key: NOTES_DB_NAME + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: notes-config + key: ENVIRONMENT + - name: USERS_SERVICE_URL + valueFrom: + configMapKeyRef: + name: notes-config + key: USERS_SERVICE_URL + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_PASSWORD + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "250m" +--- +apiVersion: v1 +kind: Service +metadata: + name: notes-service + namespace: staging + labels: + app: notes-service +spec: + selector: + app: notes-service + ports: + - protocol: TCP + port: 5001 + targetPort: 8000 + type: LoadBalancer diff --git a/k8s/staging/secrets.yaml b/k8s/staging/secrets.yaml new file mode 100644 index 0000000..1089588 --- /dev/null +++ b/k8s/staging/secrets.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Secret +metadata: + name: notes-secrets + namespace: staging +type: Opaque # Indicates arbitrary user-defined data +data: + # PostgreSQL Credentials + POSTGRES_USER: "cG9zdGdyZXM=" # Base64 for 'postgres' + POSTGRES_PASSWORD: "cG9zdGdyZXM=" # Base64 for 'postgres' + + # Azure Storage Account Credentials for Product Service image uploads + # REPLACE WITH YOUR ACTUAL BASE64 ENCODED VALUES from your Azure Storage Account + # Example: echo -n 'myblobstorageaccount' | base64 + # AZURE_STORAGE_ACCOUNT_NAME: "" + # Example: echo -n 'your_storage_account_key_string' | base64 + # AZURE_STORAGE_ACCOUNT_KEY: "" diff --git a/k8s/staging/users-db-deployment.yaml b/k8s/staging/users-db-deployment.yaml new file mode 100644 index 0000000..288857a --- /dev/null +++ b/k8s/staging/users-db-deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: users-db-deployment + namespace: staging + labels: + app: users-db +spec: + replicas: 1 + selector: + matchLabels: + app: users-db + template: + metadata: + labels: + app: users-db + spec: + containers: + - name: postgres + image: postgres:15-alpine # Use the same PosgreSQL image as in Docker Compose + ports: + - containerPort: 5432 # Default PosgreSQL port + env: + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config # ConfigMap name matches + key: USERS_DB_NAME # Point to the database name + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets # Secret name matches + key: POSTGRES_PASSWORD + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "200m" +--- +apiVersion: v1 +kind: Service +metadata: + name: users-db-service # Internal DNS name for the Order DB + namespace: staging + labels: + app: users-db +spec: + selector: + app: users-db # Selects pods with the label app + ports: + - protocol: TCP + port: 5432 # The port the service listens on (default PosgreSQL) + targetPort: 5432 # The port on the Pod (containerPort) + type: ClusterIP # Only accessible from within the cluster diff --git a/k8s/staging/users-service-deployment.yaml b/k8s/staging/users-service-deployment.yaml new file mode 100644 index 0000000..135586e --- /dev/null +++ b/k8s/staging/users-service-deployment.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: users-service # Deployment name matches + namespace: staging + labels: + app: users-service +spec: + replicas: 1 + selector: + matchLabels: + app: users-service + template: + metadata: + labels: + app: users-service + spec: + containers: + - name: users-service-container + image: _IMAGE_NAME_WITH_TAG_ # Placeholder for sit722aliceacr.azurecr.io/users_service: + imagePullPolicy: Always + ports: + - containerPort: 8000 + env: + - name: POSTGRES_HOST + valueFrom: + configMapKeyRef: + name: notes-config + key: USERS_DB_HOST + - name: POSTGRES_PORT + valueFrom: + configMapKeyRef: + name: notes-config + key: POSTGRES_PORT + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: notes-config + key: USERS_DB_NAME + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: notes-config + key: ENVIRONMENT + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: notes-secrets + key: POSTGRES_PASSWORD + resources: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "256Mi" + cpu: "250m" +--- +apiVersion: v1 +kind: Service +metadata: + name: users-service + namespace: staging + labels: + app: users-service +spec: + selector: + app: users-service + ports: + - protocol: TCP + port: 5000 + targetPort: 8000 + type: LoadBalancer diff --git a/playwright-python/conftest.py b/playwright-python/conftest.py new file mode 100644 index 0000000..1aa2a9d --- /dev/null +++ b/playwright-python/conftest.py @@ -0,0 +1,18 @@ +"""Pytest configuration for Playwright tests.""" +import pytest +from playwright.sync_api import Page + + +@pytest.fixture(scope="session") +def browser_context_args(browser_context_args): + """Configure browser context.""" + return { + **browser_context_args, + "viewport": {"width": 1280, "height": 720}, + } + + +@pytest.fixture(scope='session') +def base_url(): + """Base URL for the application.""" + return "http://localhost:80" \ No newline at end of file diff --git a/playwright-python/pytest.ini b/playwright-python/pytest.ini new file mode 100644 index 0000000..9154fe2 --- /dev/null +++ b/playwright-python/pytest.ini @@ -0,0 +1,9 @@ +[pytest] +testpaths = tests +python_files = test_*.py +python_classes = Test* +python_functions = test_* +; addopts = -v --base-url=http://localhost:3000 +markers = + acceptance: acceptance tests + smoke: smoke tests \ No newline at end of file diff --git a/playwright-python/requirements.txt b/playwright-python/requirements.txt new file mode 100644 index 0000000..c44b431 --- /dev/null +++ b/playwright-python/requirements.txt @@ -0,0 +1,3 @@ +pytest +pytest-playwright +pytest-base-url \ No newline at end of file diff --git a/playwright-python/test_example.py b/playwright-python/test_example.py new file mode 100644 index 0000000..1c5d455 --- /dev/null +++ b/playwright-python/test_example.py @@ -0,0 +1,17 @@ +import re +from playwright.sync_api import Page, expect + +def test_has_title(page: Page): + page.goto("https://playwright.dev/") + + # Expect a title "to contain" a substring. + expect(page).to_have_title(re.compile("Playwright")) + +def test_get_started_link(page: Page): + page.goto("https://playwright.dev/") + + # Click the get started link. + page.get_by_role("link", name="Get started").click() + + # Expects page to have a heading with the name of Installation. + expect(page.get_by_role("heading", name="Installation")).to_be_visible() \ No newline at end of file diff --git a/playwright-python/tests/test_acceptance.py b/playwright-python/tests/test_acceptance.py new file mode 100644 index 0000000..d426ee4 --- /dev/null +++ b/playwright-python/tests/test_acceptance.py @@ -0,0 +1,114 @@ +# Acceptance test +# This is an example test file only, for demonstration of successful running the acceptance test +# Real test involves more complex end-to-end user interaction with frontend UI +import pytest +import os +from playwright.sync_api import Page, expect + +FRONTEND_URL = os.getenv('FRONTEND_URL', 'http://localhost:3000') +USERS_SERVICE_URL = os.getenv('USERS_SERVICE_URL', 'http://localhost:5000') +NOTES_SERVICE_URL = os.getenv('NOTES_SERVICE_URL', 'http://localhost:5001') + +# Fixture should be outside the class +@pytest.fixture(scope="session") +def browser_context_args(browser_context_args): + """Configure browser context""" + return { + **browser_context_args, + "ignore_https_errors": True, + } + +@pytest.mark.smoke +class TestEndToEndUserFlow: + """Acceptance testing to verify correct end-to-end user flow.""" + + def test_frontend_loads(self, page: Page): + """Test that frontend page loads successfully""" + # Navigate to frontend + print(FRONTEND_URL) + page.goto(FRONTEND_URL) + + # # Wait for page to load + page.wait_for_load_state('networkidle') + + # Check page content + expect(page.locator('text=Notes Application')).to_be_visible(timeout=5000) + + def test_add_user_workflow(self, page: Page): + """Test complete add note workflow""" + # Navigate to frontend + page.goto(FRONTEND_URL) + + # Wait for page to load + page.wait_for_load_state('networkidle') + + # Fill note form (adjust selectors to match your actual form) + page.fill('input[id="user-username"]', 'User') + page.fill('input[id="user-email"]', 'anotheruser@gmail.com') + + # Submit form + page.click('button:has-text("Register User")') + + # Wait for response + page.wait_for_timeout(1000) + + # Verify note appears in list (adjust selector based on your HTML) + expect(page.locator('text=anotheruser@gmail.com')).to_be_visible(timeout=5000) + + def test_add_note_workflow(self, page: Page): + """Test complete add note workflow""" + # Navigate to frontend + page.goto(FRONTEND_URL) + + # Wait for page to load + page.wait_for_load_state('networkidle') + + # Fill note form (adjust selectors to match your actual form) + page.fill('input[id="note-user-id"]', '1') + page.fill('input[id="note-title"]', 'Test Note') + page.fill('textarea[id="note-content"]', 'Test note content for acceptance testing') + + # Submit form + page.click('button:has-text("Create Note")') + # Wait for response + page.wait_for_timeout(1000) + + page.fill('input[id="filter-user-id"]', '1') + page.click('button[id="filter-btn"]') + # Wait for response + page.wait_for_timeout(1000) + + # Verify note appears in list (adjust selector based on your HTML) + expect(page.locator('h3:has-text("Test Note")')).to_be_visible(timeout=5000) + + def test_notes_api_health_check(self, page: Page): + """Test Notes API endpoint is accessible""" + response = page.request.get(f"{NOTES_SERVICE_URL}/") + assert response.status == 200 + + data = response.json() + assert 'message' in data or 'status' in data + + def test_users_api_health_check(self, page: Page): + """Test Users API endpoint is accessible""" + response = page.request.get(f"{USERS_SERVICE_URL}/") + assert response.status == 200 + + data = response.json() + assert 'message' in data or 'status' in data + + def test_notes_service_health_endpoint(self, page: Page): + """Test Notes service health endpoint""" + response = page.request.get(f"{NOTES_SERVICE_URL}/health") + assert response.status == 200 + + data = response.json() + assert data.get('status') == 'ok' + + def test_users_service_health_endpoint(self, page: Page): + """Test Users service health endpoint""" + response = page.request.get(f"{USERS_SERVICE_URL}/health") + assert response.status == 200 + + data = response.json() + assert data.get('status') == 'ok' \ No newline at end of file diff --git a/playwright-python/tests/test_service_availability.py b/playwright-python/tests/test_service_availability.py new file mode 100644 index 0000000..b6ef0ba --- /dev/null +++ b/playwright-python/tests/test_service_availability.py @@ -0,0 +1,39 @@ +"""Service availability smoke tests.""" +import pytest +from playwright.sync_api import Page, expect + + +@pytest.mark.smoke +class TestServiceAvailability: + """Quick smoke tests to verify all services are running.""" + + def test_frontend_loads(self, page: Page, base_url: str): + """Test frontend is accessible.""" + page.goto(base_url) + expect(page).to_have_title("Notes Application") + expect(page.locator("h1")).to_contain_text("Notes Application") + + def test_users_service_accessible(self, page: Page, base_url: str): + """Test Users Service is responding.""" + page.goto(base_url) + + # Check that user list loads (not showing error) + user_list = page.locator("#user-list") + expect(user_list).not_to_contain_text("An error occurred", timeout=10000) + + def test_notes_service_accessible(self, page: Page, base_url: str): + """Test Notes Service is responding.""" + page.goto(base_url) + + # Check that note list loads (not showing error) + note_list = page.locator("#note-list") + expect(note_list).not_to_contain_text("An error occurred", timeout=10000) + + def test_all_sections_visible(self, page: Page, base_url: str): + """Test all major sections are rendered.""" + page.goto(base_url) + + expect(page.locator("h2:has-text('User Management')")).to_be_visible() + expect(page.locator("h2:has-text('Notes Management')")).to_be_visible() + expect(page.locator("#user-form")).to_be_visible() + expect(page.locator("#note-form")).to_be_visible() \ No newline at end of file