From f29dc2214b8a42499ea6d28297b429f5f04d8dbf Mon Sep 17 00:00:00 2001 From: skiddder Date: Wed, 31 Dec 2025 16:12:29 +0100 Subject: [PATCH 1/3] Checkpoint from VS Code for coding agent session --- .gitignore | 3 + .../challenges/challenge-04.md | 37 ++++ .../lab/k8s-cluster.tf | 3 + .../lab/readme.md | 4 +- .../lab/scripts/az_connect_k8s.sh | 98 ++++++++++ .../scripts/bootstrap-connected-cluster.sh | 133 ++++++++++++++ .../lab/scripts/dataservice.old.sh | 78 ++++++++ .../lab/variables.tf | 13 +- .../challenge-01/az_connect_k8s.sh | 3 - .../walkthroughs/challenge-01/solution.md | 1 + .../challenge-04/01-enable-dataservice.sh | 167 +++++++++++++++--- .../challenge-04/aks-data-controller.sh.bak | 50 ------ .../walkthroughs/challenge-04/dataservice.sh | 119 +++++++++++++ .../walkthroughs/challenge-04/solution.md | 19 +- .../walkthroughs/challenge-04/spid | 1 - 15 files changed, 629 insertions(+), 100 deletions(-) create mode 100644 03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh create mode 100644 03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/bootstrap-connected-cluster.sh create mode 100644 03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/dataservice.old.sh delete mode 100644 03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/aks-data-controller.sh.bak create mode 100644 03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/dataservice.sh delete mode 100644 03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/spid diff --git a/.gitignore b/.gitignore index ff82ae8f..12919623 100644 --- a/.gitignore +++ b/.gitignore @@ -42,6 +42,9 @@ MANIFEST # exclude the provider.tf file on arc-enabled k8s microhack (contains subscription id) 03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/**/provider.tf +# Exclude Arc credentials configuration files +03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/**/arc-data-credentials.yaml + # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/challenges/challenge-04.md b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/challenges/challenge-04.md index cbee56ab..b3f7b599 100644 --- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/challenges/challenge-04.md +++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/challenges/challenge-04.md @@ -1,12 +1,49 @@ # Challenge 4 - Deploy SQL Managed Instance to your cluster +In this challenge, you'll deploy Azure Arc-enabled data services to your K3s cluster, specifically focusing on SQL Managed Instance. This enables you to run Azure SQL Database services directly on your on-premises Kubernetes cluster while maintaining cloud-connected management, monitoring, and security capabilities. 
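+
+Before you start, it helps to confirm that your tooling is in place. A minimal pre-flight check (a sketch; it assumes you are logged in via `az login` and that your kubeconfig still points at the cluster from challenge 1):
+
+```bash
+# confirm kubectl can reach the K3s cluster
+kubectl get nodes
+# confirm the arcdata CLI extension is present, and add it if missing
+az extension show --name arcdata || az extension add --name arcdata
+```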
+ +Azure Arc-enabled data services provide: +* **Cloud-connected database services** running on your own infrastructure +* **Centralized management** through Azure portal, Azure CLI, and Azure Resource Manager +* **Automatic updates and patching** managed through Azure Arc +* **Built-in monitoring and observability** with Log Analytics integration +* **Enterprise-grade security** with Azure Active Directory integration + +πŸ’‘*Hint*: Arc data services require a data controller that acts as the control plane for all data services in the cluster. This controller manages the lifecycle, updates, and monitoring of database instances. + +πŸ’‘*Hint*: Custom locations allow you to use your Arc-enabled Kubernetes cluster as a deployment target for Azure services, creating a seamless hybrid cloud experience. + ## Goal +* Deploy Azure Arc data controller to enable data services on your K3s cluster +* Create a SQL Managed Instance running on your on-premises Kubernetes cluster +* Configure monitoring and management capabilities for the data services ## Actions +* Install required Azure CLI extensions for Arc data services (`arcdata`) +* Enable custom locations feature on your Arc-enabled Kubernetes cluster +* Create a custom location that represents your cluster as an Azure deployment target +* Deploy the Azure Arc data controller with appropriate configuration for K3s +* Configure Log Analytics workspace integration for monitoring and telemetry +* Set up authentication credentials for monitoring dashboards (Grafana and Kibana) +* Create a SQL Managed Instance using the data controller +* Verify connectivity and management capabilities ## Success Criteria +* Azure Arc data controller is successfully deployed and running in your cluster (`kubectl get datacontrollers`) +* Custom location is created and visible in Azure portal under Azure Arc > Infrastructure > Custom locations +* Data controller appears in Azure portal under Azure Arc > Data services > Data controllers +* SQL Managed Instance is deployed and shows as "Ready" in both Kubernetes (`kubectl get sqlmi`) and Azure portal +* Monitoring dashboards (Grafana for metrics, Kibana for logs) are accessible and showing data +* You can connect to the SQL Managed Instance using Azure Data Studio or SQL Server Management Studio +* Telemetry and logs are flowing to the configured Log Analytics workspace ## Learning Resources +* [What are Azure Arc-enabled data services?](https://learn.microsoft.com/en-us/azure/azure-arc/data/overview) +* [Create Azure Arc data services cluster extension](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/conceptual-extensions) +* [Create a custom location on your arc-enabled k8s](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/custom-locations#create-custom-location) +* [Create the Arc data controller](https://learn.microsoft.com/en-us/azure/azure-arc/data/create-data-controller-direct-cli) +* [Deploy SQL Managed Instance on Arc-enabled Kubernetes](https://learn.microsoft.com/en-us/azure/azure-arc/data/create-sql-managed-instance) +* [Connect to SQL Managed Instance on Arc](https://learn.microsoft.com/en-us/azure/azure-arc/data/connect-managed-instance) ## Solution - Spoilerwarning [Solution Steps](../walkthroughs/challenge-04/solution.md) diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/k8s-cluster.tf b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/k8s-cluster.tf index df7c58fa..5edcd52c 100644 --- 
a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/k8s-cluster.tf +++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/k8s-cluster.tf @@ -188,6 +188,7 @@ resource "azurerm_linux_virtual_machine" "onprem_master" { os_disk { caching = "ReadWrite" storage_account_type = "Premium_LRS" + disk_size_gb = 128 # P10 managed disk for better IOPS (500) and throughput (100 MB/s) } source_image_reference { @@ -226,6 +227,7 @@ resource "azurerm_linux_virtual_machine" "onprem_worker" { os_disk { caching = "ReadWrite" storage_account_type = "Premium_LRS" + disk_size_gb = 128 # P10 managed disk for better IOPS (500) and throughput (100 MB/s) } source_image_reference { @@ -267,6 +269,7 @@ resource "azurerm_linux_virtual_machine" "onprem_worker2" { os_disk { caching = "ReadWrite" storage_account_type = "Premium_LRS" + disk_size_gb = 128 # P10 managed disk for better IOPS (500) and throughput (100 MB/s) } source_image_reference { diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/readme.md b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/readme.md index 8c404ef1..ce60b957 100644 --- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/readme.md +++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/readme.md @@ -128,7 +128,7 @@ terraform apply tfplan - Master node: Installs K3s server, configures networking, sets up kubeconfig - Worker nodes: Wait for master, then join the cluster as K3s agents 3. **Cluster becomes ready** in ~5-10 minutes after VM deployment -4. **SSH access** is available immediately with the mhadmin user and your password +4. **SSH access** is available immediately with your user and your password The expected output looks approximately like this depending on the start_index and end_index parameters: ```bash @@ -173,7 +173,7 @@ rg_names_onprem = { ### 1. Access your cluster ```bash # Set admin username (must match the admin_user value in fixtures.tfvars) -admin_user="" # e.g., "mhadmin" +admin_user="" # Extract user number from Azure username (e.g., LabUser-37 -> 37) azure_user=$(az account show --query user.name --output tsv) diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh new file mode 100644 index 00000000..61d40c07 --- /dev/null +++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh @@ -0,0 +1,98 @@ +#!/bin/bash +# This script connects an existing K3s cluster to Azure Arc with Azure RBAC enabled +echo "Exporting environment variables" + +# Extract user number from Azure username (e.g., LabUser-37 -> 37) +azure_user=$(az account show --query user.name --output tsv) +user_number=$(echo $azure_user | sed -n 's/.*LabUser-\([0-9]\+\).*/\1/p') + +if [ -z "$user_number" ]; then + echo "Error: Could not extract user number from Azure username: $azure_user" + echo "Please make sure you're logged in as LabUser-XX" + exit 1 +fi + +echo "Detected user number: $user_number" + +echo "Setting up kubectl access to the K3s cluster..." 
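+# Note: pointing the kubeconfig at the public IP (done below) only works if the
+# K3s API server certificate lists that IP as a SAN. This assumes the lab's K3s
+# install passes the master's public IP via k3s' --tls-san option; otherwise
+# kubectl will fail with a certificate error.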
+# Get puplic ip of master node via Azure cli according to user-number +master_pip=$(az vm list-ip-addresses --resource-group "${user_number}-k8s-onprem" --name "${user_number}-k8s-master" --query "[0].virtualMachine.network.publicIpAddresses[0].ipAddress" --output tsv) +# Create .kube directory if it doesn't exist +mkdir -p ~/.kube +# Copy the kubeconfig to standard location +# TODO: scp prompts for password - retrieve and use password from fixtures.tfvars silently +# TODO: on first ssh connection to master node, you may need to accept the host key fingerprint. Ensure this happens silently +scp mhadmin@$master_pip:/home/mhadmin/.kube/config ~/.kube/config +# replace localhost address with the public ip of master node +sed -i "s/127.0.0.1/$master_pip/g" ~/.kube/config +# Now kubectl works directly on your local client - no need to ssh into the master node anymore +kubectl get nodes + +# Set variables based on detected user number +export onprem_resource_group="${user_number}-k8s-onprem" +export arc_resource_group="${user_number}-k8s-arc" +export arc_cluster_name="${user_number}-k8s-arc-enabled" +export location="westeurope" + +echo "Using resource groups: $onprem_resource_group (onprem) and $arc_resource_group (arc)" + +# Registering Azure Arc providers +echo "Registering Azure Arc providers" +az provider register --namespace Microsoft.Kubernetes --wait +az provider register --namespace Microsoft.KubernetesConfiguration --wait +az provider register --namespace Microsoft.ExtendedLocation --wait + +az provider show -n Microsoft.Kubernetes -o table +az provider show -n Microsoft.KubernetesConfiguration -o table +az provider show -n Microsoft.ExtendedLocation -o table + +echo "Clear cached helm Azure Arc Helm Charts" +rm -rf ~/.azure/AzureArcCharts + +# Installing Azure Arc k8s CLI extensions +echo "Checking if you have up-to-date Azure Arc AZ CLI 'connectedk8s' extension..." +az extension show --name "connectedk8s" &> extension_output +if cat extension_output | grep -q "not installed"; then + az extension add --name "connectedk8s" +else + az extension update --name "connectedk8s" +fi +rm extension_output +echo "" + +echo "Checking if you have up-to-date Azure Arc AZ CLI 'k8s-configuration' extension..." +az extension show --name "k8s-configuration" &> extension_output +if cat extension_output | grep -q "not installed"; then + az extension add --name "k8s-configuration" +else + az extension update --name "k8s-configuration" +fi +rm extension_output +echo "" + +echo "Connecting the cluster to Azure Arc" +az connectedk8s connect --name $arc_cluster_name \ + --resource-group $arc_resource_group \ + --location $location \ + --infrastructure 'generic' \ + --distribution 'k3s' + +echo "Waiting for Arc connection to be established..." +sleep 30 + +echo "Verifying Arc connection status..." +az connectedk8s show --resource-group $arc_resource_group --name $arc_cluster_name --query "{name:name, connectivityStatus:connectivityStatus}" + +echo "Creating a clusterRoleBinding for the user..." +kubectl create clusterrolebinding demo-user-binding --clusterrole cluster-admin --user=$azure_user + +echo "" +echo "βœ… Azure Arc connection completed successfully!" 
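+# Note: the clusterRoleBinding created above is not idempotent; re-running this
+# script fails at that step if the binding already exists. An idempotent
+# variant (sketch):
+#   kubectl create clusterrolebinding demo-user-binding \
+#     --clusterrole cluster-admin --user=$azure_user \
+#     --dry-run=client -o yaml | kubectl apply -f -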
+echo "" +echo "πŸ“‹ Summary:" +echo " - Cluster: $arc_cluster_name" +echo " - Resource Group: $arc_resource_group" +echo " - Status: Connected" +echo "" +echo "🌐 You can view the cluster in Azure Portal:" +echo " https://portal.azure.com/#@/resource/subscriptions/$(az account show --query id --output tsv)/resourceGroups/$arc_resource_group/providers/Microsoft.Kubernetes/connectedClusters/$arc_cluster_name" diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/bootstrap-connected-cluster.sh b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/bootstrap-connected-cluster.sh new file mode 100644 index 00000000..a0ec6c01 --- /dev/null +++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/bootstrap-connected-cluster.sh @@ -0,0 +1,133 @@ +#!/bin/bash + +# Bootstrap script for complete K3s + Azure Arc deployment +# This script: +# 1. Deploys K3s cluster using Terraform +# 2. Connects the cluster to Azure Arc +# 3. Provides verification and status checks + +set -e # Exit on any error + +echo "πŸš€ Starting complete K3s + Azure Arc bootstrap deployment" +echo "==================================================" + +# Detect user information +azure_user=$(az account show --query user.name --output tsv) +user_number=$(echo $azure_user | sed -n 's/.*LabUser-\([0-9]\+\).*/\1/p') + +if [ -z "$user_number" ]; then + echo "❌ Error: Could not extract user number from Azure username: $azure_user" + echo "Please make sure you're logged in as LabUser-XX" + exit 1 +fi + +echo "βœ… Detected user number: $user_number" +echo "πŸ“§ Azure user: $azure_user" + +# Determine script locations +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +LAB_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +TERRAFORM_DIR="$LAB_DIR" +ARC_CONNECT_SCRIPT="$SCRIPT_DIR/az_connect_k8s.sh" + +echo "πŸ“ Working directories:" +echo " Script dir: $SCRIPT_DIR" +echo " Lab dir: $LAB_DIR" +echo " Terraform dir: $TERRAFORM_DIR" + +# Validate prerequisites +echo "" +echo "πŸ” Validating prerequisites..." + +# Check if terraform is available +if ! command -v terraform &> /dev/null; then + echo "❌ Terraform is not installed or not in PATH" + exit 1 +fi + +# Check if terraform files exist +if [ ! -f "$TERRAFORM_DIR/main.tf" ]; then + echo "❌ Terraform files not found in $TERRAFORM_DIR" + exit 1 +fi + +# Check if Arc connection script exists +if [ ! -f "$ARC_CONNECT_SCRIPT" ]; then + echo "❌ Arc connection script not found at $ARC_CONNECT_SCRIPT" + exit 1 +fi + +echo "βœ… All prerequisites validated" + +# Change to terraform directory +cd "$TERRAFORM_DIR" + +echo "" +echo "πŸ—οΈ Phase 1: Deploying K3s cluster with Terraform" +echo "================================================" + +# Setup terraform provider with current subscription +subscription_id=$(az account show --query id --output tsv) +echo "πŸ“‹ Using subscription ID: $subscription_id" + +echo "πŸ”§ Updating provider.tf with current subscription..." +sed -i "s|subscription_id = \".*\"|subscription_id = \"$subscription_id\"|" provider.tf + +# Initialize Terraform if needed +if [ ! -d ".terraform" ]; then + echo "βš™οΈ Initializing Terraform..." + terraform init +fi + +# Plan and apply terraform +echo "πŸ“‹ Creating Terraform plan..." +terraform plan -var-file=fixtures.tfvars -out=tfplan + +echo "πŸš€ Applying Terraform deployment..." 
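+# -parallelism=3 (below) deliberately throttles concurrent resource operations
+# under Terraform's default of 10; the assumption is that shared lab
+# subscriptions are prone to ARM throttling. Raise it if your subscription allows.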
+terraform apply -parallelism=3 tfplan + +# Verify deployment +echo "βœ… Terraform deployment completed" + +# Wait for VMs to be fully ready +echo "⏳ Waiting for VMs to be fully provisioned (60 seconds)..." +sleep 60 + +echo "" +echo "πŸ”— Phase 2: Connecting cluster to Azure Arc" +echo "============================================" + +# Execute the Arc connection script +echo "πŸš€ Running Azure Arc connection script..." +bash "$ARC_CONNECT_SCRIPT" + +echo "" +echo "πŸ” Phase 3: Final verification and status" +echo "=========================================" + +# Additional verification steps +echo "πŸ“Š Cluster status:" +kubectl get nodes -o wide + +echo "" +echo "🌐 Azure Arc status:" +az connectedk8s show --resource-group "${user_number}-k8s-arc" --name "${user_number}-k8s-arc-enabled" --query "{name:name, connectivityStatus:connectivityStatus, kubernetesVersion:kubernetesVersion}" -o table + +echo "" +echo "πŸŽ‰ Bootstrap deployment completed successfully!" +echo "==============================================" +echo "" +echo "πŸ“‹ Summary:" +echo " πŸ‘€ User: $azure_user ($user_number)" +echo " πŸ—οΈ On-premises RG: ${user_number}-k8s-onprem" +echo " ☁️ Azure Arc RG: ${user_number}-k8s-arc" +echo " πŸ”— Arc Cluster: ${user_number}-k8s-arc-enabled" +echo "" +echo "🌐 View your cluster in Azure Portal:" +echo " https://portal.azure.com/#@/resource/subscriptions/$subscription_id/resourceGroups/${user_number}-k8s-arc/providers/Microsoft.Kubernetes/connectedClusters/${user_number}-k8s-arc-enabled" +echo "" +echo "πŸ’‘ Next steps:" +echo " β€’ Your K3s cluster is now running and connected to Azure Arc" +echo " β€’ You can deploy Arc-enabled data services using the dataservice.sh script" +echo " β€’ Use kubectl commands to interact with your cluster" +echo " β€’ Explore Azure Arc features in the Azure Portal" diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/dataservice.old.sh b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/dataservice.old.sh new file mode 100644 index 00000000..2d5ef562 --- /dev/null +++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/dataservice.old.sh @@ -0,0 +1,78 @@ +#!/bin/bash +export arc_resource_group='37-k8s-arc' # <-- you can change this according to your naming convention +export arc_cluster_name='aks-test-arc' +export customlocation_name='onprem-aks-cl' # <-- you can change this according to your naming convention +export extensionInstanceName="arc-data-services" # <-- you can change this according to your naming convention +export arc_data_namespace="arc-data-controller" # <-- you can change this according to your naming convention +#export storageclass="managed-csi-premium" +export arc_data_profile_name='azure-arc-aks-default-storage' + + +# add an array with the names of required extensions +required_extensions=("connectedk8s" "k8s-extension" "customlocation") + +# loop through the array and check if each extension is installed +for extension in "${required_extensions[@]}"; do + echo "Checking if you have up-to-date Azure Arc AZ CLI '$extension' extension..." + az extension show --name "$extension" &> extension_output + if cat extension_output | grep -q "not installed"; then + az extension add --name "$extension" + else + az extension update --name "$extension" + fi + rm extension_output + echo "" +done + +# check if extended location provider is registered +echo "Checking if Extended Location provider is registered..." 
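+# The grep-based check below inspects the captured CLI output for "NotRegistered".
+# An equivalent, more direct query would be (sketch):
+#   az provider show -n Microsoft.ExtendedLocation --query registrationState -o tsv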
+az provider show --namespace Microsoft.ExtendedLocation &> provider_output +if cat provider_output | grep -q "NotRegistered"; then + echo "Registering Extended Location provider..." + az provider register --namespace Microsoft.ExtendedLocation --wait + echo "Extended Location provider registered." +else + echo "Extended Location provider is already registered." +fi +rm provider_output +echo "" + +# enable custom location feature +echo "Enabling custom location feature..." +# using the service principal id +#az ad sp show --id bc313c14-388c-4e7d-a58e-70017303ee3b --query id -o tsv &> spid +#az connectedk8s enable-features -n $arc_cluster_name -g $arc_resource_group --custom-locations-oid $(cat spid) --features cluster-connect custom-locations +# using entra user +az connectedk8s enable-features -n $arc_cluster_name \ + -g $arc_resource_group \ + --features cluster-connect custom-locations + +## deploy cluster extension for arc-enabled data services +echo "Creating Azure Arc data services extension..." +az arcdata dc create --name $extensionInstanceName \ + -g $arc_resource_group \ + --custom-location $customlocation_name \ + --cluster-name $arc_cluster_name \ + --connectivity-mode direct \ + --profile-name $arc_data_profile_name \ + --auto-upload-metrics true --auto-upload-logs true \ + #--storage-class $storageclass <-- optional, uncomment if you want to override the storage class provided by the profile + + +# THE BLOCK BELOW IS NOT REQUIRED ANYMORE AS THE EXTENSION CREATION COMMAND ABOVE PROVISIONS THE CUSTOM LOCATION AUTOMATICALLY + +# # get arc-enabled kubernetes resourcemanager id +# connectedClusterId=$(az connectedk8s show -n $arc_cluster_name -g $arc_resource_group --query id -o tsv) +# # get arc-enabled data services extension id +# extensionId=$(az k8s-extension show --name $extensionInstanceName --cluster-type connectedClusters -c $arc_cluster_name -g $arc_resource_group --query id -o tsv) +# # create custom location +# echo "Creating custom location..." +# az customlocation create -n $customlocation_name -g $arc_resource_group --namespace $arc_data_namespace --host-resource-id $connectedClusterId --cluster-extension-ids $extensionId + +# validate if custom location provisioned successfully +echo "Validating if the feature is enabled..." +az customlocation show -g $arc_resource_group -n $customlocation_name + +echo "" +echo "Validating if the arc datacontroller is created..." 
kubectl get datacontrollers -n onprem-aks-cl
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/variables.tf b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/variables.tf
index 5407057f..60ec2594 100644
--- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/variables.tf
+++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/variables.tf
@@ -17,7 +17,7 @@ variable "arc_location" {
 
 variable "onprem_resources" {
   description = "The Azure Regions in which K3s cluster VMs should be provisioned"
-  default = ["italynorth", "francecentral", "swedencentral", "norwayeast", "germanywestcentral", "switzerlandnorth", "austriaeast", "northeurope", "polandcentral", "uksouth"]
+  default = ["francecentral", "germanywestcentral", "northeurope", "uksouth"]
 }
 
 variable "resource_group_base_name" {
@@ -59,18 +59,9 @@ variable "admin_password" {
 
 variable "vm_size" {
   description = "The Azure VM size for K3s nodes"
-  default = "Standard_D2as_v5" # For arc-enabled Managed SQL Instances, AMD cores are a hard requirement at the time of creating this microhack, so scale-up if required, but stick to AMD-based SKUs
+  default = "Standard_D4ds_v6" # Scale up if required. AMD cores were a hard requirement for arc-enabled Managed SQL Instances when this microhack was created; this is an Intel-based SKU, so verify the current requirements before relying on it
 }
 
-# variable "client_id" {
-#   description = "The Client ID for the Service Principal to use for this AKS Managed Kubernetes Cluster"
-# }
-
-# variable "client_secret" {
-#   description = "The Client Secret for the Service Principal to use for this AKS Managed Kubernetes Cluster"
-#   sensitive = true
-# }
-
 # container registry variables for gitops challenge
 variable "acr_name" {
   description = "The name of the Azure Container Registry"
diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-01/az_connect_k8s.sh b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-01/az_connect_k8s.sh
index 1ebd5060..e38fca6c 100644
--- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-01/az_connect_k8s.sh
+++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-01/az_connect_k8s.sh
@@ -79,6 +79,3 @@ echo " - Status: Connected"
 echo ""
 echo "🌐 You can view the cluster in Azure Portal:"
 echo " https://portal.azure.com/#@/resource/subscriptions/$(az account show --query id --output tsv)/resourceGroups/$arc_resource_group/providers/Microsoft.Kubernetes/connectedClusters/$arc_cluster_name"
-echo ""
-echo "πŸ“ Note: You will need to provide an access token to view Kubernetes resources in the portal."
-
diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-01/solution.md b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-01/solution.md
index f82ca1e0..42a50b04 100644
--- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-01/solution.md
+++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-01/solution.md
@@ -23,6 +23,7 @@ In case you are prompted to select a subscription, please do so. In the microhac
 Validate that you can see your two resource groups in the [Azure portal](https://portal.azure.com) depending on your LabUser number. E.g. if you are LabUser-37, you should see the resource groups "37-k8s-arc" and "37-k8s-onprem".
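+You can also verify this from the CLI. A quick sketch for LabUser-37 (adjust the prefix to your own user number):
+
+```bash
+az group list --query "[?starts_with(name, '37-')].name" -o tsv
+```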
Click on your onprem resource group's name (e.g. 37-k8s-onprem). There should be 3 VMs in this resource group. Make sure that all VMs are in state 'running'. For each VM, click on its name and start it if it is not running yet:
+
 ![img-start-vm](img/vm-start.png)
 
 To connect to your k8s cluster, we first need to merge the cluster credentials into your local ~/.kube/config file. You can use the following bash script for this:
diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/01-enable-dataservice.sh b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/01-enable-dataservice.sh
index 791ea6f9..e4f79ae0 100644
--- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/01-enable-dataservice.sh
+++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/01-enable-dataservice.sh
@@ -1,15 +1,39 @@
 #!/bin/bash
-export arc_resource_group='mh-arc-aks'
-export arc_cluster_name='mh-arc-enabled-K8s'
-export customlocation_name='onprem-aks-cl' # <-- you can change this according to your naming convention
-export extensionInstanceName="arc-data-services" # <-- you can change this according to your naming convention
-export arc_data_namespace="arc-data-controller" # <-- you can change this according to your naming convention
-#export storageclass="managed-csi-premium"
-export arc_data_profile_name='azure-arc-aks-default-storage'
+
+# Extract user number from Azure username (e.g., LabUser-37 -> 37)
+azure_user=$(az account show --query user.name --output tsv)
+user_number=$(echo $azure_user | sed -n 's/.*LabUser-\([0-9]\+\).*/\1/p')
+
+if [ -z "$user_number" ]; then
+    echo "Error: Could not extract user number from Azure username: $azure_user"
+    echo "Please make sure you're logged in as LabUser-XX"
+    exit 1
+fi
+
+echo "Detected user number: $user_number"
+
+# Set variables based on detected user number
+export arc_resource_group="${user_number}-k8s-arc"
+export arc_cluster_name="${user_number}-k8s-arc-enabled"
+export customlocation_name="${user_number}-customlocation"
+export extension_instance_name="arc-data-services"
+export arc_data_namespace="arc-data-controller"
+# For K3s, the default storage class is typically 'local-path'
+export storage_class="local-path"
+export arc_data_profile_name="azure-arc-kubeadm"
+
+# Try to get Log Analytics workspace (optional - will be configured post-deployment if needed)
+echo "Checking for Log Analytics workspace..."
+law_resource_id=$(az monitor log-analytics workspace show --resource-group $arc_resource_group --workspace-name "${user_number}-law" --query 'id' -o tsv 2>/dev/null || echo "")
+if [ -n "$law_resource_id" ]; then
+    echo "Found Log Analytics workspace: ${user_number}-law"
+else
+    echo "Log Analytics workspace not found - monitoring can be configured later"
+fi
 
 # add an array with the names of required extensions
-required_extensions=("connectedk8s" "k8s-extension" "customlocation")
+required_extensions=("connectedk8s" "k8s-extension" "customlocation" "arcdata")
 
 # loop through the array and check if each extension is installed
 for extension in "${required_extensions[@]}"; do
@@ -37,37 +61,130 @@ fi
 rm provider_output
 echo ""
 
+# Verify storage class availability for K3s
+echo "Checking available storage classes..."
+
+# Verify the expected storage class exists
+if ! kubectl get storageclass $storage_class &>/dev/null; then
+    echo "Warning: Storage class '$storage_class' not found.
Available storage classes:" + kubectl get storageclass + echo "Please ensure a suitable storage class is available before proceeding." + echo "For K3s, you may need to use 'local-path' or create a custom storage class." +else + echo "Storage class '$storage_class' is available." +fi +echo "" + # enable custom location feature echo "Enabling custom location feature..." -# using the service principal id -#az ad sp show --id bc313c14-388c-4e7d-a58e-70017303ee3b --query id -o tsv &> spid -#az connectedk8s enable-features -n $arc_cluster_name -g $arc_resource_group --custom-locations-oid $(cat spid) --features cluster-connect custom-locations + # using entra user -az connectedk8s enable-features -n $arc_cluster_name \ - -g $arc_resource_group \ +az connectedk8s enable-features \ + --name $arc_cluster_name \ + --resource-group $arc_resource_group \ --features cluster-connect custom-locations ## deploy cluster extension for arc-enabled data services echo "Creating Azure Arc data services extension..." -az arcdata dc create --name $extensionInstanceName \ - -g $arc_resource_group \ + +# Set environment variables for Log Analytics integration if workspace is available +if [ -n "$law_resource_id" ]; then + echo "Setting up Log Analytics integration..." + export WORKSPACE_ID=$law_resource_id + # Get the workspace shared key + law_shared_key=$(az monitor log-analytics workspace get-shared-keys --resource-group $arc_resource_group --workspace-name "${user_number}-law" --query 'primarySharedKey' -o tsv 2>/dev/null || echo "") + if [ -n "$law_shared_key" ]; then + export WORKSPACE_SHARED_KEY=$law_shared_key + echo "Log Analytics workspace credentials configured." + else + echo "Could not retrieve workspace shared key - logs may need manual configuration." + fi +fi + +# Load credentials from YAML configuration file +CREDS_FILE="$(pwd)/arc-data-credentials.yaml" +echo "Loading Arc Data Services credentials from YAML..." + +# Simple YAML parsing using basic shell tools (no external dependencies) +export AZDATA_LOGSUI_USERNAME=$(grep -A1 "logs:" "$CREDS_FILE" | grep "username:" | sed 's/.*username: *//; s/[\"'\'']//g' | tr -d '\r\n' | xargs) +export AZDATA_LOGSUI_PASSWORD=$(grep -A2 "logs:" "$CREDS_FILE" | grep "password:" | sed 's/.*password: *//; s/[\"'\'']//g' | tr -d '\r\n' | xargs) +export AZDATA_METRICSUI_USERNAME=$(grep -A1 "metrics:" "$CREDS_FILE" | grep "username:" | sed 's/.*username: *//; s/[\"'\'']//g' | tr -d '\r\n' | xargs) +export AZDATA_METRICSUI_PASSWORD=$(grep -A2 "metrics:" "$CREDS_FILE" | grep "password:" | sed 's/.*password: *//; s/[\"'\'']//g' | tr -d '\r\n' | xargs) +export AZDATA_USERNAME=$(grep -A1 "fallback:" "$CREDS_FILE" | grep "username:" | sed 's/.*username: *//; s/[\"'\'']//g' | tr -d '\r\n' | xargs) +export AZDATA_PASSWORD=$(grep -A2 "fallback:" "$CREDS_FILE" | grep "password:" | sed 's/.*password: *//; s/[\"'\'']//g' | tr -d '\r\n' | xargs) + +echo "Credentials loaded from YAML file." +echo " Logs UI User: $AZDATA_LOGSUI_USERNAME" +echo " Metrics UI User: $AZDATA_METRICSUI_USERNAME" +echo " Fallback User: $AZDATA_USERNAME" + + +# Create the Arc Data Services extension instance +echo "Creating Arc Data Services data controller..." 
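+# For reference, the grep/sed parsing above assumes arc-data-credentials.yaml
+# uses this layout (a sketch; usernames are placeholders, and the file itself
+# is gitignored, so each participant creates their own copy):
+#   logs:
+#     username: logsadmin
+#     password: <logs-password>
+#   metrics:
+#     username: metricsadmin
+#     password: <metrics-password>
+#   fallback:
+#     username: arcadmin
+#     password: <fallback-password>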
+ +# Set extended timeout for Azure CLI to avoid premature timeout (default is typically 10-20 minutes) +export AZURE_CLI_CORE_TIMEOUT_IN_MINUTES=60 +echo "Azure CLI timeout set to $AZURE_CLI_CORE_TIMEOUT_IN_MINUTES minutes" + +az arcdata dc create \ + --name $extension_instance_name \ + --resource-group $arc_resource_group \ --custom-location $customlocation_name \ --cluster-name $arc_cluster_name \ --connectivity-mode direct \ --profile-name $arc_data_profile_name \ --auto-upload-metrics true --auto-upload-logs true \ - #--storage-class $storageclass <-- optional, uncomment if you want to override the storage class provided by the profile + --storage-class $storage_class \ + --infrastructure onpremises \ + --k8s-namespace $arc_data_namespace \ + --no-wait + +echo "" +echo "Data controller creation initiated. Monitoring deployment progress..." +echo "This process can take 15-30 minutes depending on cluster resources and network speed." +echo "" +# Monitor the deployment progress using Azure CLI status command +timeout_minutes=60 +timeout_seconds=$((timeout_minutes * 60)) +start_time=$(date +%s) -# THE BLOCK BELOW IS NOT REQUIRED ANYMORE AS THE EXTENSION CREATION COMMAND ABOVE PROVISIONS THE CUSTOM LOCATION AUTOMATICALLY +while true; do + current_time=$(date +%s) + elapsed=$((current_time - start_time)) + + if [ $elapsed -gt $timeout_seconds ]; then + echo "Monitoring timeout reached after $timeout_minutes minutes. Deployment may still be in progress." + echo "Check status manually with: az arcdata dc status show -n $extension_instance_name -g $arc_resource_group --query properties.k8SRaw.status" + break + fi + + # Check data controller status using Azure CLI + echo "Checking data controller status... (elapsed: $((elapsed / 60)) minutes)" + dc_status=$(az arcdata dc status show -n $extension_instance_name -g $arc_resource_group --query properties.k8SRaw.status.state -o tsv 2>/dev/null || echo "") + + if [ -n "$dc_status" ]; then + echo "Data controller status: $dc_status" + + if [ "$dc_status" = "Ready" ]; then + echo "βœ… Data controller deployment completed successfully!" + echo "" + echo "Final status:" + az arcdata dc status show -n $extension_instance_name -g $arc_resource_group --query properties.k8SRaw.status + break + elif [ "$dc_status" = "Failed" ] || [ "$dc_status" = "Error" ]; then + echo "❌ Data controller deployment failed!" + echo "Full status details:" + az arcdata dc status show -n $extension_instance_name -g $arc_resource_group --query properties.k8SRaw.status + break + fi + else + echo "Data controller not yet visible in Azure. Still initializing..." + fi + + sleep 60 # Check every minute since Azure CLI calls are more expensive +done -# # get arc-enabled kubernetes resourcemanager id -# connectedClusterId=$(az connectedk8s show -n $arc_cluster_name -g $arc_resource_group --query id -o tsv) -# # get arc-enabled data services extension id -# extensionId=$(az k8s-extension show --name $extensionInstanceName --cluster-type connectedClusters -c $arc_cluster_name -g $arc_resource_group --query id -o tsv) -# # create custom location -# echo "Creating custom location..." -# az customlocation create -n $customlocation_name -g $arc_resource_group --namespace $arc_data_namespace --host-resource-id $connectedClusterId --cluster-extension-ids $extensionId # validate if custom location provisioned successfully echo "Validating if the feature is enabled..." 
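+# Alternative to the ARM polling loop above (a sketch): watch the controller
+# from the cluster side instead, assuming $arc_data_namespace is still set:
+#   kubectl get datacontrollers -n $arc_data_namespace -w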
@@ -75,4 +192,4 @@ az customlocation show -g $arc_resource_group -n $customlocation_name echo "" echo "Validating if the arc datacontroller is created..." -kubectl get datacontrollers -n onprem-aks-cl \ No newline at end of file +kubectl get datacontrollers -n $arc_data_namespace \ No newline at end of file diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/aks-data-controller.sh.bak b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/aks-data-controller.sh.bak deleted file mode 100644 index 5863ca37..00000000 --- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/aks-data-controller.sh.bak +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash - -## variables for Azure subscription, resource group, cluster name, location, extension, and namespace. -export arc_resource_group='mh-arc-aks' -export arc_cluster_name='mh-arc-enabled-K8s' -export custom_location_name='mh-custom-data-location' - -## variables for Log Analytics -# todo: Where do these come from? Fetch automatically -#export log_analytics_workspace_guid='e52c449f-e086-472c-b8dc-b51fd51f2650' -#export log_analytics_key='QhivRkp/e6rSzZSFBE6jENJvLlBSpjoKRWo5S/MZEVTTb7/UCi8H764pxdUxOOLG8hCwCxUDi8XEWPeiJ0LZcw==' - -## variables for logs and metrics dashboard credentials -export AZDATA_LOGSUI_USERNAME='adm-simon' -export AZDATA_LOGSUI_PASSWORD='#Start12345!' -export AZDATA_METRICSUI_USERNAME='adm-simon' -export AZDATA_METRICSUI_PASSWORD='#Start12345!' - -## variables for SQL Managed Instance -export sql_mi_name='mh-sql-mi-arc' - -subscription_id=$(az account show --query id --output tsv) - -# todo: use variables and clean up -#export workspaceId=$(az resource show --resource-group mh-arc-cloud --name mh-arc-law --resource-type "Microsoft.OperationalInsights/workspaces" --query properties.customerId -o tsv) - -echo "Creating Azure Arc Data Controller 'arc-data-controller' (including custom location '$custom_location_name')..." -az arcdata dc create \ ---name arc-data-controller \ --g $arc_resource_group \ ---connectivity-mode indirect \ ---profile-name azure-arc-aks-premium-storage \ ---storage-class managed-csi-premium \ ---location westeurope \ ---use-k8s \ ---k8s-namespace arc-data-controller \ ---infrastructure azure -#--custom-location $custom_location_name \ -#--cluster-name $arc_cluster_name \ -#--auto-upload-metrics true \ -#--auto-upload-logs true \ - -az arcdata dc status show -n arc-data-controller -g $arc_resource_group --query properties.k8SRaw.status.state -o tsv - -echo "Createing SQL MI..." 
-az sql mi-arc create \
---name $sql_mi_name \
---resource-group $arc_resource_group \
--–subscription $subscription_id \
---custom-location $custom_location_name
\ No newline at end of file
diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/dataservice.sh b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/dataservice.sh
new file mode 100644
index 00000000..bab29e9b
--- /dev/null
+++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/dataservice.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+# Extract user number from Azure username (e.g., LabUser-37 -> 37)
+azure_user=$(az account show --query user.name --output tsv)
+user_number=$(echo $azure_user | sed -n 's/.*LabUser-\([0-9]\+\).*/\1/p')
+
+connected_cluster_name="$user_number-k8s-arc-enabled"
+resource_group="$user_number-k8s-arc"
+custom_location="$user_number-onprem"
+target_namespace="arc-data-services"
+
+# Try to get Log Analytics workspace (optional - will be configured post-deployment if needed)
+echo "Checking for Log Analytics workspace..."
+law_resource_id=$(az monitor log-analytics workspace show --resource-group $resource_group --workspace-name "${user_number}-law" --query 'id' -o tsv 2>/dev/null || echo "")
+law_shared_key=$(az monitor log-analytics workspace get-shared-keys --resource-group $resource_group --workspace-name "${user_number}-law" --query primarySharedKey -o tsv 2>/dev/null || echo "")
+
+# Making extension install dynamic
+# az config set extension.use_dynamic_install=yes_without_prompt
+
+# required_extensions=("connectedk8s" "k8s-extension" "customlocation" "arcdata" "k8s-configuration")
+
+# # loop through the array and check if each extension is installed
+# for extension in "${required_extensions[@]}"; do
+#     echo "Checking if you have up-to-date Azure Arc AZ CLI '$extension' extension..."
+#     az extension show --name "$extension" &> extension_output
+#     if cat extension_output | grep -q "not installed"; then
+#         az extension add --name "$extension"
+#     else
+#         az extension update --name "$extension"
+#     fi
+#     rm extension_output
+#     echo ""
+# done
+
+# echo "Registering required resource providers..."
+# az provider register --namespace Microsoft.Kubernetes --wait
+# az provider register --namespace Microsoft.KubernetesConfiguration --wait
+# az provider register --namespace Microsoft.ExtendedLocation --wait
+# az provider register --namespace Microsoft.AzureArcData --wait
+# az provider register --namespace Microsoft.RedHatOpenShift --wait
+
+# echo Installing Azure Arc-enabled data services extension
+# az k8s-extension create \
+#     --name arc-data-services \
+#     --extension-type microsoft.arcdataservices \
+#     --cluster-type connectedClusters \
+#     --cluster-name $connected_cluster_name \
+#     --resource-group $resource_group \
+#     --auto-upgrade false \
+#     --scope cluster \
+#     --version 1.18.0 \
+#     --release-namespace $target_namespace \
+#     --config Microsoft.CustomLocation.ServiceAccount=sa-arc-bootstrapper
+# # TODO: check whether service account can be found if it's in another namespace
+# #TODO: check whether this version is the latest stable version
+
+# echo "Waiting for extension to be ready..."
+# sleep 15
+
+# echo "Getting connected cluster and extension IDs..."
+# connected_cluster_id=$(az connectedk8s show --name $connected_cluster_name --resource-group $resource_group --query id -o tsv)
+# extension_id=$(az k8s-extension show --name arc-data-services --cluster-type connectedClusters --cluster-name $connected_cluster_name --resource-group $resource_group --query id -o tsv)
+
+echo "Enabling custom location feature..."
+# az customlocation create \
+#     --name $custom_location \
+#     --resource-group $resource_group \
+#     --namespace $target_namespace \
+#     --host-resource-id $connected_cluster_id \
+#     --cluster-extension-ids $extension_id
+az connectedk8s enable-features \
+    --name $connected_cluster_name \
+    --resource-group $resource_group \
+    --features cluster-connect custom-locations
+
+# echo "Waiting for custom location to be ready..."
+# sleep 15
+# custom_location_id=$(az customlocation show --name $custom_location --resource-group $resource_group --query id -o tsv)
+
+echo "Setting up credentials for data controller..."
+#TODO: Parse from arc-data-credentials.yaml instead of hardcoding credentials
+export AZDATA_USERNAME="data_user"
+export AZDATA_PASSWORD="ComplexSecurePassword123!"
+# export AZDATA_LOGSUI_USERNAME="logs_user"
+# export AZDATA_LOGSUI_PASSWORD="ComplexSecurePassword123!"
+# export AZDATA_METRICSUI_USERNAME="metrics_user"
+# export AZDATA_METRICSUI_PASSWORD="ComplexSecurePassword123!"
+
+export AZDATA_LAW_WORKSPACE_ID="$law_resource_id"
+export AZDATA_LAW_SHARED_KEY="$law_shared_key"
+
+echo "Creating Arc Data Controller..."
+az arcdata dc create \
+    --name arc-data-controller \
+    --resource-group $resource_group \
+    --cluster-name $connected_cluster_name \
+    --connectivity-mode direct \
+    --profile-name azure-arc-kubeadm \
+    --auto-upload-metrics true \
+    --auto-upload-logs true \
+    --custom-location $custom_location \
+    --storage-class local-path
+# az arcdata dc create \
+#     --name arc-data-controller \
+#     --resource-group $resource_group \
+#     --custom-location $custom_location \
+#     --cluster-name $connected_cluster_name \
+#     --connectivity-mode direct \
+#     --profile-name azure-arc-kubeadm \
+#     --auto-upload-metrics true --auto-upload-logs true \
+#     --storage-class local-path \
+#     --infrastructure onpremises \
+#     --k8s-namespace $target_namespace
+
+# az deployment group create \
+#     --resource-group $resource_group \
+#     --name "$user_number-dc-depl" \
+#     --template-file "./dataController.json" \
+#     --parameters "./dataController.parameters.json"
diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/solution.md b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/solution.md
index 24deaf30..03356da1 100644
--- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/solution.md
+++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/solution.md
@@ -3,16 +3,19 @@
 [Back to challenge](../../challenges/challenge-04.md) - [Next Challenge's Solution](../challenge-05/solution.md)
 
 ## prerequisites
-- [client tools](https://learn.microsoft.com/en-us/azure/azure-arc/data/install-client-tools)
-- Provider reqistration
-```shell
+* You have an arc-connected k8s cluster, i.e. you finished challenge 01.
+* [client tools](https://learn.microsoft.com/en-us/azure/azure-arc/data/install-client-tools)
+* A Log Analytics workspace (law). (If you used the terraform to deploy the microhack environment, each participant already has a law in their arc resource group.)
+* You must be logged in to the Azure CLI (az login)
+* Your kubectl context points to your arc-enabled k8s cluster
+* Provider registration
+```bash
 az provider register --namespace Microsoft.AzureArcData
 ```
-
-## Read about the prerequisites and concepts
-1. Create Azure Arc [data services cluster extension](https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/conceptual-extensions)
-2. Create a [custom location] on your arc-enabled k8s(https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/custom-locations#create-custom-location)
-3. create the Arc data controller
+* The yq YAML query tool
+```bash
+sudo snap install yq
+```
 
 ## Create arc data services controller
 Open the file '01-enable-dataservice.sh' in your editor.
diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/spid b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/spid
deleted file mode 100644
index fd98b1b6..00000000
--- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/walkthroughs/challenge-04/spid
+++ /dev/null
@@ -1 +0,0 @@
-369672cd-bcf5-47e6-b815-8356e94abd60

From 9f7359f14df4caab79e5f55e383317a62ab5c55e Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 31 Dec 2025 15:18:30 +0000
Subject: [PATCH 2/3] Implement silent password retrieval from fixtures.tfvars for SCP in az_connect_k8s.sh

Co-authored-by: skiddder <56867233+skiddder@users.noreply.github.com>
---
 .../lab/scripts/az_connect_k8s.sh | 38 +++++++++++++++++--
 1 file changed, 34 insertions(+), 4 deletions(-)

diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh
index 61d40c07..61b6b71a 100644
--- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh
+++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh
@@ -17,12 +17,42 @@ echo "Detected user number: $user_number"
 echo "Setting up kubectl access to the K3s cluster..."
 # Get puplic ip of master node via Azure cli according to user-number
 master_pip=$(az vm list-ip-addresses --resource-group "${user_number}-k8s-onprem" --name "${user_number}-k8s-master" --query "[0].virtualMachine.network.publicIpAddresses[0].ipAddress" --output tsv)
+
+# Retrieve admin_user and admin_password from fixtures.tfvars
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+LAB_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
+FIXTURES_FILE="$LAB_DIR/fixtures.tfvars"
+
+if [ ! -f "$FIXTURES_FILE" ]; then
-f "$FIXTURES_FILE" ]; then + echo "Error: fixtures.tfvars not found at $FIXTURES_FILE" + echo "Please ensure fixtures.tfvars exists in the lab directory" + exit 1 +fi + +# Extract admin_user and admin_password from fixtures.tfvars +admin_user=$(grep -E '^\s*admin_user\s*=' "$FIXTURES_FILE" | sed -E 's/.*=\s*"(.*)".*/\1/') +admin_password=$(grep -E '^\s*admin_password\s*=' "$FIXTURES_FILE" | sed -E 's/.*=\s*"(.*)".*/\1/') + +if [ -z "$admin_user" ] || [ -z "$admin_password" ]; then + echo "Error: Could not extract admin_user or admin_password from fixtures.tfvars" + echo "Please ensure fixtures.tfvars contains admin_user and admin_password variables" + exit 1 +fi + +echo "Using admin user: $admin_user" + # Create .kube directory if it doesn't exist mkdir -p ~/.kube -# Copy the kubeconfig to standard location -# TODO: scp prompts for password - retrieve and use password from fixtures.tfvars silently -# TODO: on first ssh connection to master node, you may need to accept the host key fingerprint. Ensure this happens silently -scp mhadmin@$master_pip:/home/mhadmin/.kube/config ~/.kube/config + +# Copy the kubeconfig to standard location using sshpass for silent authentication +# and SSH options to accept host keys automatically +if ! command -v sshpass &> /dev/null; then + echo "Error: sshpass is not installed. Installing sshpass..." + sudo apt-get update -qq && sudo apt-get install -y -qq sshpass +fi + +sshpass -p "$admin_password" scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ + ${admin_user}@${master_pip}:/home/${admin_user}/.kube/config ~/.kube/config # replace localhost address with the public ip of master node sed -i "s/127.0.0.1/$master_pip/g" ~/.kube/config # Now kubectl works directly on your local client - no need to ssh into the master node anymore From 5e418966558f761529474dbb8dac554ea798481d Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 31 Dec 2025 15:20:19 +0000 Subject: [PATCH 3/3] Fix typo: change 'puplic' to 'public' in comment Co-authored-by: skiddder <56867233+skiddder@users.noreply.github.com> --- .../lab/scripts/az_connect_k8s.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh index 61b6b71a..d4c31967 100644 --- a/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh +++ b/03-Azure/01-03-Infrastructure/03_Hybrid_Azure_Arc_Kubernetes/lab/scripts/az_connect_k8s.sh @@ -15,7 +15,7 @@ fi echo "Detected user number: $user_number" echo "Setting up kubectl access to the K3s cluster..." -# Get puplic ip of master node via Azure cli according to user-number +# Get public ip of master node via Azure cli according to user-number master_pip=$(az vm list-ip-addresses --resource-group "${user_number}-k8s-onprem" --name "${user_number}-k8s-master" --query "[0].virtualMachine.network.publicIpAddresses[0].ipAddress" --output tsv) # Retrieve admin_user and admin_password from fixtures.tfvars