diff --git a/.ci/python-keras-scoring.yml b/.ci/python-keras-scoring.yml
index eec5c85..1936ae6 100644
--- a/.ci/python-keras-scoring.yml
+++ b/.ci/python-keras-scoring.yml
@@ -103,4 +103,4 @@ jobs:
fieldMappings: |
Description=Branch: Branch $(Build.SourceBranch) failed to build. Go to Boards>WorkItems and tag the failure type.
displayName: 'Create work item on failure'
- condition: failed()
\ No newline at end of file
+ condition: failed()
diff --git a/.ci/python-ml-scoring.yml b/.ci/python-ml-scoring.yml
index 55a6003..2c7697b 100644
--- a/.ci/python-ml-scoring.yml
+++ b/.ci/python-ml-scoring.yml
@@ -68,4 +68,4 @@ jobs:
fieldMappings: |
Description=Branch: Branch $(Build.SourceBranch) failed to build. Go to Boards>WorkItems and tag the failure type.
displayName: 'Create work item on failure'
- condition: failed()
\ No newline at end of file
+ condition: failed()
diff --git a/.ci/python-ml-training.yml b/.ci/python-ml-training.yml
index 74e4a10..938bc9a 100644
--- a/.ci/python-ml-training.yml
+++ b/.ci/python-ml-training.yml
@@ -115,4 +115,4 @@ jobs:
fieldMappings: |
Description=Branch: Branch $(Build.SourceBranch) failed to build. Go to Boards>WorkItems and tag the failure type.
displayName: 'Create work item on failure'
- condition: failed()
\ No newline at end of file
+ condition: failed()
diff --git a/.ci/realtime-serving-dl-dev.yml b/.ci/realtime-serving-dl-dev.yml
index 406a0f6..71a6544 100644
--- a/.ci/realtime-serving-dl-dev.yml
+++ b/.ci/realtime-serving-dl-dev.yml
@@ -1,4 +1,3 @@
-
# Starter pipeline
# Start with a minimal pipeline that you can customize to build and deploy your code.
# Add steps that build, run tests, deploy, and more:
diff --git a/.ci/scripts/SetResource.ps1 b/.ci/scripts/SetResource.ps1
index 74d74bd..e115a15 100644
--- a/.ci/scripts/SetResource.ps1
+++ b/.ci/scripts/SetResource.ps1
@@ -79,4 +79,4 @@ $clusterResources = Get-AzResource -ResourceType "Microsoft.ContainerService/man
foreach($cluster in $clusterResources)
{
Update-GroupResources -resGroup $cluster.Properties.nodeResourceGroup -tags $projectTags
-}
+}
diff --git a/.ci/steps/DLAKSDeployAMLJob.yml b/.ci/steps/DLAKSDeployAMLJob.yml
index 27798f4..45275fc 100644
--- a/.ci/steps/DLAKSDeployAMLJob.yml
+++ b/.ci/steps/DLAKSDeployAMLJob.yml
@@ -1,5 +1,3 @@
-
-
parameters:
azureSubscription: ''
azure_subscription: ''
diff --git a/.ci/steps/MLAKSDeployAMLJob.yml b/.ci/steps/MLAKSDeployAMLJob.yml
index f60b8a2..499c5b0 100644
--- a/.ci/steps/MLAKSDeployAMLJob.yml
+++ b/.ci/steps/MLAKSDeployAMLJob.yml
@@ -1,5 +1,3 @@
-
-
parameters:
azureSubscription: ''
azure_subscription: ''
@@ -93,4 +91,3 @@ steps:
conda: ${{parameters.conda}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
doCleanup: ${{parameters.doCleanup}}
-
diff --git a/.ci/steps/MLBatchDeployAMLJob.yml b/.ci/steps/MLBatchDeployAMLJob.yml
index 4954761..7740dcf 100644
--- a/.ci/steps/MLBatchDeployAMLJob.yml
+++ b/.ci/steps/MLBatchDeployAMLJob.yml
@@ -1,4 +1,3 @@
-
parameters:
azureSubscription: ''
azure_subscription: ''
diff --git a/.ci/steps/RecoPySparkRTS.yml b/.ci/steps/RecoPySparkRTS.yml
index 2e4aa10..99ed88f 100644
--- a/.ci/steps/RecoPySparkRTS.yml
+++ b/.ci/steps/RecoPySparkRTS.yml
@@ -1,4 +1,3 @@
-
parameters:
azureSubscription: ''
azure_subscription: ''
diff --git a/.ci/steps/az-ml-realtime-score.yml b/.ci/steps/az-ml-realtime-score.yml
index 9fde99b..7cf25cd 100644
--- a/.ci/steps/az-ml-realtime-score.yml
+++ b/.ci/steps/az-ml-realtime-score.yml
@@ -1,5 +1,3 @@
-
-
parameters:
azureSubscription: ''
azure_subscription: ''
diff --git a/.ci/steps/azpapermill.yml b/.ci/steps/azpapermill.yml
index 48a3557..b450ea7 100644
--- a/.ci/steps/azpapermill.yml
+++ b/.ci/steps/azpapermill.yml
@@ -1,5 +1,3 @@
-
-
parameters:
notebook: 01_DataPrep.ipynb # defaults for any parameters that aren't specified
location: "x"
diff --git a/.ci/steps/azpapermill_iterator.yml b/.ci/steps/azpapermill_iterator.yml
index ccba492..0b78759 100644
--- a/.ci/steps/azpapermill_iterator.yml
+++ b/.ci/steps/azpapermill_iterator.yml
@@ -1,5 +1,3 @@
-
-
parameters:
notebooks: 01_DataPrep.ipynb # defaults for any parameters that aren't specified
location: "x"
diff --git a/.ci/steps/azure_r.yml b/.ci/steps/azure_r.yml
index f2675ef..f456352 100644
--- a/.ci/steps/azure_r.yml
+++ b/.ci/steps/azure_r.yml
@@ -1,4 +1,3 @@
-
parameters:
notebook: # defaults for any parameters that aren't specified
location: "."
diff --git a/.ci/steps/bash_r.yml b/.ci/steps/bash_r.yml
index 79f5ab0..a388b85 100644
--- a/.ci/steps/bash_r.yml
+++ b/.ci/steps/bash_r.yml
@@ -1,4 +1,3 @@
-
parameters:
notebook: # defaults for any parameters that aren't specified
location: "."
diff --git a/.ci/steps/cleanuptask.yml b/.ci/steps/cleanuptask.yml
index 4efc7bb..5f4d39e 100644
--- a/.ci/steps/cleanuptask.yml
+++ b/.ci/steps/cleanuptask.yml
@@ -22,4 +22,3 @@ steps:
echo Project resource group did not exist
fi
echo Done Cleanup
-
diff --git a/.ci/steps/config_conda.yml b/.ci/steps/config_conda.yml
index eb186ae..7aa15a3 100644
--- a/.ci/steps/config_conda.yml
+++ b/.ci/steps/config_conda.yml
@@ -1,4 +1,3 @@
-
parameters:
conda_location: .
azureSubscription: #
@@ -105,4 +104,3 @@ steps:
pip install -U "azureml-core<0.1.5" "azureml-contrib-services<0.1.5" "azureml-pipeline<0.1.5" \
--extra-index-url https://azuremlsdktestpypi.azureedge.net/sdk-release/master/588E708E0DF342C4A80BD954289657CF
-
diff --git a/.ci/steps/deploy_rts.yml b/.ci/steps/deploy_rts.yml
index df62962..5d27669 100644
--- a/.ci/steps/deploy_rts.yml
+++ b/.ci/steps/deploy_rts.yml
@@ -108,4 +108,3 @@ steps:
location: ${{parameters.location}}
azureSubscription: ${{parameters.azureSubscription}}
conda: ${{parameters.conda}}
-
diff --git a/.ci/steps/deploy_steps.yml b/.ci/steps/deploy_steps.yml
index 57be9d5..21b7ad3 100644
--- a/.ci/steps/deploy_steps.yml
+++ b/.ci/steps/deploy_steps.yml
@@ -73,4 +73,3 @@ steps:
ScriptArguments: '-resourceGroupName ''${{parameters.azureresourcegroup}}'' -tagId ''deployment-id'' -deploymentId ''${{parameters.deploymentguidtag}}'''
azurePowerShellVersion: 'LatestVersion'
displayName: 'Tag All Resources'
-
diff --git a/.ci/steps/deploy_steps_v2.yml b/.ci/steps/deploy_steps_v2.yml
index 7dfe404..0987152 100644
--- a/.ci/steps/deploy_steps_v2.yml
+++ b/.ci/steps/deploy_steps_v2.yml
@@ -85,4 +85,3 @@ steps:
ScriptArguments: '-resourceGroupName ''${{parameters.azureresourcegroup}}'' -tagId ''deployment-id'' -deploymentId ''${{parameters.deploymentguidtag}}'''
azurePowerShellVersion: 'LatestVersion'
displayName: 'Tag All Resources'
-
diff --git a/.ci/steps/docker_clean_step.yml b/.ci/steps/docker_clean_step.yml
index d5c4940..586a010 100644
--- a/.ci/steps/docker_clean_step.yml
+++ b/.ci/steps/docker_clean_step.yml
@@ -1,4 +1,3 @@
-
steps:
- script: |
docker stop $(docker ps -a -q)
diff --git a/.ci/steps/papermill.yml b/.ci/steps/papermill.yml
index 5f55f71..6ce82bc 100644
--- a/.ci/steps/papermill.yml
+++ b/.ci/steps/papermill.yml
@@ -1,5 +1,3 @@
-
-
parameters:
notebook: 01_DataPrep.ipynb # defaults for any parameters that aren't specified
location: "{{cookiecutter.project_name}}"
diff --git a/.ci/steps/reco_conda_clean_win.yml b/.ci/steps/reco_conda_clean_win.yml
index a9f60f0..c136939 100644
--- a/.ci/steps/reco_conda_clean_win.yml
+++ b/.ci/steps/reco_conda_clean_win.yml
@@ -17,4 +17,4 @@ steps:
del /q /S %LOCALAPPDATA%\Temp\*
for /d %%i in (%LOCALAPPDATA%\Temp\*) do @rmdir /s /q "%%i"
displayName: 'Remove Temp Files'
- condition: succeededOrFailed()
+ condition: succeededOrFailed()
diff --git a/.ci/steps/shorten_string.yml b/.ci/steps/shorten_string.yml
index 8bdd17d..c5b3cc8 100644
--- a/.ci/steps/shorten_string.yml
+++ b/.ci/steps/shorten_string.yml
@@ -1,4 +1,3 @@
-
parameters:
Input_String: ""
Output_Variable: ""
diff --git a/.docs/python_scoring.md b/.docs/python_scoring.md
index 6c29cde..9f85450 100644
--- a/.docs/python_scoring.md
+++ b/.docs/python_scoring.md
@@ -4,4 +4,4 @@
-When deploying ML models in Python there are two core questions. The first is will it be real time and whether the model is a deep learning model. For deploying deep learning models that require real time we recommend Azure Kubernetes Services (AKS) with GPUs. For a tutorial on how to do that look at [AKS w/GPU](https://github.com/Microsoft/AKSDeploymentTutorialAML). For deploying deep learning models for batch scoring we recommend using AzureML pipelines with GPUs, for a tutorial on how to do that look [AzureML Pipelines w/GPU](https://github.com/Azure/Batch-Scoring-Deep-Learning-Models-With-AML). For non deep learning models we recommend you use the same services but without GPUs. For a tutorial on deploying classical ML models for real time scoring look [AKS](https://github.com/Microsoft/MLAKSDeployAML) and for batch scoring [AzureML Pipelines](https://github.com/Microsoft/AMLBatchScoringPipeline)
\ No newline at end of file
+When deploying ML models in Python there are two core questions. The first is whether scoring must happen in real time; the second is whether the model is a deep learning model. For deploying deep learning models that require real-time scoring we recommend Azure Kubernetes Service (AKS) with GPUs. For a tutorial on how to do that, see [AKS w/GPU](https://github.com/Microsoft/AKSDeploymentTutorialAML). For deploying deep learning models for batch scoring we recommend AzureML pipelines with GPUs; for a tutorial, see [AzureML Pipelines w/GPU](https://github.com/Azure/Batch-Scoring-Deep-Learning-Models-With-AML). For non-deep-learning models we recommend the same services without GPUs. For a tutorial on deploying classical ML models for real-time scoring see [AKS](https://github.com/Microsoft/MLAKSDeployAML), and for batch scoring see [AzureML Pipelines](https://github.com/Microsoft/AMLBatchScoringPipeline).
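As context for the real-time path recommended above, here is a minimal deployment sketch using the AzureML Python SDK v1. The workspace config, model name "resnet152", entry script "score.py", environment file "environment.yml", and AKS target name "aks-gpu" are illustrative placeholders, not anything defined in this repo:

```python
# Minimal sketch of a real-time AKS deployment with the AzureML Python SDK (v1).
# Assumptions: an existing workspace config.json, a registered model, and an
# attached AKS compute target; all names below are placeholders.
from azureml.core import Environment, Model, Workspace
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AksWebservice

ws = Workspace.from_config()            # loads subscription/resource group/workspace
model = Model(ws, name="resnet152")     # a model previously registered in the workspace

inference_config = InferenceConfig(
    entry_script="score.py",            # must define init() and run(raw_data)
    environment=Environment.from_conda_specification("scoring-env", "environment.yml"),
)
deployment_config = AksWebservice.deploy_configuration(cpu_cores=1, memory_gb=4)

# For deep learning models the AKS cluster would be provisioned with GPU node SKUs.
service = Model.deploy(ws, "scoring-service", [model], inference_config,
                       deployment_config, deployment_target=ws.compute_targets["aks-gpu"])
service.wait_for_deployment(show_output=True)
print(service.scoring_uri)
```

Batch scoring follows the same registration and environment setup, but submits an AzureML pipeline run instead of standing up a web service.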
diff --git a/.docs/python_training.md b/.docs/python_training.md
index cc934f7..0e253e3 100644
--- a/.docs/python_training.md
+++ b/.docs/python_training.md
@@ -7,4 +7,4 @@
There are many options for training ML models in Python on Azure. The most straightforward is to train your model on a [DSVM](https://azure.microsoft.com/en-us/services/virtual-machines/data-science-virtual-machines/). You can either do this in local mode directly on the VM or by attaching it in AzureML as a compute target. If you want AzureML to manage the compute for you and scale it up and down based on whether jobs are waiting in the queue, then you should use AzureML Compute.
If you are going to run multiple jobs for hyperparameter tuning or other purposes, then we recommend using [Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters), [Azure automated ML](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml) or AzureML Compute, depending on your requirements.
-For a tutorial on how to use Hyperdrive go [here](https://github.com/Microsoft/MLHyperparameterTuning).
\ No newline at end of file
+For a tutorial on how to use Hyperdrive, go [here](https://github.com/Microsoft/MLHyperparameterTuning).
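As context for the Hyperdrive recommendation above, a sweep is configured roughly as in the following sketch (AzureML Python SDK v1). The script "train.py", cluster name "amlcompute", argument names, and the metric "accuracy" that the script would log are placeholders:

```python
# Minimal sketch of a Hyperdrive sweep with the AzureML Python SDK (v1).
# Assumptions: an existing workspace and AmlCompute cluster; names are placeholders.
from azureml.core import Experiment, ScriptRunConfig, Workspace
from azureml.train.hyperdrive import (HyperDriveConfig, PrimaryMetricGoal,
                                      RandomParameterSampling, choice, uniform)

ws = Workspace.from_config()
src = ScriptRunConfig(source_directory=".", script="train.py",
                      compute_target=ws.compute_targets["amlcompute"])

sampling = RandomParameterSampling({
    "--learning-rate": uniform(1e-4, 1e-1),   # continuous range
    "--batch-size": choice(32, 64, 128),      # discrete set
})
hd_config = HyperDriveConfig(run_config=src,
                             hyperparameter_sampling=sampling,
                             primary_metric_name="accuracy",   # logged by train.py
                             primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
                             max_total_runs=20,
                             max_concurrent_runs=4)

run = Experiment(ws, "hyperdrive-sweep").submit(hd_config)
run.wait_for_completion(show_output=True)
```

Random sampling with a maximize-accuracy goal is just one choice; grid and Bayesian sampling plug into the same `HyperDriveConfig` in the same way.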
diff --git a/.github/ISSUE_TEMPLATE/scenario_request.md b/.github/ISSUE_TEMPLATE/scenario_request.md
index 29f0a98..e359ef8 100644
--- a/.github/ISSUE_TEMPLATE/scenario_request.md
+++ b/.github/ISSUE_TEMPLATE/scenario_request.md
@@ -13,4 +13,4 @@ assignees: ''
### Reasons for scenario
-### Other Comments
\ No newline at end of file
+### Other Comments
diff --git a/.images/decision_python_scoring.png b/.images/decision_python_scoring.png
index 91534d4..2612566 100644
Binary files a/.images/decision_python_scoring.png and b/.images/decision_python_scoring.png differ
diff --git a/.images/demo.svg b/.images/demo.svg
index 0fe4bf7..b808937 100644
--- a/.images/demo.svg
+++ b/.images/demo.svg
@@ -1 +1 @@
[.images/demo.svg: a single-line animated terminal SVG showing `git clone --recurse-submodules https://github.com/Microsoft/AIReferenceArchitectures.git` and the checkout of the DeployDeepModelKubernetes, DeployDeepModelPipelines, DeployMLModelKubernetes, DeployMLModelPipelines, and TrainMLModelHyperdrive submodules; the content is unchanged except that a trailing newline is added at end of file]
diff --git a/.images/python_training_diag.png b/.images/python_training_diag.png
index cbb6188..1a0e6a7 100644
Binary files a/.images/python_training_diag.png and b/.images/python_training_diag.png differ
diff --git a/LICENSE b/LICENSE
index 4b1ad51..f27f877 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
- MIT License
+MIT License
Copyright (c) Microsoft Corporation. All rights reserved.
@@ -18,4 +18,4 @@
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE
+ SOFTWARE
diff --git a/ML-For-Beginners b/ML-For-Beginners
new file mode 160000
index 0000000..f925c9a
--- /dev/null
+++ b/ML-For-Beginners
@@ -0,0 +1 @@
+Subproject commit f925c9afbba72b73690ec3bce07377e70b9b0383
diff --git a/ai200-architectures/TrainDistributedDeepModel b/ai200-architectures/TrainDistributedDeepModel
deleted file mode 160000
index d037c56..0000000
--- a/ai200-architectures/TrainDistributedDeepModel
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit d037c568bbd4394fbf2f668937d32122ae5a1a37
diff --git a/contribution_plan.md b/contribution_plan.md
new file mode 100644
index 0000000..f894039
--- /dev/null
+++ b/contribution_plan.md
@@ -0,0 +1,379 @@
+# 🚀 AI/ML Open Source Contribution Plan
+
+## 🎯 Target Projects & High-Impact Opportunities
+
+Based on your Python, AI/ML, and Cloud Infrastructure background, here are the **TOP 3 RECOMMENDED CONTRIBUTIONS**:
+
+---
+
+## 🥇 **PRIORITY 1: Microsoft ML-For-Beginners**
+**Repository**: https://github.com/microsoft/ML-For-Beginners
+**Focus**: Educational ML content & Python examples
+
+### 🔥 **High-Impact Issue #1: Documentation Enhancement**
+**Issue**: [Add comprehensive documentation](https://github.com/microsoft/ML-For-Beginners/issues/835)
+
+#### **Problem Explanation**
+The ML-For-Beginners repository lacks comprehensive documentation, making it difficult for new contributors and learners to:
+- Understand the project structure
+- Set up development environment
+- Navigate between lessons
+- Contribute effectively
+
+#### **Suggested Solution**
+Create a comprehensive documentation framework with:
+
+```text
+# Documentation Structure
+docs/
+├── README.md                  # Main documentation
+├── getting-started/
+│   ├── installation.md        # Setup instructions
+│   ├── environment.md         # Development environment
+│   └── first-contribution.md  # How to contribute
+├── lessons/
+│   ├── lesson-guide.md        # How lessons are structured
+│   └── example-walkthrough.md # Sample lesson breakdown
+├── api/
+│   ├── code-reference.md      # Code documentation
+│   └── utilities.md           # Helper functions
+└── contributing/
+    ├── guidelines.md          # Contribution guidelines
+    ├── code-style.md          # Coding standards
+    └── review-process.md      # PR review process
+```
+
+#### **Implementation Plan**
+1. **Audit existing content** - catalog all lessons and code (see the audit sketch after this list)
+2. **Create documentation framework** - structured markdown files
+3. **Add interactive examples** - code snippets with explanations
+4. **Include setup guides** - environment configuration
+5. **Write contributor guide** - detailed contribution process
+
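+For step 1, a small audit script is handy. This is a minimal sketch: it assumes each lesson folder carries a README.md (as in the repo), and the file extensions to look for are illustrative:
+
+```python
+# Walk the repository and catalog lesson folders with their code files
+from pathlib import Path
+
+def audit_lessons(repo_root="."):
+    """Map each folder containing a README.md to the notebooks/scripts beside it."""
+    catalog = {}
+    for readme in Path(repo_root).rglob("README.md"):
+        lesson_dir = readme.parent
+        code_files = sorted(
+            p.name for p in lesson_dir.iterdir()
+            if p.suffix in {".ipynb", ".py", ".R"}
+        )
+        if code_files:
+            catalog[str(lesson_dir)] = code_files
+    return catalog
+
+if __name__ == "__main__":
+    for lesson, files in audit_lessons().items():
+        print(f"{lesson}: {', '.join(files)}")
+```
+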
+---
+
+### 🔥 **High-Impact Issue #2: Confusion Matrix Fix**
+**Issue**: [Wrong False Negative Definition](https://github.com/microsoft/ML-For-Beginners/issues/825)
+
+#### **Problem Explanation**
+The current definition of "False Negative" in the Confusion Matrix lesson is incorrect:
+- **Current (Wrong)**: False Negative = Model predicts positive when actual is negative
+- **Correct**: False Negative = Model predicts negative when actual is positive
+
+This error can mislead beginners learning fundamental ML concepts.
+
+#### **Suggested Solution**
+```python
+# Correct Confusion Matrix Implementation
+import numpy as np
+import matplotlib.pyplot as plt
+from sklearn.metrics import confusion_matrix
+
+def create_confusion_matrix_tutorial():
+    """
+    Comprehensive confusion matrix tutorial with correct definitions.
+    """
+    # Example predictions vs actual
+    y_true = [1, 0, 1, 1, 0, 1, 0, 0, 1, 0]
+    y_pred = [1, 0, 0, 1, 0, 1, 1, 0, 1, 0]
+
+    # Create confusion matrix
+    cm = confusion_matrix(y_true, y_pred)
+
+    # CORRECT DEFINITIONS:
+    tn, fp, fn, tp = cm.ravel()
+
+    print("📊 Confusion Matrix Breakdown:")
+    print(f"True Positives (TP): {tp}")
+    print(f"True Negatives (TN): {tn}")
+    print(f"False Positives (FP): {fp} - Model predicted POSITIVE when actual was NEGATIVE")
+    print(f"False Negatives (FN): {fn} - Model predicted NEGATIVE when actual was POSITIVE")
+
+    # Calculate metrics
+    accuracy = (tp + tn) / (tp + tn + fp + fn)
+    precision = tp / (tp + fp)
+    recall = tp / (tp + fn)
+
+    return cm, accuracy, precision, recall
+
+# Add interactive visualization
+def plot_confusion_matrix_with_explanations(cm):
+    """Visual confusion matrix with detailed explanations."""
+    fig, ax = plt.subplots(figsize=(10, 8))
+    im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
+    ax.figure.colorbar(im, ax=ax)
+
+    # Add labels and explanations
+    classes = ['Negative', 'Positive']
+    ax.set(xticks=np.arange(cm.shape[1]),
+           yticks=np.arange(cm.shape[0]),
+           xticklabels=classes, yticklabels=classes,
+           title='Confusion Matrix with Correct Definitions',
+           ylabel='True Label',
+           xlabel='Predicted Label')
+
+    # Add text annotations
+    thresh = cm.max() / 2.
+    for i in range(cm.shape[0]):
+        for j in range(cm.shape[1]):
+            ax.text(j, i, format(cm[i, j], 'd'),
+                    ha="center", va="center",
+                    color="white" if cm[i, j] > thresh else "black")
+
+    plt.tight_layout()
+    return fig
+```
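+
+A quick usage sketch tying the two helpers together (all names come from the block above):
+
+```python
+# Run the tutorial, print the headline metrics, and save the plot
+cm, accuracy, precision, recall = create_confusion_matrix_tutorial()
+print(f"Accuracy: {accuracy:.2f}, Precision: {precision:.2f}, Recall: {recall:.2f}")
+
+fig = plot_confusion_matrix_with_explanations(cm)
+fig.savefig("confusion_matrix.png")  # or plt.show() in an interactive session
+```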
+
+---
+
+## 🥈 **PRIORITY 2: Microsoft Recommenders**
+**Repository**: https://github.com/microsoft/recommenders
+**Focus**: Advanced recommendation systems
+
+### 🔥 **High-Impact Issue: Deep Learning Model Implementation**
+**Potential Contribution**: Implement transformer-based recommender system
+
+#### **Problem Explanation**
+Current recommender systems in the repo focus on traditional collaborative filtering and matrix factorization. There's growing demand for:
+- Transformer-based recommendation models
+- Sequential recommendation systems
+- Multi-modal recommendation approaches
+
+#### **Suggested Solution**
+```python
+# Transformer-Based Recommender Implementation
+import numpy as np
+import torch
+import torch.nn as nn
+from torch.nn import Transformer
+
+class TransformerRecommender(nn.Module):
+    """
+    Transformer-based recommendation system for sequential user behavior.
+    """
+
+    def __init__(self, vocab_size, d_model=512, nhead=8, num_layers=6, max_seq_len=100):
+        super().__init__()
+        self.d_model = d_model
+        self.embedding = nn.Embedding(vocab_size, d_model)
+        self.pos_encoding = PositionalEncoding(d_model, max_seq_len)
+        self.transformer = Transformer(
+            d_model=d_model,
+            nhead=nhead,
+            num_encoder_layers=num_layers,
+            num_decoder_layers=num_layers,
+            batch_first=True
+        )
+        self.output_layer = nn.Linear(d_model, vocab_size)
+
+    def forward(self, src, tgt):
+        # Embed and add positional encoding
+        src_emb = self.pos_encoding(self.embedding(src))
+        tgt_emb = self.pos_encoding(self.embedding(tgt))
+
+        # Transformer forward pass
+        output = self.transformer(src_emb, tgt_emb)
+
+        # Output projection
+        return self.output_layer(output)
+
+class PositionalEncoding(nn.Module):
+    """Positional encoding for the transformer."""
+
+    def __init__(self, d_model, max_len=5000):
+        super().__init__()
+        pe = torch.zeros(max_len, d_model)
+        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
+                             (-np.log(10000.0) / d_model))
+        pe[:, 0::2] = torch.sin(position * div_term)
+        pe[:, 1::2] = torch.cos(position * div_term)
+        self.register_buffer('pe', pe.unsqueeze(0))
+
+    def forward(self, x):
+        return x + self.pe[:, :x.size(1)]
+
+# Example usage and training loop
+def train_transformer_recommender():
+    """Skeleton training pipeline for the transformer recommender."""
+    model = TransformerRecommender(vocab_size=10000)
+    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
+    criterion = nn.CrossEntropyLoss()
+
+    for epoch in range(100):
+        # Load batch data, run the forward pass, compute the loss,
+        # backpropagate, and update the weights here.
+        pass
+
+    return model
+```
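+
+A minimal shape check for the sketch above, using random token ids (the names refer only to the classes defined in this plan, not to anything in the recommenders repo):
+
+```python
+import torch
+
+model = TransformerRecommender(vocab_size=10000)
+src = torch.randint(0, 10000, (4, 20))  # batch of 4 users, 20 past interactions each
+tgt = torch.randint(0, 10000, (4, 10))  # 10-step target sequences to decode
+
+logits = model(src, tgt)
+print(logits.shape)  # expected: torch.Size([4, 10, 10000])
+```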
+
+---
+
+## 🥉 **PRIORITY 3: Azure Machine Learning Notebooks**
+**Repository**: https://github.com/Azure/MachineLearningNotebooks
+**Focus**: Cloud ML integration
+
+### 🔥 **High-Impact Issue: MLOps Pipeline Examples**
+**Potential Contribution**: End-to-end MLOps pipeline with Azure ML
+
+#### **Problem Explanation**
+Many developers struggle with implementing complete MLOps workflows that include:
+- Automated model training
+- Model versioning and registry
+- Continuous deployment
+- Monitoring and retraining
+
+#### **Suggested Solution**
+```python
+# Complete MLOps Pipeline Implementation (Azure ML SDK v1)
+from azureml.core import Environment, Workspace
+from azureml.core.model import Model
+from azureml.core.runconfig import RunConfiguration
+from azureml.pipeline.core import Pipeline, PipelineData
+from azureml.pipeline.steps import PythonScriptStep
+
+class MLOpsPipeline:
+    """
+    Complete MLOps pipeline for Azure ML.
+    """
+
+    def __init__(self, workspace, compute_target):
+        self.ws = workspace
+        self.compute_target = compute_target
+        self.run_config = self._create_run_config()
+
+    def _create_run_config(self):
+        """Create a run configuration with the ML environment."""
+        run_config = RunConfiguration()
+        run_config.environment = Environment.from_conda_specification(
+            name="mlops-env",
+            file_path="environment.yml"
+        )
+        return run_config
+
+    def create_training_pipeline(self):
+        """Create automated training pipeline."""
+        # Intermediate data passed between steps
+        datastore = self.ws.get_default_datastore()
+        processed_data = PipelineData("processed_data", datastore=datastore)
+        trained_model = PipelineData("trained_model", datastore=datastore)
+
+        # Data preparation step
+        data_prep_step = PythonScriptStep(
+            script_name="data_preparation.py",
+            compute_target=self.compute_target,
+            runconfig=self.run_config,
+            outputs=[processed_data],
+            allow_reuse=False
+        )
+
+        # Model training step
+        training_step = PythonScriptStep(
+            script_name="train_model.py",
+            compute_target=self.compute_target,
+            runconfig=self.run_config,
+            inputs=[processed_data],
+            outputs=[trained_model],
+            allow_reuse=False
+        )
+
+        # Model evaluation step
+        evaluation_step = PythonScriptStep(
+            script_name="evaluate_model.py",
+            compute_target=self.compute_target,
+            runconfig=self.run_config,
+            inputs=[trained_model],
+            allow_reuse=False
+        )
+
+        # Create pipeline
+        pipeline = Pipeline(
+            workspace=self.ws,
+            steps=[data_prep_step, training_step, evaluation_step]
+        )
+
+        return pipeline
+
+    def deploy_model(self, model_name):
+        """Register the trained model; deployment and monitoring follow."""
+        model = Model.register(
+            workspace=self.ws,
+            model_name=model_name,
+            model_path="outputs/model.pkl"
+        )
+
+        # Next: create a deployment configuration, deploy to AKS or ACI,
+        # and set up monitoring.
+
+        return model
+
+# Example notebook implementation
+def create_mlops_notebook():
+    """Outline for the accompanying MLOps notebook."""
+    notebook_content = """
+    # Complete MLOps Pipeline with Azure ML
+
+    ## 1. Setup and Configuration
+    ## 2. Data Pipeline Creation
+    ## 3. Model Training Automation
+    ## 4. Model Deployment
+    ## 5. Monitoring and Alerts
+    ## 6. Continuous Integration/Deployment
+    """
+    return notebook_content
+```
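+
+A hedged submission sketch using standard SDK v1 calls (the workspace config, the "cpu-cluster" name, and the experiment name are illustrative):
+
+```python
+from azureml.core import Experiment, Workspace
+
+ws = Workspace.from_config()  # reads config.json for the target workspace
+compute_target = ws.compute_targets["cpu-cluster"]  # assumes an existing cluster
+
+mlops = MLOpsPipeline(ws, compute_target)
+pipeline = mlops.create_training_pipeline()
+
+run = Experiment(ws, "mlops-training").submit(pipeline)
+run.wait_for_completion(show_output=True)
+```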
+
+---
+
+## 📋 **Implementation Timeline**
+
+### **Week 1-2: Priority 1 (ML-For-Beginners)**
+- [ ] Fork repository and set up development environment
+- [ ] Create documentation structure
+- [ ] Fix confusion matrix definition
+- [ ] Write comprehensive setup guides
+- [ ] Submit PR with tests and examples
+
+### **Week 3-4: Priority 2 (Recommenders)**
+- [ ] Research transformer-based recommendation systems
+- [ ] Implement transformer recommender model
+- [ ] Create example notebooks and tutorials
+- [ ] Write unit tests and benchmarks
+- [ ] Submit PR with documentation
+
+### **Week 5-6: Priority 3 (Azure ML Notebooks)**
+- [ ] Design complete MLOps pipeline
+- [ ] Implement automated training workflow
+- [ ] Create deployment and monitoring examples
+- [ ] Write comprehensive documentation
+- [ ] Submit PR with full example
+
+---
+
+## 📊 **Contribution Tracking Template**
+
+| Project | Issue | PR Link | Status | Impact Score |
+|---------|-------|---------|--------|--------------|
+| ML-For-Beginners | Documentation | TBD | In Progress | High |
+| ML-For-Beginners | Confusion Matrix Fix | TBD | Planned | Medium |
+| Recommenders | Transformer Model | TBD | Planned | High |
+| Azure ML Notebooks | MLOps Pipeline | TBD | Planned | High |
+
+---
+
+## 🚀 **Next Steps**
+
+1. **Choose your priority project** from the list above
+2. **Set up development environment** for the selected repository
+3. **Start with the highest impact issue** that matches your expertise
+4. **Follow the detailed implementation plan** provided
+5. **Track progress** using the contribution template
+
+Would you like me to help you get started with any of these specific contributions?
diff --git a/contribution_tracker.md b/contribution_tracker.md
new file mode 100644
index 0000000..e429c1d
--- /dev/null
+++ b/contribution_tracker.md
@@ -0,0 +1,211 @@
+# 📈 Open Source Contribution Tracker
+
+## 🎯 Current Goals
+**Target**: 3 high-impact AI/ML contributions to Microsoft's open-source ecosystem
+**Timeline**: 6 weeks
+**Focus Areas**: Python, AI/ML, Cloud Infrastructure
+
+---
+
+## 📊 Active Contributions
+
+### 🟡 **IN PROGRESS**
+
+| **Project** | **Issue** | **Type** | **Difficulty** | **Impact** | **Status** |
+|-------------|-----------|----------|----------------|------------|------------|
+| ML-For-Beginners | [Documentation Enhancement #835](https://github.com/microsoft/ML-For-Beginners/issues/835) | Documentation | Medium | High | Planning |
+| ML-For-Beginners | [Confusion Matrix Fix #825](https://github.com/microsoft/ML-For-Beginners/issues/825) | Bug Fix | Easy | Medium | Ready to Start |
+
+### 🟢 **PLANNED**
+
+| **Project** | **Issue** | **Type** | **Difficulty** | **Impact** | **Target Week** |
+|-------------|-----------|----------|----------------|------------|-----------------|
+| Recommenders | Transformer Model Implementation | Feature | Hard | High | Week 3-4 |
+| Azure ML Notebooks | MLOps Pipeline Example | Tutorial | Medium | High | Week 5-6 |
+
+### ✅ **COMPLETED**
+
+| **Project** | **Issue** | **PR Link** | **Commit ID** | **Merged Date** | **Impact** |
+|-------------|-----------|-------------|---------------|-----------------|------------|
+| *None yet* | *First contribution pending* | - | - | - | - |
+
+---
+
+## 🚀 **Contribution Details**
+
+### **Priority 1: ML-For-Beginners Documentation**
+- **Repository**: https://github.com/microsoft/ML-For-Beginners
+- **Issue**: https://github.com/microsoft/ML-For-Beginners/issues/835
+- **My Role**: Lead Documentation Developer
+- **Scope**:
+ - Create comprehensive documentation framework
+ - Add setup guides and contributor guidelines
+ - Include interactive code examples
+ - Ensure beginner-friendly explanations
+- **Estimated Impact**: High (will help hundreds of learners and contributors)
+
+**Technical Approach**:
+```text
+docs/
+├── README.md        (Main documentation hub)
+├── getting-started/ (Setup and installation)
+├── lessons/         (Lesson structure guides)
+├── api/             (Code reference documentation)
+└── contributing/    (Contribution guidelines)
+```
+
+### **Priority 2: Confusion Matrix Bug Fix**
+- **Repository**: https://github.com/microsoft/ML-For-Beginners
+- **Issue**: https://github.com/microsoft/ML-For-Beginners/issues/825
+- **My Role**: Bug Fix Developer
+- **Scope**:
+ - Correct False Negative definition
+ - Add comprehensive confusion matrix tutorial
+ - Include interactive examples with visualizations
+ - Add unit tests to prevent regression
+- **Estimated Impact**: Medium (corrects fundamental ML concept for learners)
+
+**Technical Fix**:
+```python
+# CORRECT: False Negative = Model predicts NEGATIVE when actual is POSITIVE
+# Example: Cancer test says "no cancer" but patient actually has cancer
+```
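+
+A one-line check against scikit-learn confirms the corrected definition (a sketch; any label pair works):
+
+```python
+from sklearn.metrics import confusion_matrix
+
+# Actual label is positive (1) but the model predicts negative (0): one false negative
+tn, fp, fn, tp = confusion_matrix([1], [0], labels=[0, 1]).ravel()
+print(fn)  # 1
+```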
+
+### **Priority 3: Transformer Recommender Model**
+- **Repository**: https://github.com/microsoft/recommenders
+- **Issue**: To be created - "Implement Transformer-based Recommender"
+- **My Role**: ML Engineer / Feature Developer
+- **Scope**:
+ - Implement transformer architecture for recommendations
+ - Create sequential recommendation pipeline
+ - Add comprehensive notebook tutorials
+ - Include benchmarking against existing models
+- **Estimated Impact**: High (adds state-of-the-art recommendation capability)
+
+### **Priority 4: MLOps Pipeline Tutorial**
+- **Repository**: https://github.com/Azure/MachineLearningNotebooks
+- **Issue**: To be created - "End-to-end MLOps Pipeline Example"
+- **My Role**: Cloud ML Engineer
+- **Scope**:
+ - Create complete MLOps workflow
+ - Include automated training, deployment, monitoring
+ - Add CI/CD integration examples
+ - Provide Azure ML best practices
+- **Estimated Impact**: High (helps teams implement production ML systems)
+
+---
+
+## 📅 **Weekly Progress Timeline**
+
+### **Week 1** (Current)
+- [x] Identify target repositories and issues
+- [x] Create contribution plan and tracking system
+- [ ] Fork ML-For-Beginners repository
+- [ ] Set up development environment
+- [ ] Start documentation framework
+
+### **Week 2**
+- [ ] Complete documentation structure
+- [ ] Fix confusion matrix definition bug
+- [ ] Submit first PR for documentation
+- [ ] Begin code review process
+
+### **Week 3**
+- [ ] Start transformer recommender research
+- [ ] Design model architecture
+- [ ] Begin implementation
+- [ ] Create project proposal issue
+
+### **Week 4**
+- [ ] Complete transformer model implementation
+- [ ] Write comprehensive tests
+- [ ] Create tutorial notebook
+- [ ] Submit transformer recommender PR
+
+### **Week 5**
+- [ ] Design MLOps pipeline architecture
+- [ ] Implement automated training workflow
+- [ ] Add deployment and monitoring
+- [ ] Create comprehensive documentation
+
+### **Week 6**
+- [ ] Complete MLOps pipeline tutorial
+- [ ] Add CI/CD integration examples
+- [ ] Submit final PR
+- [ ] Document lessons learned
+
+---
+
+## 🏆 **Success Metrics**
+
+### **Quantitative Goals**
+- **3** merged pull requests
+- **500+** lines of quality code contributed
+- **3** comprehensive tutorials/documentation pages
+- **10+** interactive code examples
+
+### **Qualitative Goals**
+- **Educational Impact**: Help beginner ML practitioners learn effectively
+- **Technical Advancement**: Add state-of-the-art capabilities to open-source tools
+- **Community Building**: Engage with maintainers and other contributors
+- **Skill Development**: Deepen expertise in ML, documentation, and open-source practices
+
+### **Recognition Targets**
+- Get featured in project newsletters/announcements
+- Receive positive feedback from maintainers
+- Help other contributors with related issues
+- Build reputation in AI/ML open-source community
+
+---
+
+## 🔧 **Development Setup Checklist**
+
+### **General Setup**
+- [ ] Configure Git with proper credentials
+- [ ] Set up SSH keys for GitHub
+- [ ] Install Python development environment
+- [ ] Configure code editor with linting/formatting
+
+### **Project-Specific Setup**
+- [ ] Fork target repositories
+- [ ] Clone repositories locally
+- [ ] Install project dependencies
+- [ ] Run existing tests to ensure setup
+- [ ] Read CONTRIBUTING.md guidelines
+
+### **Quality Assurance**
+- [ ] Set up pre-commit hooks
+- [ ] Configure linting (flake8, black, isort)
+- [ ] Install testing frameworks (pytest)
+- [ ] Set up coverage reporting
+
+---
+
+## 📞 **Contact and Collaboration**
+
+### **Maintainer Engagement**
+- **Strategy**: Engage early and often with maintainers
+- **Approach**: Ask questions, provide updates, seek feedback
+- **Communication**: Use GitHub issues, discussions, and PR comments
+
+### **Community Involvement**
+- **Participate**: Join project discussions and community calls
+- **Support**: Help other contributors with similar issues
+- **Share**: Document learning and best practices
+
+### **Follow-up Plans**
+- **Long-term**: Become regular contributor to selected projects
+- **Mentoring**: Help onboard other new contributors
+- **Speaking**: Present about contributions at meetups/conferences
+
+---
+
+## 🎯 **Next Immediate Actions**
+
+1. **Choose Priority 1 or 2** to start with (the Confusion Matrix fix is recommended for a quick win)
+2. **Fork the ML-For-Beginners repository**
+3. **Set up local development environment**
+4. **Create first branch and start coding**
+5. **Engage with maintainers** on the selected issue
+
+**Ready to begin?** Let me know which issue you'd like to tackle first, and I'll provide detailed implementation guidance!
diff --git a/index.html b/index.html
new file mode 100644
index 0000000..1175191
--- /dev/null
+++ b/index.html
@@ -0,0 +1,18 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>Microsoft AI Open Source Collection</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/main.tsx"></script>
+  </body>
+</html>
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..9a11a10
--- /dev/null
+++ b/package.json
@@ -0,0 +1,30 @@
+{
+ "name": "microsoft-ai-portal",
+ "private": true,
+ "version": "0.1.0",
+ "type": "module",
+ "scripts": {
+ "dev": "vite",
+ "build": "tsc && vite build",
+ "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
+ "preview": "vite preview"
+ },
+ "dependencies": {
+ "lucide-react": "^0.344.0",
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "framer-motion": "^11.0.8",
+ "clsx": "^2.1.0",
+ "tailwind-merge": "^2.2.1"
+ },
+ "devDependencies": {
+ "@types/react": "^18.2.64",
+ "@types/react-dom": "^18.2.21",
+ "@vitejs/plugin-react": "^4.2.1",
+ "autoprefixer": "^10.4.18",
+ "postcss": "^8.4.35",
+ "tailwindcss": "^3.4.1",
+ "typescript": "^5.2.2",
+ "vite": "^5.1.6"
+ }
+}
diff --git a/postcss.config.js b/postcss.config.js
new file mode 100644
index 0000000..2e7af2b
--- /dev/null
+++ b/postcss.config.js
@@ -0,0 +1,6 @@
+export default {
+ plugins: {
+ tailwindcss: {},
+ autoprefixer: {},
+ },
+}
diff --git a/src/App.tsx b/src/App.tsx
new file mode 100644
index 0000000..1df9499
--- /dev/null
+++ b/src/App.tsx
@@ -0,0 +1,29 @@
+import React from 'react';
+import { Navbar } from './components/Navbar';
+import { Hero } from './components/Hero';
+import { ResourceSection } from './components/ResourceSection';
+import { Footer } from './components/Footer';
+import { resources } from './data/resources';
+
+function App() {
+  return (
+    <div className="min-h-screen bg-white">
+      <Navbar />
+      <main>
+        <Hero />
+        {resources.map((category, index) => (
+          <ResourceSection key={category.id} category={category} index={index} />
+        ))}
+      </main>
+      <Footer />
+    </div>
+  );
+}
+
+export default App;
diff --git a/src/components/Footer.tsx b/src/components/Footer.tsx
new file mode 100644
index 0000000..dd183ee
--- /dev/null
+++ b/src/components/Footer.tsx
@@ -0,0 +1,18 @@
+import React from 'react';
+
+export const Footer: React.FC = () => {
+  return (
+    <footer className="bg-slate-900 text-slate-400">
+      <div className="max-w-7xl mx-auto px-4 py-8 text-center text-sm">
+        <p>
+          © {new Date().getFullYear()} Microsoft AI Open Source Collection. All rights reserved.
+        </p>
+        <p className="mt-2">
+          This portal is a frontend contribution to improve the accessibility of the Microsoft AI repository.
+        </p>
+      </div>
+    </footer>
+  );
+};
diff --git a/src/components/Hero.tsx b/src/components/Hero.tsx
new file mode 100644
index 0000000..beee98c
--- /dev/null
+++ b/src/components/Hero.tsx
@@ -0,0 +1,52 @@
+import React from 'react';
+import { motion } from 'framer-motion';
+import { ArrowRight } from 'lucide-react';
+
+export const Hero: React.FC = () => {
+  return (
+    <section className="relative overflow-hidden bg-gradient-to-b from-primary-50 to-white">
+      <div className="max-w-7xl mx-auto px-4 py-24 text-center">
+        <motion.div
+          initial={{ opacity: 0, y: 20 }}
+          animate={{ opacity: 1, y: 0 }}
+          transition={{ duration: 0.6 }}
+        >
+          <h1 className="text-4xl md:text-6xl font-bold text-slate-900">
+            Microsoft AI
+            <span className="block text-primary-600">Open Source Collection</span>
+          </h1>
+          <p className="mt-6 max-w-2xl mx-auto text-lg text-slate-600">
+            Discover a curated library of samples, reference architectures, and best practices designed to accelerate your AI journey on Azure.
+          </p>
+          <div className="mt-8 flex items-center justify-center gap-4">
+            <a
+              href="#ai100"
+              className="inline-flex items-center gap-2 rounded-lg bg-primary-600 px-6 py-3 font-medium text-white hover:bg-primary-700"
+            >
+              Get Started
+              <ArrowRight className="h-4 w-4" />
+            </a>
+            <a href="#ai300" className="font-medium text-primary-600 hover:text-primary-700">
+              View Best Practices →
+            </a>
+          </div>
+        </motion.div>
+      </div>
+
+      {/* Decorative background elements */}
+      <div className="pointer-events-none absolute -top-24 -right-24 h-72 w-72 rounded-full bg-primary-100 blur-3xl" />
+    </section>
+  );
+};
diff --git a/src/components/Navbar.tsx b/src/components/Navbar.tsx
new file mode 100644
index 0000000..c18ae65
--- /dev/null
+++ b/src/components/Navbar.tsx
@@ -0,0 +1,32 @@
+import React from 'react';
+import { Github, Menu } from 'lucide-react';
+
+export const Navbar: React.FC = () => {
+  return (
+    <header className="sticky top-0 z-50 border-b border-slate-200 bg-white/90 backdrop-blur">
+      <nav className="max-w-7xl mx-auto flex h-16 items-center justify-between px-4">
+        <div className="flex items-center gap-2">
+          <span className="flex h-8 w-8 items-center justify-center rounded bg-primary-600 text-sm font-bold text-white">
+            AI
+          </span>
+          <span className="font-semibold text-slate-900">Microsoft AI Open Source</span>
+        </div>
+        <div className="flex items-center gap-4">
+          <a
+            href="https://github.com/microsoft/AI"
+            target="_blank"
+            rel="noopener noreferrer"
+            aria-label="GitHub repository"
+            className="text-slate-600 hover:text-slate-900"
+          >
+            <Github className="h-5 w-5" />
+          </a>
+          <button aria-label="Open menu" className="text-slate-600 md:hidden">
+            <Menu className="h-5 w-5" />
+          </button>
+        </div>
+      </nav>
+    </header>
+  );
+};
diff --git a/src/components/ResourceSection.tsx b/src/components/ResourceSection.tsx
new file mode 100644
index 0000000..b4638c2
--- /dev/null
+++ b/src/components/ResourceSection.tsx
@@ -0,0 +1,67 @@
+import React from 'react';
+import { ResourceCategory } from '../data/resources';
+import { ExternalLink } from 'lucide-react';
+import { motion } from 'framer-motion';
+
+interface Props {
+  category: ResourceCategory;
+  index: number;
+}
+
+export const ResourceSection: React.FC<Props> = ({ category, index }) => {
+  const isEven = index % 2 === 0;
+
+  return (
+    <section id={category.id} className={isEven ? 'bg-white py-16' : 'bg-slate-50 py-16'}>
+      <div className="max-w-7xl mx-auto px-4">
+        <div className="mb-10 max-w-2xl">
+          <h2 className="text-3xl font-bold text-slate-900">{category.title}</h2>
+          <p className="mt-2 text-slate-600">{category.description}</p>
+        </div>
+
+        <div className="grid gap-6 md:grid-cols-2 lg:grid-cols-3">
+          {category.items.map((item, itemIndex) => {
+            const Icon = item.icon;
+            return (
+              <motion.a
+                key={item.id}
+                href={item.url}
+                target="_blank"
+                rel="noopener noreferrer"
+                initial={{ opacity: 0, y: 16 }}
+                whileInView={{ opacity: 1, y: 0 }}
+                viewport={{ once: true }}
+                transition={{ delay: itemIndex * 0.05 }}
+                className="group block rounded-xl border border-slate-200 bg-white p-6 transition-shadow hover:shadow-lg"
+              >
+                <div className="flex items-center justify-between">
+                  {Icon && <Icon className="h-6 w-6 text-primary-600" />}
+                  <ExternalLink className="h-4 w-4 text-slate-400 group-hover:text-primary-600" />
+                </div>
+
+                <h3 className="mt-4 font-semibold text-slate-900">
+                  {item.title}
+                </h3>
+
+                <p className="mt-2 text-sm text-slate-600">
+                  {item.description}
+                </p>
+
+                <div className="mt-4 flex flex-wrap gap-2">
+                  {item.tags.map(tag => (
+                    <span key={tag} className="rounded-full bg-primary-50 px-2.5 py-0.5 text-xs text-primary-700">
+                      {tag}
+                    </span>
+                  ))}
+                </div>
+              </motion.a>
+            );
+          })}
+        </div>
+      </div>
+    </section>
+  );
+};
diff --git a/src/data/resources.ts b/src/data/resources.ts
new file mode 100644
index 0000000..854c2da
--- /dev/null
+++ b/src/data/resources.ts
@@ -0,0 +1,189 @@
+import { createElement } from 'react';
+import { BookOpen, Code2, Layers, Cpu, Database, Cloud, Terminal, BrainCircuit } from 'lucide-react';
+
+export interface ResourceItem {
+ id: string;
+ title: string;
+ description: string;
+ url: string;
+ tags: string[];
+ icon?: any;
+}
+
+export interface ResourceCategory {
+ id: string;
+ title: string;
+ description: string;
+ items: ResourceItem[];
+}
+
+export const resources: ResourceCategory[] = [
+ {
+ id: 'ai100',
+ title: 'AI100 - Samples',
+ description: 'A collection of open source Python repositories created by Microsoft product teams focusing on AI services.',
+ items: [
+ {
+ id: 'azure-ml-sdk',
+ title: 'Azure ML Python SDK',
+ description: 'Python notebooks with ML and deep learning examples with Azure Machine Learning.',
+ url: 'https://github.com/Azure/MachineLearningNotebooks',
+ tags: ['Python', 'Azure ML', 'Notebooks'],
+ icon: Code2
+ },
+ {
+ id: 'cognitive-services',
+ title: 'Azure Cognitive Services Python SDK',
+ description: 'Learn how to use the Cognitive Services Python SDK with these samples.',
+ url: 'https://github.com/Azure-Samples/cognitive-services-python-sdk-samples',
+ tags: ['Python', 'Cognitive Services', 'SDK'],
+ icon: BrainCircuit
+ },
+ {
+ id: 'intelligent-kiosk',
+ title: 'Azure Intelligent Kiosk',
+ description: 'Demos showcasing workflows and experiences built on top of Microsoft Cognitive Services.',
+ url: 'https://github.com/microsoft/Cognitive-Samples-IntelligentKiosk',
+ tags: ['Demos', 'Workflows', 'Cognitive Services'],
+ icon: Layers
+ },
+ {
+ id: 'mml-spark',
+ title: 'MML Spark Samples',
+ description: 'Ecosystem of tools aimed towards expanding the distributed computing framework Apache Spark.',
+ url: 'https://github.com/Azure/mmlspark/tree/master/notebooks/samples',
+ tags: ['Spark', 'Distributed Computing', 'MMLSpark'],
+ icon: Database
+ },
+ {
+ id: 'seismic-dl',
+ title: 'Seismic Deep Learning Samples',
+ description: 'Deep Learning for Seismic Imaging and Interpretation.',
+ url: 'https://github.com/microsoft/seismic-deeplearning/',
+ tags: ['Deep Learning', 'Seismic', 'Imaging'],
+ icon: ActivityIcon
+ }
+ ]
+ },
+ {
+ id: 'ai200',
+ title: 'AI200 - Reference Architectures',
+ description: 'Architectures arranged by scenario, including considerations for scalability, availability, manageability, and security.',
+ items: [
+ {
+ id: 'classic-ml-k8s',
+ title: 'Deploy Classic ML Model on Kubernetes',
+ description: 'Train LightGBM model locally using Azure ML, deploy on Kubernetes or IoT Edge for real-time scoring.',
+ url: 'https://github.com/microsoft/MLAKSDeployAML',
+ tags: ['Python', 'CPU', 'Real-Time Scoring', 'Kubernetes'],
+ icon: Cloud
+ },
+ {
+ id: 'dl-k8s',
+ title: 'Deploy Deep Learning Model on Kubernetes',
+ description: 'Deploy image classification model on Kubernetes or IoT Edge for real-time scoring using Azure ML.',
+ url: 'https://github.com/microsoft/AKSDeploymentTutorialAML',
+ tags: ['Python', 'Keras', 'Real-Time Scoring', 'Kubernetes'],
+ icon: Cloud
+ },
+ {
+ id: 'hyperparameter',
+ title: 'Hyperparameter Tuning',
+ description: 'Train LightGBM model locally and run Hyperparameter tuning using Hyperdrive in Azure ML.',
+ url: 'https://github.com/Microsoft/MLHyperparameterTuning',
+ tags: ['Python', 'CPU', 'Training', 'Hyperdrive'],
+ icon: Cpu
+ },
+ {
+ id: 'dl-pipelines',
+ title: 'Deploy Deep Learning Model on Pipelines',
+ description: 'Deploy PyTorch style transfer model for batch scoring using Azure ML Pipelines.',
+ url: 'https://github.com/Azure/Batch-Scoring-Deep-Learning-Models-With-AML',
+ tags: ['Python', 'GPU', 'Batch Scoring', 'PyTorch'],
+ icon: Layers
+ },
+ {
+ id: 'classic-ml-pipelines',
+ title: 'Deploy Classic ML Model on Pipelines',
+ description: 'Deploy one-class SVM for batch scoring anomaly detection using Azure ML Pipelines.',
+ url: 'https://github.com/Microsoft/AMLBatchScoringPipeline',
+ tags: ['Python', 'CPU', 'Batch Scoring', 'SVM'],
+ icon: Layers
+ },
+ {
+ id: 'r-ml-k8s',
+ title: 'Deploy R ML Model on Kubernetes',
+ description: 'Deploy ML model for real-time scoring on Kubernetes.',
+ url: 'https://github.com/Azure/RealtimeRDeployment',
+ tags: ['R', 'CPU', 'Real-Time Scoring', 'Kubernetes'],
+ icon: Terminal
+ },
+ {
+ id: 'spark-databricks',
+ title: 'Deploy Spark ML Model on Databricks',
+ description: 'Deploy a classification model for batch scoring using Databricks.',
+ url: 'https://github.com/Azure/BatchSparkScoringPredictiveMaintenance',
+ tags: ['Python', 'Spark', 'Batch Scoring', 'Databricks'],
+ icon: Database
+ }
+ ]
+ },
+ {
+ id: 'ai300',
+ title: 'AI300 - Best Practices',
+ description: 'Best practices arranged by topic, including open source methods and considerations for production.',
+ items: [
+ {
+ id: 'cv',
+ title: 'Computer Vision',
+ description: 'Accelerate the development of computer vision applications with examples and guidelines.',
+ url: 'https://github.com/microsoft/computervision',
+ tags: ['Computer Vision', 'Best Practices'],
+ icon: BookOpen
+ },
+ {
+ id: 'nlp',
+ title: 'Natural Language Processing',
+ description: 'State-of-the-art methods and common scenarios for text and language problems.',
+ url: 'https://github.com/microsoft/nlp',
+ tags: ['NLP', 'Text', 'Language'],
+ icon: BookOpen
+ },
+ {
+ id: 'recommenders',
+ title: 'Recommenders',
+ description: 'Examples and best practices for building recommendation systems (Jupyter notebooks).',
+ url: 'https://github.com/microsoft/recommenders',
+ tags: ['Recommenders', 'Jupyter'],
+ icon: BookOpen
+ },
+ {
+ id: 'mlops',
+ title: 'MLOps',
+ description: 'MLOps empowers data scientists and app developers to help bring ML models to production.',
+ url: 'https://github.com/microsoft/MLOps',
+ tags: ['MLOps', 'Production', 'DevOps'],
+ icon: BookOpen
+ }
+ ]
+ }
+];
+
+// Helper component for icon fallback (plain .ts file, so no JSX here)
+function ActivityIcon(props: any) {
+  return createElement(
+    'svg',
+    {
+      xmlns: 'http://www.w3.org/2000/svg',
+      viewBox: '0 0 24 24',
+      fill: 'none',
+      stroke: 'currentColor',
+      strokeWidth: 2,
+      strokeLinecap: 'round',
+      strokeLinejoin: 'round',
+      ...props,
+    },
+    createElement('polyline', { points: '22 12 18 12 15 21 9 3 6 12 2 12' })
+  );
+}
diff --git a/src/index.css b/src/index.css
new file mode 100644
index 0000000..d6e34ea
--- /dev/null
+++ b/src/index.css
@@ -0,0 +1,29 @@
+@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
+
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
+
+@layer base {
+ body {
+ @apply font-sans;
+ }
+}
+
+/* Custom scrollbar for a cleaner look */
+::-webkit-scrollbar {
+ width: 8px;
+}
+
+::-webkit-scrollbar-track {
+ background: #f1f5f9;
+}
+
+::-webkit-scrollbar-thumb {
+ background: #cbd5e1;
+ border-radius: 4px;
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background: #94a3b8;
+}
diff --git a/tailwind.config.js b/tailwind.config.js
new file mode 100644
index 0000000..cc43103
--- /dev/null
+++ b/tailwind.config.js
@@ -0,0 +1,30 @@
+/** @type {import('tailwindcss').Config} */
+export default {
+ content: [
+ "./index.html",
+ "./src/**/*.{js,ts,jsx,tsx}",
+ ],
+ theme: {
+ extend: {
+ colors: {
+ primary: {
+ 50: '#eff6ff',
+ 100: '#dbeafe',
+ 200: '#bfdbfe',
+ 300: '#93c5fd',
+ 400: '#60a5fa',
+ 500: '#3b82f6',
+ 600: '#2563eb',
+ 700: '#1d4ed8',
+ 800: '#1e40af',
+ 900: '#1e3a8a',
+ 950: '#172554',
+ }
+ },
+ fontFamily: {
+ sans: ['Inter', 'system-ui', 'sans-serif'],
+ }
+ },
+ },
+ plugins: [],
+}
diff --git a/tsconfig.json b/tsconfig.json
new file mode 100644
index 0000000..4ed36bf
--- /dev/null
+++ b/tsconfig.json
@@ -0,0 +1,25 @@
+{
+ "compilerOptions": {
+ "target": "ES2020",
+ "useDefineForClassFields": true,
+ "lib": ["ES2020", "DOM", "DOM.Iterable"],
+ "module": "ESNext",
+ "skipLibCheck": true,
+ "moduleResolution": "bundler",
+ "allowImportingTsExtensions": true,
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "noEmit": true,
+ "jsx": "react-jsx",
+ "strict": true,
+ "noUnusedLocals": true,
+ "noUnusedParameters": true,
+ "noFallthroughCasesInSwitch": true,
+ "baseUrl": "./src",
+ "paths": {
+ "@/*": ["./*"]
+ }
+ },
+ "include": ["src"],
+ "references": [{ "path": "./tsconfig.node.json" }]
+}
diff --git a/tsconfig.node.json b/tsconfig.node.json
new file mode 100644
index 0000000..42872c5
--- /dev/null
+++ b/tsconfig.node.json
@@ -0,0 +1,10 @@
+{
+ "compilerOptions": {
+ "composite": true,
+ "skipLibCheck": true,
+ "module": "ESNext",
+ "moduleResolution": "bundler",
+ "allowSyntheticDefaultImports": true
+ },
+ "include": ["vite.config.ts"]
+}
diff --git a/vite.config.ts b/vite.config.ts
new file mode 100644
index 0000000..e2f45a7
--- /dev/null
+++ b/vite.config.ts
@@ -0,0 +1,13 @@
+import { defineConfig } from 'vite'
+import react from '@vitejs/plugin-react'
+import path from 'path'
+
+// https://vitejs.dev/config/
+export default defineConfig({
+ plugins: [react()],
+ resolve: {
+ alias: {
+ '@': path.resolve(__dirname, './src'),
+ },
+ },
+})