diff --git a/dotnet/Face/Detect.cs b/dotnet/Face/Detect.cs
new file mode 100644
index 00000000..89be6009
--- /dev/null
+++ b/dotnet/Face/Detect.cs
@@ -0,0 +1,104 @@
+using System.Drawing;
+
+using Azure;
+using Azure.AI.Vision.Face;
+
+namespace FaceQuickstart
+{
+ class Program
+ {
+ static string SUBSCRIPTION_KEY = "PASTE_YOUR_FACE_SUBSCRIPTION_KEY_HERE";
+ static string ENDPOINT = "PASTE_YOUR_FACE_ENDPOINT_HERE";
+
+ static async Task Quickstart()
+ {
+ FaceClient faceClient = new FaceClient(new Uri(ENDPOINT), new AzureKeyCredential(SUBSCRIPTION_KEY));
+
+ var imageUrl = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/faces.jpg";
+
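+ // Detect faces in the image at the URL. Detection03 is the newest detection model; Recognition04 is the recommended recognition model.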
+ var response = await faceClient.DetectAsync(new Uri(imageUrl), FaceDetectionModel.Detection03, FaceRecognitionModel.Recognition04, returnFaceId: false);
+ IReadOnlyList<FaceDetectionResult> faces = response.Value;
+
+ foreach (var face in faces)
+ {
+ FaceRectangle rect = face.FaceRectangle;
+ Console.WriteLine($"Face detected at left={rect.Left}, top={rect.Top}, width={rect.Width}, height={rect.Height}");
+ }
+
+ // Note: FaceDetectionModel.Detection02 cannot be used with returnFaceLandmarks.
+ var response2 = await faceClient.DetectAsync(new Uri(imageUrl), FaceDetectionModel.Detection03, FaceRecognitionModel.Recognition04, returnFaceId: false, returnFaceLandmarks: true);
+ IReadOnlyList<FaceDetectionResult> faces2 = response2.Value;
+
+ foreach (var face in faces2)
+ {
+ var landmarks = face.FaceLandmarks;
+
+ double noseX = landmarks.NoseTip.X;
+ double noseY = landmarks.NoseTip.Y;
+
+ double leftPupilX = landmarks.PupilLeft.X;
+ double leftPupilY = landmarks.PupilLeft.Y;
+
+ double rightPupilX = landmarks.PupilRight.X;
+ double rightPupilY = landmarks.PupilRight.Y;
+
+ var upperLipBottom = landmarks.UpperLipBottom;
+ var underLipTop = landmarks.UnderLipTop;
+
+ var centerOfMouth = new Point(
+ (int)((upperLipBottom.X + underLipTop.X) / 2),
+ (int)((upperLipBottom.Y + underLipTop.Y) / 2));
+
+ var eyeLeftInner = landmarks.EyeLeftInner;
+ var eyeRightInner = landmarks.EyeRightInner;
+
+ var centerOfTwoEyes = new Point(
+ (int)((eyeLeftInner.X + eyeRightInner.X) / 2),
+ (int)((eyeLeftInner.Y + eyeRightInner.Y) / 2));
+
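+ // Rough in-plane face direction: the vector from the center of the mouth to the midpoint of the inner eye corners.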
+ var faceDirectionVectorX = centerOfTwoEyes.X - centerOfMouth.X;
+ var faceDirectionVectorY = centerOfTwoEyes.Y - centerOfMouth.Y;
+ }
+
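+ // Request only attributes supported by the chosen models: Blur, HeadPose, and Mask come from detection_03; QualityForRecognition requires recognition_03 or recognition_04.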
+ var requiredFaceAttributes = new FaceAttributeType[] {
+ FaceAttributeType.Detection03.Blur,
+ FaceAttributeType.Detection03.HeadPose,
+ FaceAttributeType.Detection03.Mask,
+ FaceAttributeType.Recognition04.QualityForRecognition
+ };
+ // Note: FaceDetectionModel.Detection02 cannot be used with returnFaceAttributes.
+ var response3 = await faceClient.DetectAsync(new Uri(imageUrl), FaceDetectionModel.Detection03, FaceRecognitionModel.Recognition04, returnFaceId: false, returnFaceAttributes: requiredFaceAttributes);
+ IReadOnlyList<FaceDetectionResult> faces3 = response3.Value;
+
+ foreach (var face in faces3)
+ {
+ var attributes = face.FaceAttributes;
+ var blur = attributes.Blur;
+ var headPose = attributes.HeadPose;
+ var mask = attributes.Mask;
+ var qualityForRecognition = attributes.QualityForRecognition;
+ }
+ }
+
+ static void Main(string[] args)
+ {
+ Quickstart().Wait();
+ Console.WriteLine("Press any key to exit.");
+ Console.ReadKey();
+ }
+ }
+}
\ No newline at end of file
diff --git a/dotnet/Face/FindSimilar.cs b/dotnet/Face/FindSimilar.cs
new file mode 100644
index 00000000..04c8c6dd
--- /dev/null
+++ b/dotnet/Face/FindSimilar.cs
@@ -0,0 +1,107 @@
+using Azure;
+using Azure.AI.Vision.Face;
+
+namespace FaceQuickstart
+{
+ class Program
+ {
+ const string IMAGE_BASE_URL = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/";
+
+ static readonly string SUBSCRIPTION_KEY = Environment.GetEnvironmentVariable("FACE_APIKEY") ?? "";
+ static readonly string ENDPOINT = Environment.GetEnvironmentVariable("FACE_ENDPOINT") ?? "";
+
+ static void Main(string[] args)
+ {
+
+ FaceRecognitionModel RECOGNITION_MODEL4 = FaceRecognitionModel.Recognition04;
+
+ FaceClient client = Authenticate(ENDPOINT, SUBSCRIPTION_KEY);
+ FindSimilar(client, IMAGE_BASE_URL, RECOGNITION_MODEL4).Wait();
+ }
+
+ public static FaceClient Authenticate(string endpoint, string key)
+ {
+ return new FaceClient(new Uri(endpoint), new AzureKeyCredential(key));
+ }
+
+ private static async Task<List<FaceDetectionResult>> DetectFaceRecognize(FaceClient faceClient, string url, FaceRecognitionModel recognition_model)
+ {
+ // Detect faces from image URL.
+ Response<IReadOnlyList<FaceDetectionResult>> response = await faceClient.DetectAsync(new Uri(url), FaceDetectionModel.Detection03, recognition_model, returnFaceId: true, [FaceAttributeType.QualityForRecognition]);
+ IReadOnlyList<FaceDetectionResult> detectedFaces = response.Value;
+ List<FaceDetectionResult> sufficientQualityFaces = new List<FaceDetectionResult>();
+ foreach (FaceDetectionResult detectedFace in detectedFaces)
+ {
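+ // Keep only faces whose quality for recognition is medium or high; low-quality faces reduce recognition accuracy.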
+ var faceQualityForRecognition = detectedFace.FaceAttributes.QualityForRecognition;
+ if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value != QualityForRecognition.Low))
+ {
+ sufficientQualityFaces.Add(detectedFace);
+ }
+ }
+ Console.WriteLine($"{detectedFaces.Count} face(s) with {sufficientQualityFaces.Count} having sufficient quality for recognition detected from image `{Path.GetFileName(url)}`");
+
+ return sufficientQualityFaces;
+ }
+
+ public static async Task FindSimilar(FaceClient client, string base_url, FaceRecognitionModel recognition_model)
+ {
+ Console.WriteLine("========FIND SIMILAR========");
+ Console.WriteLine();
+
+ List targetImageFileNames = new List
+ {
+ "Family1-Dad1.jpg",
+ "Family1-Daughter1.jpg",
+ "Family1-Mom1.jpg",
+ "Family1-Son1.jpg",
+ "Family2-Lady1.jpg",
+ "Family2-Man1.jpg",
+ "Family3-Lady1.jpg",
+ "Family3-Man1.jpg"
+ };
+
+ string sourceImageFileName = "findsimilar.jpg";
+ IList<Guid> targetFaceIds = new List<Guid>();
+ foreach (var targetImageFileName in targetImageFileNames)
+ {
+ // Detect faces from target image url.
+ var faces = await DetectFaceRecognize(client, $"{base_url}{targetImageFileName}", recognition_model);
+ // Add detected faceId to list of GUIDs.
+ targetFaceIds.Add(faces[0].FaceId.Value);
+ }
+
+ // Detect faces from source image url.
+ IList<FaceDetectionResult> detectedFaces = await DetectFaceRecognize(client, $"{base_url}{sourceImageFileName}", recognition_model);
+ Console.WriteLine();
+
+ // Find similar faces in the list of target face IDs. Only the first detected source face is compared, for testing purposes.
+ Response<IReadOnlyList<FaceFindSimilarResult>> response = await client.FindSimilarAsync(detectedFaces[0].FaceId.Value, targetFaceIds);
+ IList<FaceFindSimilarResult> similarResults = response.Value.ToList();
+ foreach (var similarResult in similarResults)
+ {
+ Console.WriteLine($"Faces from {sourceImageFileName} & ID:{similarResult.FaceId} are similar with confidence: {similarResult.Confidence}.");
+ }
+ Console.WriteLine();
+ }
+ }
+}
\ No newline at end of file
diff --git a/dotnet/Face/Quickstart.cs b/dotnet/Face/Quickstart.cs
new file mode 100644
index 00000000..01734f63
--- /dev/null
+++ b/dotnet/Face/Quickstart.cs
@@ -0,0 +1,246 @@
+using System.Net.Http.Headers;
+using System.Text;
+
+using Azure;
+using Azure.AI.Vision.Face;
+using Newtonsoft.Json;
+using Newtonsoft.Json.Linq;
+
+namespace FaceQuickstart
+{
+ class Program
+ {
+ static readonly string personGroupId = Guid.NewGuid().ToString();
+
+ // URL path for the images.
+ const string IMAGE_BASE_URL = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/";
+
+ // From your Face subscription in the Azure portal, get your subscription key and endpoint.
+ static readonly string SUBSCRIPTION_KEY = Environment.GetEnvironmentVariable("FACE_APIKEY") ?? "";
+ static readonly string ENDPOINT = Environment.GetEnvironmentVariable("FACE_ENDPOINT") ?? "";
+
+ static void Main(string[] args)
+ {
+ // Recognition model 4 was released in 2021 February.
+ // It is recommended since its accuracy is improved
+ // on faces wearing masks compared with model 3,
+ // and its overall accuracy is improved compared
+ // with models 1 and 2.
+ FaceRecognitionModel RECOGNITION_MODEL4 = FaceRecognitionModel.Recognition04;
+
+ // Authenticate.
+ FaceClient client = Authenticate(ENDPOINT, SUBSCRIPTION_KEY);
+
+ // Identify - recognize a face(s) in a person group (a person group is created in this example).
+ IdentifyInPersonGroup(client, IMAGE_BASE_URL, RECOGNITION_MODEL4).Wait();
+
+ Console.WriteLine("End of quickstart.");
+ }
+
+ /*
+ * AUTHENTICATE
+ * Uses subscription key and endpoint to create a client.
+ */
+ public static FaceClient Authenticate(string endpoint, string key)
+ {
+ return new FaceClient(new Uri(endpoint), new AzureKeyCredential(key));
+ }
+
+ // Detect faces from image url for recognition purposes. This is a helper method for other functions in this quickstart.
+ // Parameter `returnFaceId` of `DetectAsync` must be set to `true` (by default) for recognition purposes.
+ // Parameter `returnFaceAttributes` is set to include the QualityForRecognition attribute.
+ // Recognition model must be set to recognition_03 or recognition_04 as a result.
+ // Result faces with insufficient quality for recognition are filtered out.
+ // The field `faceId` in returned `DetectedFace`s will be used in Verify and Identify.
+ // It will expire 24 hours after the detection call.
+ private static async Task<List<FaceDetectionResult>> DetectFaceRecognize(FaceClient faceClient, string url, FaceRecognitionModel recognition_model)
+ {
+ // Detect faces from image URL.
+ Response<IReadOnlyList<FaceDetectionResult>> response = await faceClient.DetectAsync(new Uri(url), FaceDetectionModel.Detection03, recognition_model, returnFaceId: true, [FaceAttributeType.QualityForRecognition]);
+ IReadOnlyList<FaceDetectionResult> detectedFaces = response.Value;
+ List<FaceDetectionResult> sufficientQualityFaces = new List<FaceDetectionResult>();
+ foreach (FaceDetectionResult detectedFace in detectedFaces)
+ {
+ var faceQualityForRecognition = detectedFace.FaceAttributes.QualityForRecognition;
+ if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value != QualityForRecognition.Low))
+ {
+ sufficientQualityFaces.Add(detectedFace);
+ }
+ }
+ Console.WriteLine($"{detectedFaces.Count} face(s) with {sufficientQualityFaces.Count} having sufficient quality for recognition detected from image `{Path.GetFileName(url)}`");
+
+ return sufficientQualityFaces;
+ }
+
+ /*
+ * IDENTIFY FACES
+ * To identify faces, you need to create and define a person group.
+ * The Identify operation takes one or several face IDs from DetectedFace or PersistedFace and a PersonGroup and returns
+ * a list of Person objects that each face might belong to. Returned Person objects are wrapped as Candidate objects,
+ * which have a prediction confidence value.
+ */
+ public static async Task IdentifyInPersonGroup(FaceClient client, string url, FaceRecognitionModel recognitionModel)
+ {
+ Console.WriteLine("========IDENTIFY FACES========");
+ Console.WriteLine();
+
+ // Create a dictionary for all your images, grouping similar ones under the same key.
+ Dictionary<string, string[]> personDictionary =
+ new Dictionary<string, string[]>
+ { { "Family1-Dad", new[] { "Family1-Dad1.jpg", "Family1-Dad2.jpg" } },
+ { "Family1-Mom", new[] { "Family1-Mom1.jpg", "Family1-Mom2.jpg" } },
+ { "Family1-Son", new[] { "Family1-Son1.jpg", "Family1-Son2.jpg" } }
+ };
+ // A group photo that includes some of the persons you seek to identify from your dictionary.
+ string sourceImageFileName = "identification1.jpg";
+
+ // Create a person group.
+ Console.WriteLine($"Create a person group ({personGroupId}).");
+ HttpClient httpClient = new HttpClient();
+ httpClient.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", SUBSCRIPTION_KEY);
+ using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["name"] = personGroupId, ["recognitionModel"] = recognitionModel.ToString() }))))
+ {
+ content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
+ await httpClient.PutAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{personGroupId}", content);
+ }
+ // The similar faces will be grouped into a single person group person.
+ foreach (var groupedFace in personDictionary.Keys)
+ {
+ // Throttle requests to stay under the transactions-per-second (TPS) limit.
+ await Task.Delay(250);
+ string? personId = null;
+ using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["name"] = groupedFace }))))
+ {
+ content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
+ using (var response = await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{personGroupId}/persons", content))
+ {
+ string contentString = await response.Content.ReadAsStringAsync();
+ personId = (string?)(JsonConvert.DeserializeObject<Dictionary<string, object>>(contentString)?["personId"]);
+ }
+ }
+ Console.WriteLine($"Create a person group person '{groupedFace}'.");
+
+ // Add face to the person group person.
+ foreach (var similarImage in personDictionary[groupedFace])
+ {
+ Console.WriteLine($"Check whether image is of sufficient quality for recognition");
+ Response> response = await client.DetectAsync(new Uri($"{url}{similarImage}"), FaceDetectionModel.Detection03, recognitionModel, returnFaceId: false, [FaceAttributeType.QualityForRecognition]);
+ IReadOnlyList detectedFaces1 = response.Value;
+ bool sufficientQuality = true;
+ foreach (var face1 in detectedFaces1)
+ {
+ var faceQualityForRecognition = face1.FaceAttributes.QualityForRecognition;
+ // Only "high" quality images are recommended for person enrollment
+ if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value != QualityForRecognition.High))
+ {
+ sufficientQuality = false;
+ break;
+ }
+ }
+
+ if (!sufficientQuality)
+ {
+ continue;
+ }
+
+ if (detectedFaces1.Count != 1)
+ {
+ continue;
+ }
+
+ // add face to the person group
+ Console.WriteLine($"Add face to the person group person({groupedFace}) from image `{similarImage}`");
+ using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["url"] = $"{url}{similarImage}" }))))
+ {
+ content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
+ await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{personGroupId}/persons/{personId}/persistedfaces?detectionModel=detection_03", content);
+ }
+ }
+ }
+
+ // Start to train the person group.
+ Console.WriteLine();
+ Console.WriteLine($"Train person group {personGroupId}.");
+ await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{personGroupId}/train", null);
+
+ // Wait until the training is completed.
+ while (true)
+ {
+ await Task.Delay(1000);
+ string? trainingStatus = null;
+ using (var response = await httpClient.GetAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{personGroupId}/training"))
+ {
+ string contentString = await response.Content.ReadAsStringAsync();
+ trainingStatus = (string?)(JsonConvert.DeserializeObject<Dictionary<string, object>>(contentString)?["status"]);
+ }
+ Console.WriteLine($"Training status: {trainingStatus}.");
+ if ("succeeded".Equals(trainingStatus)) { break; }
+ }
+ Console.WriteLine();
+
+ Console.WriteLine("Pausing for 60 seconds to avoid triggering rate limit on free account...");
+ await Task.Delay(60000);
+
+ List<Guid> sourceFaceIds = new List<Guid>();
+ // Detect faces from source image url.
+ List<FaceDetectionResult> detectedFaces = await DetectFaceRecognize(client, $"{url}{sourceImageFileName}", recognitionModel);
+
+ // Add detected faceId to sourceFaceIds.
+ foreach (var detectedFace in detectedFaces) { sourceFaceIds.Add(detectedFace.FaceId.Value); }
+
+ // Identify the faces in a person group.
+ List<Dictionary<string, object>> identifyResults = new List<Dictionary<string, object>>();
+ using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["faceIds"] = sourceFaceIds, ["largePersonGroupId"] = personGroupId }))))
+ {
+ content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
+ using (var response = await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/identify", content))
+ {
+ string contentString = await response.Content.ReadAsStringAsync();
+ identifyResults = JsonConvert.DeserializeObject<List<Dictionary<string, object>>>(contentString) ?? [];
+ }
+ }
+
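+ // Each identify result contains the source face ID and a ranked list of candidate persons with confidence scores.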
+ foreach (var identifyResult in identifyResults)
+ {
+ string faceId = (string)identifyResult["faceId"];
+ List<Dictionary<string, object>> candidates = JsonConvert.DeserializeObject<List<Dictionary<string, object>>>(((JArray)identifyResult["candidates"]).ToString()) ?? [];
+ if (candidates.Count == 0)
+ {
+ Console.WriteLine($"No person is identified for the face in: {sourceImageFileName} - {faceId},");
+ continue;
+ }
+
+ string? personName = null;
+ using (var response = await httpClient.GetAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{personGroupId}/persons/{candidates.First()["personId"]}"))
+ {
+ string contentString = await response.Content.ReadAsStringAsync();
+ personName = (string?)(JsonConvert.DeserializeObject<Dictionary<string, object>>(contentString)?["name"]);
+ }
+ Console.WriteLine($"Person '{personName}' is identified for the face in: {sourceImageFileName} - {faceId}," +
+ $" confidence: {candidates.First()["confidence"]}.");
+
+ Dictionary<string, object> verifyResult = new Dictionary<string, object>();
+ using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["faceId"] = faceId, ["personId"] = candidates.First()["personId"], ["largePersonGroupId"] = personGroupId }))))
+ {
+ content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
+ using (var response = await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/verify", content))
+ {
+ string contentString = await response.Content.ReadAsStringAsync();
+ verifyResult = JsonConvert.DeserializeObject<Dictionary<string, object>>(contentString) ?? new Dictionary<string, object>();
+ }
+ }
+ Console.WriteLine($"Verification result: is a match? {verifyResult["isIdentical"]}. confidence: {verifyResult["confidence"]}");
+ }
+ Console.WriteLine();
+
+ // Delete person group.
+ Console.WriteLine("========DELETE PERSON GROUP========");
+ Console.WriteLine();
+ await httpClient.DeleteAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{personGroupId}");
+ Console.WriteLine($"Deleted the person group {personGroupId}.");
+ Console.WriteLine();
+ }
+ }
+}
\ No newline at end of file
diff --git a/javascript/Face/Quickstart.js b/javascript/Face/Quickstart.js
new file mode 100644
index 00000000..f070a436
--- /dev/null
+++ b/javascript/Face/Quickstart.js
@@ -0,0 +1,176 @@
+const { randomUUID } = require("crypto");
+
+const { AzureKeyCredential } = require("@azure/core-auth");
+
+const createFaceClient = require("@azure-rest/ai-vision-face").default,
+ { getLongRunningPoller } = require("@azure-rest/ai-vision-face");
+
+const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
+
+const main = async () => {
+ const endpoint = process.env["FACE_ENDPOINT"] ?? "";
+ const apikey = process.env["FACE_APIKEY"] ?? "";
+ const credential = new AzureKeyCredential(apikey);
+ const client = createFaceClient(endpoint, credential);
+
+ const imageBaseUrl =
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/";
+ const personGroupId = randomUUID();
+
+ console.log("========IDENTIFY FACES========");
+ console.log();
+
+ // Create a dictionary for all your images, grouping similar ones under the same key.
+ const personDictionary = {
+ "Family1-Dad": ["Family1-Dad1.jpg", "Family1-Dad2.jpg"],
+ "Family1-Mom": ["Family1-Mom1.jpg", "Family1-Mom2.jpg"],
+ "Family1-Son": ["Family1-Son1.jpg", "Family1-Son2.jpg"],
+ };
+
+ // A group photo that includes some of the persons you seek to identify from your dictionary.
+ const sourceImageFileName = "identification1.jpg";
+
+ // Create a person group.
+ console.log(`Creating a person group with ID: ${personGroupId}`);
+ await client.path("/largepersongroups/{personGroupId}", personGroupId).put({
+ body: {
+ name: personGroupId,
+ recognitionModel: "recognition_04",
+ },
+ });
+
+ // The similar faces will be grouped into a single person group person.
+ console.log("Adding faces to person group...");
+ await Promise.all(
+ Object.keys(personDictionary).map(async (name) => {
+ console.log(`Create a persongroup person: ${name}`);
+ const createPersonGroupPersonResponse = await client
+ .path("/largepersongroups/{personGroupId}/persons", personGroupId)
+ .post({
+ body: { name },
+ });
+
+ const { personId } = createPersonGroupPersonResponse.body;
+
+ await Promise.all(
+ personDictionary[name].map(async (similarImage) => {
+ // Check whether the image is of sufficient quality for recognition.
+ const detectResponse = await client.path("/detect").post({
+ contentType: "application/json",
+ queryParameters: {
+ detectionModel: "detection_03",
+ recognitionModel: "recognition_04",
+ returnFaceId: false,
+ returnFaceAttributes: ["qualityForRecognition"],
+ },
+ body: { url: `${imageBaseUrl}${similarImage}` },
+ });
+
+ const sufficientQuality = detectResponse.body.every(
+ (face) => face.faceAttributes?.qualityForRecognition === "high",
+ );
+ if (!sufficientQuality) {
+ return;
+ }
+
+ if (detectResponse.body.length !== 1) {
+ return;
+ }
+
+ // Quality is sufficient; add the face to the group.
+ console.log(
+ `Add face to the person group person: (${name}) from image: (${similarImage})`,
+ );
+ await client
+ .path(
+ "/largepersongroups/{personGroupId}/persons/{personId}/persistedfaces",
+ personGroupId,
+ personId,
+ )
+ .post({
+ queryParameters: { detectionModel: "detection_03" },
+ body: { url: `${imageBaseUrl}${similarImage}` },
+ });
+ }),
+ );
+ }),
+ );
+ console.log("Done adding faces to person group.");
+
+ // Start to train the person group.
+ console.log();
+ console.log(`Training person group: ${personGroupId}`);
+ const trainResponse = await client
+ .path("/largepersongroups/{personGroupId}/train", personGroupId)
+ .post();
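+ // Training runs as a long-running operation; poll it until it reaches a terminal state.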
+ const poller = await getLongRunningPoller(client, trainResponse);
+ await poller.pollUntilDone();
+ console.log(`Training status: ${poller.getOperationState().status}`);
+ if (poller.getOperationState().status !== "succeeded") {
+ return;
+ }
+
+ console.log("Pausing for 60 seconds to avoid triggering rate limit on free account...");
+ await sleep(60000);
+
+ // Detect faces from source image url and only take those with sufficient quality for recognition.
+ const detectResponse = await client.path("/detect").post({
+ contentType: "application/json",
+ queryParameters: {
+ detectionModel: "detection_03",
+ recognitionModel: "recognition_04",
+ returnFaceId: true,
+ returnFaceAttributes: ["qualityForRecognition"],
+ },
+ body: { url: `${imageBaseUrl}${sourceImageFileName}` },
+ });
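+ // Only keep face IDs whose quality for recognition is not low.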
+ const faceIds = detectResponse.body.filter((face) => face.faceAttributes?.qualityForRecognition !== "low").map((face) => face.faceId);
+
+ // Identify the faces in a person group.
+ const identifyResponse = await client.path("/identify").post({
+ body: { faceIds, largePersonGroupId: personGroupId },
+ });
+ await Promise.all(
+ identifyResponse.body.map(async (result) => {
+ try {
+ const getPersonGroupPersonResponse = await client
+ .path(
+ "/largepersongroups/{personGroupId}/persons/{personId}",
+ personGroupId,
+ result.candidates[0].personId,
+ )
+ .get();
+ const person = getPersonGroupPersonResponse.body;
+ console.log(
+ `Person: ${person.name} is identified for face in: ${sourceImageFileName} with ID: ${result.faceId}. Confidence: ${result.candidates[0].confidence}`,
+ );
+
+ // Verification:
+ const verifyResponse = await client.path("/verify").post({
+ body: {
+ faceId: result.faceId,
+ largePersonGroupId: personGroupId,
+ personId: person.personId,
+ },
+ });
+ console.log(
+ `Verification result between face ${result.faceId} and person ${person.personId}: ${verifyResponse.body.isIdentical} with confidence: ${verifyResponse.body.confidence}`,
+ );
+ } catch (error) {
+ console.log(`No persons identified for face with ID ${result.faceId}`);
+ }
+ }),
+ );
+ console.log();
+
+ // Delete person group.
+ console.log(`Deleting person group: ${personGroupId}`);
+ await client.path("/largepersongroups/{personGroupId}", personGroupId).delete();
+ console.log();
+
+ console.log("Done.");
+};
+
+main().catch(console.error);
\ No newline at end of file
diff --git a/python/Face/Quickstart.py b/python/Face/Quickstart.py
new file mode 100644
index 00000000..6dbcd628
--- /dev/null
+++ b/python/Face/Quickstart.py
@@ -0,0 +1,232 @@
+import os
+import time
+import uuid
+import requests
+
+from azure.core.credentials import AzureKeyCredential
+from azure.ai.vision.face import FaceClient
+from azure.ai.vision.face.models import (
+ FaceAttributeTypeRecognition04,
+ FaceDetectionModel,
+ FaceRecognitionModel,
+ QualityForRecognition,
+)
+
+
+# This key will serve all examples in this document.
+KEY = os.environ["FACE_APIKEY"]
+
+# This endpoint will be used in all examples in this quickstart.
+ENDPOINT = os.environ["FACE_ENDPOINT"]
+
+# Used in the Person Group Operations and Delete Person Group examples.
+# PERSON_GROUP_ID should be all lowercase and alphanumeric. For example, 'mygroupname' (dashes are OK).
+PERSON_GROUP_ID = str(uuid.uuid4()) # assign a random ID (or name it anything)
+
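+# These headers authenticate the raw REST calls used below for large person group management.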
+HEADERS = {"Ocp-Apim-Subscription-Key": KEY, "Content-Type": "application/json"}
+
+# Create an authenticated FaceClient.
+with FaceClient(endpoint=ENDPOINT, credential=AzureKeyCredential(KEY)) as face_client:
+ '''
+ Create the PersonGroup
+ '''
+ # Create empty Person Group. Person Group ID must be lower case, alphanumeric, and/or with '-', '_'.
+ print("Person group:", PERSON_GROUP_ID)
+ response = requests.put(
+ ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}",
+ headers=HEADERS,
+ json={"name": PERSON_GROUP_ID, "recognitionModel": "recognition_04"})
+ response.raise_for_status()
+
+ # Define woman friend
+ response = requests.post(ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}/persons", headers=HEADERS, json={"name": "Woman"})
+ response.raise_for_status()
+ woman = response.json()
+ # Define man friend
+ response = requests.post(ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}/persons", headers=HEADERS, json={"name": "Man"})
+ response.raise_for_status()
+ man = response.json()
+ # Define child friend
+ response = requests.post(ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}/persons", headers=HEADERS, json={"name": "Child"})
+ response.raise_for_status()
+ child = response.json()
+
+ '''
+ Detect faces and register them to each person
+ '''
+ # Image URLs for each person, pulled from the sample data repository on GitHub.
+ woman_images = [
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Mom1.jpg", # noqa: E501
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Mom2.jpg", # noqa: E501
+ ]
+ man_images = [
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad1.jpg", # noqa: E501
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad2.jpg", # noqa: E501
+ ]
+ child_images = [
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Son1.jpg", # noqa: E501
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Son2.jpg", # noqa: E501
+ ]
+
+ # Add to woman person
+ for image in woman_images:
+ # Check whether the image is of sufficient quality for recognition.
+ sufficientQuality = True
+ detected_faces = face_client.detect_from_url(
+ url=image,
+ detection_model=FaceDetectionModel.DETECTION_03,
+ recognition_model=FaceRecognitionModel.RECOGNITION_04,
+ return_face_id=True,
+ return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION])
+ for face in detected_faces:
+ if face.face_attributes.quality_for_recognition != QualityForRecognition.HIGH:
+ sufficientQuality = False
+ break
+
+ if not sufficientQuality:
+ continue
+
+ if len(detected_faces) != 1:
+ continue
+
+ response = requests.post(
+ ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}/persons/{woman['personId']}/persistedFaces",
+ headers=HEADERS,
+ json={"url": image})
+ response.raise_for_status()
+ print(f"face {face.face_id} added to person {woman['personId']}")
+
+
+ # Add to man person
+ for image in man_images:
+ # Check whether the image is of sufficient quality for recognition.
+ sufficientQuality = True
+ detected_faces = face_client.detect_from_url(
+ url=image,
+ detection_model=FaceDetectionModel.DETECTION_03,
+ recognition_model=FaceRecognitionModel.RECOGNITION_04,
+ return_face_id=True,
+ return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION])
+ for face in detected_faces:
+ if face.face_attributes.quality_for_recognition != QualityForRecognition.HIGH:
+ sufficientQuality = False
+ break
+
+ if not sufficientQuality:
+ continue
+
+ if len(detected_faces) != 1:
+ continue
+
+ response = requests.post(
+ ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}/persons/{man['personId']}/persistedFaces",
+ headers=HEADERS,
+ json={"url": image})
+ response.raise_for_status()
+ print(f"face {face.face_id} added to person {man['personId']}")
+
+ # Add to child person
+ for image in child_images:
+ # Check whether the image is of sufficient quality for recognition.
+ sufficientQuality = True
+ detected_faces = face_client.detect_from_url(
+ url=image,
+ detection_model=FaceDetectionModel.DETECTION_03,
+ recognition_model=FaceRecognitionModel.RECOGNITION_04,
+ return_face_id=True,
+ return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION])
+ for face in detected_faces:
+ if face.face_attributes.quality_for_recognition != QualityForRecognition.HIGH:
+ sufficientQuality = False
+ break
+ if not sufficientQuality:
+ continue
+
+ if len(detected_faces) != 1:
+ continue
+
+ response = requests.post(
+ ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}/persons/{child['personId']}/persistedFaces",
+ headers=HEADERS,
+ json={"url": image})
+ response.raise_for_status()
+ print(f"face {face.face_id} added to person {child['personId']}")
+
+ '''
+ Train PersonGroup
+ '''
+ # Train the person group
+ print(f"Train the person group {PERSON_GROUP_ID}")
+ response = requests.post(ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}/train", headers=HEADERS)
+ response.raise_for_status()
+
+ while True:
+ response = requests.get(ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}/training", headers=HEADERS)
+ response.raise_for_status()
+ training_status = response.json()["status"]
+ if training_status == "succeeded":
+ break
+ # Wait a few seconds between polls instead of hammering the endpoint.
+ time.sleep(5)
+ print(f"The person group {PERSON_GROUP_ID} is trained successfully.")
+
+ '''
+ Identify a face against a defined PersonGroup
+ '''
+ # Group image for testing against
+ test_image = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/identification1.jpg" # noqa: E501
+
+ print("Pausing for 60 seconds to avoid triggering rate limit on free account...")
+ time.sleep(60)
+
+ # Detect faces
+ face_ids = []
+ # Use detection model 3 for better performance and recognition model 4 to support the
+ # quality-for-recognition attribute.
+ faces = face_client.detect_from_url(
+ url=test_image,
+ detection_model=FaceDetectionModel.DETECTION_03,
+ recognition_model=FaceRecognitionModel.RECOGNITION_04,
+ return_face_id=True,
+ return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION])
+ for face in faces:
+ # Only take the face if it is of sufficient quality.
+ if face.face_attributes.quality_for_recognition != QualityForRecognition.LOW:
+ face_ids.append(face.face_id)
+
+ # Identify faces
+ response = requests.post(
+ ENDPOINT + f"/face/v1.0/identify",
+ headers=HEADERS,
+ json={"faceIds": face_ids, "largePersonGroupId": PERSON_GROUP_ID})
+ response.raise_for_status()
+ results = response.json()
+ print("Identifying faces in image")
+ if not results:
+ print("No person identified in the person group")
+ for identifiedFace in results:
+ if len(identifiedFace["candidates"]) > 0:
+ print(f"Person is identified for face ID {identifiedFace['faceId']} in image, with a confidence of "
+ f"{identifiedFace['candidates'][0]['confidence']}.") # Get topmost confidence score
+
+ # Verify faces
+ response = requests.post(
+ ENDPOINT + f"/face/v1.0/verify",
+ headers=HEADERS,
+ json={"faceId": identifiedFace["faceId"], "personId": identifiedFace["candidates"][0]["personId"], "largePersonGroupId": PERSON_GROUP_ID})
+ response.raise_for_status()
+ verify_result = response.json()
+ print(f"verification result: {verify_result['isIdentical']}. confidence: {verify_result['confidence']}")
+ else:
+ print(f"No person identified for face ID {identifiedFace['faceId']} in image.")
+
+ print()
+
+ # Delete the person group
+ response = requests.delete(ENDPOINT + f"/face/v1.0/largepersongroups/{PERSON_GROUP_ID}", headers=HEADERS)
+ response.raise_for_status()
+ print(f"The person group {PERSON_GROUP_ID} is deleted.")
+
+ print()
+ print("End of quickstart.")
+
\ No newline at end of file